index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/libs/langchain-google-common | lc_public_repos/langchainjs/libs/langchain-google-common/src/llms.ts | import { CallbackManager, Callbacks } from "@langchain/core/callbacks/manager";
import { BaseLLM, LLM } from "@langchain/core/language_models/llms";
import {
type BaseLanguageModelCallOptions,
BaseLanguageModelInput,
} from "@langchain/core/language_models/base";
import { BaseMessage, MessageContent } from "@langchain/core/messages";
import { GenerationChunk } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { AbstractGoogleLLMConnection } from "./connection.js";
import {
GoogleAIBaseLLMInput,
GoogleAIModelParams,
GoogleAISafetySetting,
GooglePlatformType,
GeminiContent,
GoogleAIResponseMimeType,
} from "./types.js";
import {
copyAIModelParams,
copyAndValidateModelParamsInto,
} from "./utils/common.js";
import { DefaultGeminiSafetyHandler } from "./utils/gemini.js";
import { ApiKeyGoogleAuth, GoogleAbstractedClient } from "./auth.js";
import { ensureParams } from "./utils/failed_handler.js";
import { ChatGoogleBase } from "./chat_models.js";
import type { GoogleBaseLLMInput, GoogleAISafetyHandler } from "./types.js";
export { GoogleBaseLLMInput };
/**
 * Connection used by the LLM entry point. Wraps a single MessageContent
 * input as a one-element "user" conversation for the Gemini API.
 */
class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<
  MessageContent,
  AuthOptions
> {
  async formatContents(
    input: MessageContent,
    _parameters: GoogleAIModelParams
  ): Promise<GeminiContent[]> {
    // Convert the raw message content into Gemini "parts" via the API helper.
    const messageParts = await this.api.messageContentToParts!(input);
    // Vertex AI requires every content entry to carry an explicit role.
    return [{ role: "user", parts: messageParts }];
  }
}
// Input for the internal proxy chat model: the regular LLM input plus the
// already-built LLM connection whose authenticated client should be reused.
type ProxyChatInput<AuthOptions> = GoogleAIBaseLLMInput<AuthOptions> & {
  connection: GoogleLLMConnection<AuthOptions>;
};

// Thin chat-model wrapper that reuses the LLM's existing client instead of
// building a new one. Used by GoogleBaseLLM to delegate multimodal and
// streaming calls to the chat implementation.
class ProxyChatGoogle<AuthOptions> extends ChatGoogleBase<AuthOptions> {
  constructor(fields: ProxyChatInput<AuthOptions>) {
    super(fields);
  }

  buildAbstractedClient(
    fields: ProxyChatInput<AuthOptions>
  ): GoogleAbstractedClient {
    // Reuse the client from the LLM's connection rather than creating one.
    return fields.connection.client;
  }
}
/**
 * Shared base class for Google (Gemini) LLM integrations.
 * Handles auth/client construction and parameter copying, and delegates
 * multimodal/streaming calls to an internal chat-model proxy that shares
 * the same client.
 */
export abstract class GoogleBaseLLM<AuthOptions>
  extends LLM<BaseLanguageModelCallOptions>
  implements GoogleBaseLLMInput<AuthOptions>
{
  // Used for tracing, replace with the same name as your class
  static lc_name() {
    return "GoogleLLM";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      authOptions: "GOOGLE_AUTH_OPTIONS",
    };
  }

  // Kept so createProxyChat() can rebuild a chat model with the same config.
  originalFields?: GoogleBaseLLMInput<AuthOptions>;

  lc_serializable = true;

  modelName = "gemini-pro";

  model = "gemini-pro";

  temperature = 0.7;

  maxOutputTokens = 1024;

  topP = 0.8;

  topK = 40;

  stopSequences: string[] = [];

  safetySettings: GoogleAISafetySetting[] = [];

  safetyHandler: GoogleAISafetyHandler;

  responseMimeType: GoogleAIResponseMimeType = "text/plain";

  // Non-streaming connection.
  protected connection: GoogleLLMConnection<AuthOptions>;

  // Streaming variant of the same connection (shares the client).
  protected streamedConnection: GoogleLLMConnection<AuthOptions>;

  constructor(fields?: GoogleBaseLLMInput<AuthOptions>) {
    super(ensureParams(fields));
    this.originalFields = fields;

    // Copies model params (temperature, topP, ...) onto `this`, validating
    // their ranges and throwing on invalid values.
    copyAndValidateModelParamsInto(fields, this);
    this.safetyHandler =
      fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();

    const client = this.buildClient(fields);
    this.buildConnection(fields ?? {}, client);
  }

  // Subclasses supply the platform-specific authenticated client.
  abstract buildAbstractedClient(
    fields?: GoogleAIBaseLLMInput<AuthOptions>
  ): GoogleAbstractedClient;

  buildApiKeyClient(apiKey: string): GoogleAbstractedClient {
    return new ApiKeyGoogleAuth(apiKey);
  }

  buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined {
    return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
  }

  // Prefer an API-key client when a key is available (explicit or via the
  // GOOGLE_API_KEY env var); otherwise fall back to the subclass client.
  buildClient(
    fields?: GoogleAIBaseLLMInput<AuthOptions>
  ): GoogleAbstractedClient {
    const apiKey = this.buildApiKey(fields);
    if (apiKey) {
      return this.buildApiKeyClient(apiKey);
    } else {
      return this.buildAbstractedClient(fields);
    }
  }

  // Builds both the plain and the streaming connection with the same
  // client; `this` is spread last so validated params win over raw fields.
  buildConnection(
    fields: GoogleBaseLLMInput<AuthOptions>,
    client: GoogleAbstractedClient
  ) {
    this.connection = new GoogleLLMConnection(
      { ...fields, ...this },
      this.caller,
      client,
      false
    );

    this.streamedConnection = new GoogleLLMConnection(
      { ...fields, ...this },
      this.caller,
      client,
      true
    );
  }

  get platform(): GooglePlatformType {
    return this.connection.platform;
  }

  // Replace
  _llmType() {
    return "googlellm";
  }

  formatPrompt(prompt: string): MessageContent {
    return prompt;
  }

  /**
   * For some given input string and options, return a string output.
   *
   * Despite the fact that `invoke` is overridden below, we still need this
   * in order to handle public APi calls to `generate()`.
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const parameters = copyAIModelParams(this, options);
    const result = await this.connection.request(prompt, parameters, options);
    const ret = this.connection.api.responseToString(result);
    return ret;
  }

  // Normally, you should not override this method and instead should override
  // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
  // the LLM.
  async *_streamIterator(
    input: BaseLanguageModelInput,
    options?: BaseLanguageModelCallOptions
  ): AsyncGenerator<string> {
    // TODO: Refactor callback setup and teardown code into core
    const prompt = BaseLLM._convertInputToPromptValue(input);
    const [runnableConfig, callOptions] =
      this._separateRunnableConfigFromCallOptions(options);
    const callbackManager_ = await CallbackManager.configure(
      runnableConfig.callbacks,
      this.callbacks,
      runnableConfig.tags,
      this.tags,
      runnableConfig.metadata,
      this.metadata,
      { verbose: this.verbose }
    );
    const extra = {
      options: callOptions,
      invocation_params: this?.invocationParams(callOptions),
      batch_size: 1,
    };
    const runManagers = await callbackManager_?.handleLLMStart(
      this.toJSON(),
      [prompt.toString()],
      undefined,
      undefined,
      extra,
      undefined,
      undefined,
      runnableConfig.runName
    );
    // Accumulates the full text so handleLLMEnd sees the complete generation.
    let generation = new GenerationChunk({
      text: "",
    });
    // Delegate the actual streaming to a chat model sharing this client.
    const proxyChat = this.createProxyChat();
    try {
      for await (const chunk of proxyChat._streamIterator(input, options)) {
        const stringValue = this.connection.api.chunkToString(chunk);
        const generationChunk = new GenerationChunk({
          text: stringValue,
        });
        generation = generation.concat(generationChunk);
        yield stringValue;
      }
    } catch (err) {
      // Report the failure to every run manager before propagating.
      await Promise.all(
        (runManagers ?? []).map((runManager) => runManager?.handleLLMError(err))
      );
      throw err;
    }
    await Promise.all(
      (runManagers ?? []).map((runManager) =>
        runManager?.handleLLMEnd({
          generations: [[generation]],
        })
      )
    );
  }

  // NOTE(review): only the first message's content is sent — the rest of
  // `messages` is ignored, and no model parameters are forwarded (empty
  // params object). Confirm this is intentional.
  async predictMessages(
    messages: BaseMessage[],
    options?: string[] | BaseLanguageModelCallOptions,
    _callbacks?: Callbacks
  ): Promise<BaseMessage> {
    const { content } = messages[0];
    const result = await this.connection.request(
      content,
      {},
      options as BaseLanguageModelCallOptions
    );
    const ret = this.connection.api.responseToBaseMessage(result);
    return ret;
  }

  /**
   * Internal implementation detail to allow Google LLMs to support
   * multimodal input by delegating to the chat model implementation.
   *
   * TODO: Replace with something less hacky.
   */
  protected createProxyChat(): ChatGoogleBase<AuthOptions> {
    return new ProxyChatGoogle<AuthOptions>({
      ...this.originalFields,
      connection: this.connection,
    });
  }

  // TODO: Remove the need to override this - we are doing it to
  // allow the LLM to handle multimodal types of input.
  async invoke(
    input: BaseLanguageModelInput,
    options?: BaseLanguageModelCallOptions
  ): Promise<string> {
    // Drain the stream and concatenate the chunks into the final string.
    const stream = await this._streamIterator(input, options);
    let generatedOutput = "";
    for await (const chunk of stream) {
      generatedOutput += chunk;
    }
    return generatedOutput;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common | lc_public_repos/langchainjs/libs/langchain-google-common/src/index.ts | export * from "./chat_models.js";
export * from "./llms.js";
export * from "./embeddings.js";
export * from "./auth.js";
export * from "./connection.js";
export * from "./types.js";
export * from "./utils/stream.js";
export * from "./utils/common.js";
export * from "./utils/zod_to_gemini_parameters.js";
export * from "./utils/safety.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common | lc_public_repos/langchainjs/libs/langchain-google-common/src/chat_models.ts | import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { UsageMetadata, type BaseMessage } from "@langchain/core/messages";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
BaseChatModel,
LangSmithParams,
type BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { AIMessageChunk } from "@langchain/core/messages";
import {
BaseLanguageModelInput,
StructuredOutputMethodOptions,
} from "@langchain/core/language_models/base";
import type { z } from "zod";
import {
Runnable,
RunnablePassthrough,
RunnableSequence,
} from "@langchain/core/runnables";
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { concat } from "@langchain/core/utils/stream";
import {
GoogleAIBaseLLMInput,
GoogleAIModelParams,
GoogleAISafetySetting,
GoogleConnectionParams,
GooglePlatformType,
GeminiTool,
GoogleAIBaseLanguageModelCallOptions,
GoogleAIAPI,
GoogleAIAPIParams,
} from "./types.js";
import {
convertToGeminiTools,
copyAIModelParams,
copyAndValidateModelParamsInto,
} from "./utils/common.js";
import { AbstractGoogleLLMConnection } from "./connection.js";
import { DefaultGeminiSafetyHandler, getGeminiAPI } from "./utils/gemini.js";
import { ApiKeyGoogleAuth, GoogleAbstractedClient } from "./auth.js";
import { JsonStream } from "./utils/stream.js";
import { ensureParams } from "./utils/failed_handler.js";
import type {
GoogleBaseLLMInput,
GoogleAISafetyHandler,
GoogleAISafetyParams,
GeminiFunctionDeclaration,
GeminiFunctionSchema,
GoogleAIToolType,
GeminiAPIConfig,
} from "./types.js";
import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
/**
 * Connection for chat-style calls: formats BaseMessage[] conversations and
 * decides whether the model supports native system instructions.
 */
export class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<
  BaseMessage[],
  AuthOptions
> {
  // Explicit user override; undefined means "compute from the model name".
  convertSystemMessageToHumanContent: boolean | undefined;

  constructor(
    fields: GoogleAIBaseLLMInput<AuthOptions> | undefined,
    caller: AsyncCaller,
    client: GoogleAbstractedClient,
    streaming: boolean
  ) {
    super(fields, caller, client, streaming);
    this.convertSystemMessageToHumanContent =
      fields?.convertSystemMessageToHumanContent;
  }

  get useSystemInstruction(): boolean {
    // An explicit boolean override wins; its meaning is inverted
    // ("convert to human content" implies "do NOT use systemInstruction").
    const explicit = this.convertSystemMessageToHumanContent;
    if (typeof explicit === "boolean") {
      return !explicit;
    }
    return this.computeUseSystemInstruction;
  }

  get computeUseSystemInstruction(): boolean {
    // System instructions work on models from April 2024 and later:
    // Vertex AI gemini-1.5-pro / gemini-1.0-002+, AI Studio
    // gemini-1.5-pro-latest. Everything below is a known exception.
    if (this.modelFamily === "palm") {
      return false;
    }
    if (this.modelName === "gemini-1.0-pro-001") {
      return false;
    }
    if (
      this.modelName.startsWith("gemini-pro-vision") ||
      this.modelName.startsWith("gemini-1.0-pro-vision")
    ) {
      return false;
    }
    // On AI Studio "gemini-pro" still aliases gemini-1.0-pro-001.
    if (this.modelName === "gemini-pro" && this.platform === "gai") {
      return false;
    }
    return true;
  }

  buildGeminiAPI(): GoogleAIAPI {
    // Merge the computed system-instruction flag into any user-provided
    // API config; the user's config keys take precedence.
    const config: GeminiAPIConfig = {
      useSystemInstruction: this.useSystemInstruction,
      ...(this.apiConfig as GeminiAPIConfig),
    };
    return getGeminiAPI(config);
  }

  get api(): GoogleAIAPI {
    return this.apiName === "google" ? this.buildGeminiAPI() : super.api;
  }
}
/**
* Input to chat model class.
*/
export interface ChatGoogleBaseInput<AuthOptions>
extends BaseChatModelParams,
GoogleConnectionParams<AuthOptions>,
GoogleAIModelParams,
GoogleAISafetyParams,
GoogleAIAPIParams,
Pick<GoogleAIBaseLanguageModelCallOptions, "streamUsage"> {}
/**
 * Integration with a Google chat model. Subclasses provide the
 * platform-specific authenticated client; this base class handles
 * parameter validation, connection setup, generation, streaming,
 * tool binding, and structured output.
 */
export abstract class ChatGoogleBase<AuthOptions>
  extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions, AIMessageChunk>
  implements ChatGoogleBaseInput<AuthOptions>
{
  // Used for tracing, replace with the same name as your class
  static lc_name() {
    return "ChatGoogle";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      authOptions: "GOOGLE_AUTH_OPTIONS",
    };
  }

  lc_serializable = true;

  // Set based on modelName
  model: string;

  modelName = "gemini-pro";

  temperature = 0.7;

  maxOutputTokens = 1024;

  topP = 0.8;

  topK = 40;

  stopSequences: string[] = [];

  safetySettings: GoogleAISafetySetting[] = [];

  // May intentionally be undefined, meaning to compute this.
  convertSystemMessageToHumanContent: boolean | undefined;

  safetyHandler: GoogleAISafetyHandler;

  streamUsage = true;

  streaming = false;

  // Non-streaming connection.
  protected connection: ChatConnection<AuthOptions>;

  // Streaming variant of the same connection (shares the client).
  protected streamedConnection: ChatConnection<AuthOptions>;

  constructor(fields?: ChatGoogleBaseInput<AuthOptions>) {
    super(ensureParams(fields));

    // Copies model params onto `this`, validating ranges (throws on invalid).
    copyAndValidateModelParamsInto(fields, this);
    this.safetyHandler =
      fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
    this.streamUsage = fields?.streamUsage ?? this.streamUsage;
    const client = this.buildClient(fields);
    this.buildConnection(fields ?? {}, client);
  }

  // LangSmith tracing metadata for this model invocation.
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const params = this.invocationParams(options);
    return {
      ls_provider: "google_vertexai",
      ls_model_name: this.model,
      ls_model_type: "chat",
      ls_temperature: params.temperature ?? undefined,
      ls_max_tokens: params.maxOutputTokens ?? undefined,
      ls_stop: options.stop,
    };
  }

  // Subclasses supply the platform-specific authenticated client.
  abstract buildAbstractedClient(
    fields?: GoogleAIBaseLLMInput<AuthOptions>
  ): GoogleAbstractedClient;

  buildApiKeyClient(apiKey: string): GoogleAbstractedClient {
    return new ApiKeyGoogleAuth(apiKey);
  }

  buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined {
    return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
  }

  // Prefer an API-key client when a key is available (explicit or via the
  // GOOGLE_API_KEY env var); otherwise fall back to the subclass client.
  buildClient(
    fields?: GoogleAIBaseLLMInput<AuthOptions>
  ): GoogleAbstractedClient {
    const apiKey = this.buildApiKey(fields);
    if (apiKey) {
      return this.buildApiKeyClient(apiKey);
    } else {
      return this.buildAbstractedClient(fields);
    }
  }

  // Builds both the plain and the streaming connection with the same
  // client; `this` is spread last so validated params win over raw fields.
  buildConnection(
    fields: GoogleBaseLLMInput<AuthOptions>,
    client: GoogleAbstractedClient
  ) {
    this.connection = new ChatConnection(
      { ...fields, ...this },
      this.caller,
      client,
      false
    );

    this.streamedConnection = new ChatConnection(
      { ...fields, ...this },
      this.caller,
      client,
      true
    );
  }

  get platform(): GooglePlatformType {
    return this.connection.platform;
  }

  override bindTools(
    tools: GoogleAIToolType[],
    kwargs?: Partial<GoogleAIBaseLanguageModelCallOptions>
  ): Runnable<
    BaseLanguageModelInput,
    AIMessageChunk,
    GoogleAIBaseLanguageModelCallOptions
  > {
    // Tools are normalized to the Gemini declaration format before binding.
    return this.bind({ tools: convertToGeminiTools(tools), ...kwargs });
  }

  // Replace
  _llmType() {
    return "chat_integration";
  }

  /**
   * Get the parameters used to invoke the model
   */
  override invocationParams(options?: this["ParsedCallOptions"]) {
    return copyAIModelParams(this, options);
  }

  async _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager: CallbackManagerForLLMRun | undefined
  ): Promise<ChatResult> {
    const parameters = this.invocationParams(options);
    // When `streaming` is set, generate by draining the streaming path
    // and concatenating the chunks into a single result.
    if (this.streaming) {
      const stream = this._streamResponseChunks(messages, options, runManager);
      let finalChunk: ChatGenerationChunk | null = null;
      for await (const chunk of stream) {
        finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk);
      }
      if (!finalChunk) {
        throw new Error("No chunks were returned from the stream.");
      }
      return {
        generations: [finalChunk],
      };
    }

    const response = await this.connection.request(
      messages,
      parameters,
      options,
      runManager
    );
    const ret = this.connection.api.responseToChatResult(response);
    // Emit the full text as a single "token" so new-token callbacks still
    // fire on the non-streaming path.
    const chunk = ret?.generations?.[0];
    if (chunk) {
      await runManager?.handleLLMNewToken(chunk.text || "");
    }
    return ret;
  }

  async *_streamResponseChunks(
    _messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    // Make the call as a streaming request
    const parameters = this.invocationParams(options);
    const response = await this.streamedConnection.request(
      _messages,
      parameters,
      options,
      runManager
    );

    // Get the streaming parser of the response
    const stream = response.data as JsonStream;

    let usageMetadata: UsageMetadata | undefined;
    // Loop until the end of the stream
    // During the loop, yield each time we get a chunk from the streaming parser
    // that is either available or added to the queue
    while (!stream.streamDone) {
      const output = await stream.nextChunk();
      await runManager?.handleCustomEvent(
        `google-chunk-${this.constructor.name}`,
        {
          output,
        }
      );
      // Record token usage when present, unless disabled on the model
      // or per-call via options.streamUsage.
      if (
        output &&
        output.usageMetadata &&
        this.streamUsage !== false &&
        options.streamUsage !== false
      ) {
        usageMetadata = {
          input_tokens: output.usageMetadata.promptTokenCount,
          output_tokens: output.usageMetadata.candidatesTokenCount,
          total_tokens: output.usageMetadata.totalTokenCount,
        };
      }
      // A null chunk marks the end of the stream: emit a final empty chunk
      // carrying the finish reason and the accumulated usage metadata.
      const chunk =
        output !== null
          ? this.connection.api.responseToChatGeneration({ data: output })
          : new ChatGenerationChunk({
              text: "",
              generationInfo: { finishReason: "stop" },
              message: new AIMessageChunk({
                content: "",
                usage_metadata: usageMetadata,
              }),
            });
      if (chunk) {
        yield chunk;
        await runManager?.handleLLMNewToken(
          chunk.text ?? "",
          undefined,
          undefined,
          undefined,
          undefined,
          { chunk }
        );
      }
    }
  }

  /** @ignore */
  _combineLLMOutput() {
    return [];
  }

  /**
   * Bind a schema to the model so it returns structured output via a
   * Gemini function call. Only the "functionCalling" method is supported;
   * requesting "jsonMode" throws.
   */
  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        { raw: BaseMessage; parsed: RunOutput }
      > {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const schema: z.ZodType<RunOutput> | Record<string, any> = outputSchema;
    const name = config?.name;
    const method = config?.method;
    const includeRaw = config?.includeRaw;
    if (method === "jsonMode") {
      throw new Error(`Google only supports "functionCalling" as a method.`);
    }

    let functionName = name ?? "extract";
    let outputParser: BaseLLMOutputParser<RunOutput>;
    let tools: GeminiTool[];
    if (isZodSchema(schema)) {
      // Zod schema: convert to Gemini function parameters; the parser also
      // validates the parsed output against the schema.
      const jsonSchema = zodToGeminiParameters(schema);
      tools = [
        {
          functionDeclarations: [
            {
              name: functionName,
              description:
                jsonSchema.description ?? "A function available to call.",
              parameters: jsonSchema as GeminiFunctionSchema,
            },
          ],
        },
      ];
      outputParser = new JsonOutputKeyToolsParser({
        returnSingle: true,
        keyName: functionName,
        zodSchema: schema,
      });
    } else {
      // Plain object: either a full Gemini function declaration (has `name`
      // and `parameters`), or a bare parameter schema to wrap in one.
      let geminiFunctionDefinition: GeminiFunctionDeclaration;
      if (
        typeof schema.name === "string" &&
        typeof schema.parameters === "object" &&
        schema.parameters != null
      ) {
        geminiFunctionDefinition = schema as GeminiFunctionDeclaration;
        functionName = schema.name;
      } else {
        geminiFunctionDefinition = {
          name: functionName,
          description: schema.description ?? "",
          parameters: schema as GeminiFunctionSchema,
        };
      }
      tools = [
        {
          functionDeclarations: [geminiFunctionDefinition],
        },
      ];
      outputParser = new JsonOutputKeyToolsParser<RunOutput>({
        returnSingle: true,
        keyName: functionName,
      });
    }
    const llm = this.bind({
      tools,
    });

    if (!includeRaw) {
      return llm.pipe(outputParser).withConfig({
        runName: "ChatGoogleStructuredOutput",
      }) as Runnable<BaseLanguageModelInput, RunOutput>;
    }

    // includeRaw: return both the raw message and the parsed value; parsing
    // failures fall back to `parsed: null` instead of throwing.
    const parserAssign = RunnablePassthrough.assign({
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      parsed: (input: any, config) => outputParser.invoke(input.raw, config),
    });
    const parserNone = RunnablePassthrough.assign({
      parsed: () => null,
    });
    const parsedWithFallback = parserAssign.withFallbacks({
      fallbacks: [parserNone],
    });
    return RunnableSequence.from<
      BaseLanguageModelInput,
      { raw: BaseMessage; parsed: RunOutput }
    >([
      {
        raw: llm,
      },
      parsedWithFallback,
    ]).withConfig({
      runName: "StructuredOutputRunnable",
    });
  }
}
/**
 * Duck-type check for a Zod schema: anything exposing a callable `parse`
 * method is treated as one. (Plain JSON-schema objects do not have it.)
 */
function isZodSchema<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput extends Record<string, any> = Record<string, any>
>(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  input: z.ZodType<RunOutput> | Record<string, any>
): input is z.ZodType<RunOutput> {
  const candidate = input as z.ZodType<RunOutput> | undefined;
  return typeof candidate?.parse === "function";
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common | lc_public_repos/langchainjs/libs/langchain-google-common/src/embeddings.ts | import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
import {
AsyncCaller,
AsyncCallerCallOptions,
} from "@langchain/core/utils/async_caller";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { GoogleAIConnection } from "./connection.js";
import { ApiKeyGoogleAuth, GoogleAbstractedClient } from "./auth.js";
import {
GoogleAIModelRequestParams,
GoogleConnectionParams,
GoogleResponse,
} from "./types.js";
/**
 * Connection for the Vertex AI embeddings "predict" endpoint.
 * Sends batches of GoogleEmbeddingsInstance and receives embedding vectors.
 *
 * The redundant pass-through constructor and the unused
 * `convertSystemMessageToHumanContent` field (copy-pasted from the chat
 * connection) were removed; the inherited constructor is sufficient.
 */
class EmbeddingsConnection<
  CallOptions extends AsyncCallerCallOptions,
  AuthOptions
> extends GoogleAIConnection<
  CallOptions,
  GoogleEmbeddingsInstance[],
  AuthOptions,
  GoogleEmbeddingsResponse
> {
  async buildUrlMethod(): Promise<string> {
    // Embeddings use the non-streaming "predict" REST method.
    return "predict";
  }

  /**
   * Shape the request body expected by the predict endpoint.
   * @param input batch of instances to embed
   * @param parameters model request parameters, passed through verbatim
   */
  async formatData(
    input: GoogleEmbeddingsInstance[],
    parameters: GoogleAIModelRequestParams
  ): Promise<unknown> {
    return {
      instances: input,
      parameters,
    };
  }
}
/**
* Defines the parameters required to initialize a
* GoogleEmbeddings instance. It extends EmbeddingsParams and
* GoogleConnectionParams.
*/
export interface BaseGoogleEmbeddingsParams<AuthOptions>
extends EmbeddingsParams,
GoogleConnectionParams<AuthOptions> {
model: string;
}
/**
* Defines additional options specific to the
* GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
*/
export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {}
/**
* Represents an instance for generating embeddings using the Google
* Vertex AI API. It contains the content to be embedded.
*/
export interface GoogleEmbeddingsInstance {
content: string;
}
/**
* Defines the structure of the embeddings results returned by the Google
* Vertex AI API. It extends GoogleBasePrediction and contains the
* embeddings and their statistics.
*/
export interface GoogleEmbeddingsResponse extends GoogleResponse {
data: {
predictions: {
embeddings: {
statistics: {
token_count: number;
truncated: boolean;
};
values: number[];
};
}[];
};
}
/**
 * Enables calls to Google APIs for generating
 * text embeddings. Subclasses provide the platform-specific
 * authenticated client.
 */
export abstract class BaseGoogleEmbeddings<AuthOptions>
  extends Embeddings
  implements BaseGoogleEmbeddingsParams<AuthOptions>
{
  model: string;

  private connection: EmbeddingsConnection<
    BaseGoogleEmbeddingsOptions,
    AuthOptions
  >;

  constructor(fields: BaseGoogleEmbeddingsParams<AuthOptions>) {
    super(fields);

    this.model = fields.model;
    // A single non-streaming connection serves all embedding requests.
    this.connection = new EmbeddingsConnection(
      { ...fields, ...this },
      this.caller,
      this.buildClient(fields),
      false
    );
  }

  // Subclasses supply the platform-specific authenticated client.
  abstract buildAbstractedClient(
    fields?: GoogleConnectionParams<AuthOptions>
  ): GoogleAbstractedClient;

  buildApiKeyClient(apiKey: string): GoogleAbstractedClient {
    return new ApiKeyGoogleAuth(apiKey);
  }

  buildApiKey(
    fields?: GoogleConnectionParams<AuthOptions>
  ): string | undefined {
    return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
  }

  // Prefer an API-key client when a key is available (explicit or via the
  // GOOGLE_API_KEY env var); otherwise fall back to the subclass client.
  buildClient(
    fields?: GoogleConnectionParams<AuthOptions>
  ): GoogleAbstractedClient {
    const apiKey = this.buildApiKey(fields);
    if (apiKey) {
      return this.buildApiKeyClient(apiKey);
    } else {
      return this.buildAbstractedClient(fields);
    }
  }

  /**
   * Takes an array of documents as input and returns a promise that
   * resolves to a 2D array of embeddings for each document. It splits the
   * documents into chunks and makes requests to the Google Vertex AI API to
   * generate embeddings.
   * @param documents An array of documents to be embedded.
   * @returns A promise that resolves to a 2D array of embeddings for each document.
   */
  async embedDocuments(documents: string[]): Promise<number[][]> {
    const instanceChunks: GoogleEmbeddingsInstance[][] = chunkArray(
      documents.map((document) => ({
        content: document,
      })),
      5
    ); // Vertex AI accepts max 5 instances per prediction
    const parameters = {};
    const options = {};
    // Chunks are requested in parallel; Promise.all preserves order, so
    // the flattened embeddings line up with the input documents.
    const responses = await Promise.all(
      instanceChunks.map((instances) =>
        this.connection.request(instances, parameters, options)
      )
    );
    const result: number[][] =
      responses
        ?.map(
          (response) =>
            response?.data?.predictions?.map(
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              (result: any) => result.embeddings?.values
            ) ?? []
        )
        .flat() ?? [];
    return result;
  }

  /**
   * Takes a document as input and returns a promise that resolves to an
   * embedding for the document. It calls the embedDocuments method with the
   * document as the input.
   * @param document A document to be embedded.
   * @returns A promise that resolves to an embedding for the document.
   */
  async embedQuery(document: string): Promise<number[]> {
    const data = await this.embedDocuments([document]);
    return data[0];
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common | lc_public_repos/langchainjs/libs/langchain-google-common/src/types-anthropic.ts | export interface AnthropicCacheControl {
type: "ephemeral" | string;
}
interface AnthropicMessageContentBase {
type: string;
cache_control?: AnthropicCacheControl | null;
}
export interface AnthropicMessageContentText
extends AnthropicMessageContentBase {
type: "text";
text: string;
}
export interface AnthropicMessageContentImage
extends AnthropicMessageContentBase {
type: "image";
source: {
type: "base64" | string;
media_type: string;
data: string;
};
}
// TODO: Define this
export type AnthropicMessageContentToolUseInput = object;
export interface AnthropicMessageContentToolUse
extends AnthropicMessageContentBase {
type: "tool_use";
id: string;
name: string;
input: AnthropicMessageContentToolUseInput;
}
export type AnthropicMessageContentToolResultContent =
| AnthropicMessageContentText
| AnthropicMessageContentImage;
export interface AnthropicMessageContentToolResult
extends AnthropicMessageContentBase {
type: "tool_result";
tool_use_id: string;
is_error?: boolean;
content: string | AnthropicMessageContentToolResultContent[];
}
export type AnthropicMessageContent =
| AnthropicMessageContentText
| AnthropicMessageContentImage
| AnthropicMessageContentToolUse
| AnthropicMessageContentToolResult;
export interface AnthropicMessage {
role: string;
content: string | AnthropicMessageContent[];
}
export interface AnthropicMetadata {
user_id?: string | null;
}
interface AnthropicToolChoiceBase {
type: string;
}
export interface AnthropicToolChoiceAuto extends AnthropicToolChoiceBase {
type: "auto";
}
export interface AnthropicToolChoiceAny extends AnthropicToolChoiceBase {
type: "any";
}
export interface AnthropicToolChoiceTool extends AnthropicToolChoiceBase {
type: "tool";
name: string;
}
export type AnthropicToolChoice =
| AnthropicToolChoiceAuto
| AnthropicToolChoiceAny
| AnthropicToolChoiceTool;
// TODO: Define this
export type AnthropicToolInputSchema = object;
export interface AnthropicTool {
type?: string; // Just available on tools 20241022 and later?
name: string;
description?: string;
cache_control?: AnthropicCacheControl;
input_schema: AnthropicToolInputSchema;
}
/** Request body for the Anthropic Messages API. */
export interface AnthropicRequest {
  // API revision selector (e.g. "vertex-2023-10-16" when calling via Vertex).
  anthropic_version: string;
  messages: AnthropicMessage[];
  // Top-level system prompt; Anthropic keeps it out of `messages`.
  system?: string;
  // When true the response arrives as a server-sent-event stream.
  stream?: boolean;
  max_tokens: number;
  temperature?: number;
  top_k?: number;
  top_p?: number;
  stop_sequences?: string[];
  metadata?: AnthropicMetadata;
  tool_choice?: AnthropicToolChoice;
  tools?: AnthropicTool[];
}
export type AnthropicRequestSettings = Pick<
AnthropicRequest,
"max_tokens" | "temperature" | "top_k" | "top_p" | "stop_sequences" | "stream"
>;
export interface AnthropicContentText {
type: "text";
text: string;
}
export interface AnthropicContentToolUse {
type: "tool_use";
id: string;
name: string;
input: object;
}
export type AnthropicContent = AnthropicContentText | AnthropicContentToolUse;
/** Token accounting attached to an Anthropic response message. */
export interface AnthropicUsage {
  // Tokens consumed by the request prompt.
  input_tokens: number;
  // Tokens produced in the response.
  output_tokens: number;
  // Tokens written to the prompt cache when caching is enabled.
  cache_creation_input_tokens: number | null;
  // NOTE(review): Anthropic's documented usage schema has
  // `cache_read_input_tokens`, not `cache_creation_output_tokens` —
  // verify this field name against the Messages API response.
  cache_creation_output_tokens: number | null;
}
export type AnthropicResponseData =
| AnthropicResponseMessage
| AnthropicStreamBaseEvent;
export interface AnthropicResponseMessage {
id: string;
type: string;
role: string;
content: AnthropicContent[];
model: string;
stop_reason: string | null;
stop_sequence: string | null;
usage: AnthropicUsage;
}
export interface AnthropicAPIConfig {
version?: string;
}
export type AnthropicStreamEventType =
| "message_start"
| "content_block_start"
| "content_block_delta"
| "content_block_stop"
| "message_delta"
| "message_stop"
| "ping"
| "error";
export type AnthropicStreamDeltaType = "text_delta" | "input_json_delta";
export interface AnthropicStreamBaseEvent {
type: AnthropicStreamEventType;
}
export interface AnthropicStreamMessageStartEvent
extends AnthropicStreamBaseEvent {
type: "message_start";
message: AnthropicResponseMessage;
}
export interface AnthropicStreamContentBlockStartEvent
extends AnthropicStreamBaseEvent {
type: "content_block_start";
index: number;
content_block: AnthropicContent;
}
export interface AnthropicStreamBaseDelta {
type: AnthropicStreamDeltaType;
}
export interface AnthropicStreamTextDelta extends AnthropicStreamBaseDelta {
type: "text_delta";
text: string;
}
export interface AnthropicStreamInputJsonDelta
extends AnthropicStreamBaseDelta {
type: "input_json_delta";
partial_json: string;
}
export type AnthropicStreamDelta =
| AnthropicStreamTextDelta
| AnthropicStreamInputJsonDelta;
export interface AnthropicStreamContentBlockDeltaEvent
extends AnthropicStreamBaseEvent {
type: "content_block_delta";
index: number;
delta: AnthropicStreamDelta;
}
export interface AnthropicStreamContentBlockStopEvent
extends AnthropicStreamBaseEvent {
type: "content_block_stop";
index: number;
}
export interface AnthropicStreamMessageDeltaEvent
extends AnthropicStreamBaseEvent {
type: "message_delta";
delta: Partial<AnthropicResponseMessage>;
}
export interface AnthropicStreamMessageStopEvent
extends AnthropicStreamBaseEvent {
type: "message_stop";
}
export interface AnthropicStreamPingEvent extends AnthropicStreamBaseEvent {
type: "ping";
}
export interface AnthropicStreamErrorEvent extends AnthropicStreamBaseEvent {
type: "error";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
error: any;
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/llms.test.ts | import { expect, test } from "@jest/globals";
import {
BaseMessage,
HumanMessageChunk,
MessageContentComplex,
} from "@langchain/core/messages";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import { GoogleBaseLLM, GoogleBaseLLMInput } from "../llms.js";
import {
authOptions,
MockClient,
MockClientAuthInfo,
mockFile,
mockId,
} from "./mock.js";
import { GoogleAISafetyError } from "../utils/safety.js";
import { MessageGeminiSafetyHandler } from "../utils/gemini.js";
/**
 * Test double for GoogleBaseLLM that substitutes the real HTTP transport
 * with a MockClient, so tests can record requests and replay fixture
 * responses without touching the network.
 *
 * The explicit pass-through constructor has been removed: it only forwarded
 * `fields` to `super`, which the implicitly inherited constructor already
 * does (`@typescript-eslint/no-useless-constructor`).
 */
class GoogleLLM extends GoogleBaseLLM<MockClientAuthInfo> {
  /**
   * Supply the mock client in place of a real Google-authenticated client.
   * @param fields - Optional model/auth configuration used to seed the mock.
   */
  buildAbstractedClient(
    fields?: GoogleBaseLLMInput<MockClientAuthInfo>
  ): MockClient {
    const options = authOptions(fields);
    return new MockClient(options);
  }
}
// Unit tests for GoogleBaseLLM run against MockClient, which records each
// outgoing request into `record` and replays canned responses from the
// fixture files under src/tests/data/.
describe("Mock Google LLM", () => {
  test("Setting invalid model parameters", async () => {
    // Out-of-range sampling parameters must be rejected at construction time.
    expect(() => {
      const model = new GoogleLLM({
        temperature: 1.2,
      });
      expect(model).toBeNull(); // For linting. Should never reach.
    }).toThrowError(/temperature/);
    expect(() => {
      const model = new GoogleLLM({
        topP: -2,
      });
      expect(model).toBeNull(); // For linting. Should never reach.
    }).toThrowError(/topP/);
    expect(() => {
      const model = new GoogleLLM({
        topP: 2,
      });
      expect(model).toBeNull(); // For linting. Should never reach.
    }).toThrowError(/topP/);
    expect(() => {
      const model = new GoogleLLM({
        topK: -2,
      });
      expect(model).toBeNull(); // For linting. Should never reach.
    }).toThrowError(/topK/);
  });
  test("user agent header", async () => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    // NOTE(review): this local deliberately shadows the imported
    // `authOptions` helper; the object literal is what GoogleLLM receives.
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
    };
    const model = new GoogleLLM({
      authOptions,
    });
    await model.call("Hello world");
    // Every request must carry a langchain-js User-Agent with a version tag.
    expect(record?.opts?.headers).toHaveProperty("User-Agent");
    expect(record.opts.headers["User-Agent"]).toMatch(
      /langchain-js\/[0-9.]+-GoogleLLMConnection/
    );
  });
  test("platform default", async () => {
    // With no platformType given, the model should default to "gcp".
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
    };
    const model = new GoogleLLM({
      authOptions,
    });
    expect(model.platform).toEqual("gcp");
  });
  test("platform set", async () => {
    // An explicit platformType must be honored over the default.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
    };
    const model = new GoogleLLM({
      authOptions,
      platformType: "gai",
    });
    expect(model.platform).toEqual("gai");
  });
  test("scope default", async () => {
    // Default (gcp) platform should request the cloud-platform OAuth scope.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
    };
    // eslint-disable-next-line no-new
    new GoogleLLM({
      authOptions,
    });
    expect(record).toHaveProperty("authOptions");
    expect(record.authOptions).toHaveProperty("scopes");
    expect(Array.isArray(record.authOptions.scopes)).toBeTruthy();
    expect(record.authOptions.scopes).toHaveLength(1);
    expect(record.authOptions.scopes[0]).toEqual(
      "https://www.googleapis.com/auth/cloud-platform"
    );
  });
  test("scope default set", async () => {
    // The "gai" platform should request the generative-language scope.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
    };
    // eslint-disable-next-line no-new
    new GoogleLLM({
      authOptions,
      platformType: "gai",
    });
    expect(record).toHaveProperty("authOptions");
    expect(record.authOptions).toHaveProperty("scopes");
    expect(Array.isArray(record.authOptions.scopes)).toBeTruthy();
    expect(record.authOptions.scopes).toHaveLength(1);
    expect(record.authOptions.scopes[0]).toEqual(
      "https://www.googleapis.com/auth/generative-language"
    );
  });
  test("scope set", async () => {
    // Caller-provided scopes must not be overwritten by platform defaults.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      scopes: ["https://example.com/bogus"],
    };
    // eslint-disable-next-line no-new
    new GoogleLLM({
      authOptions,
    });
    expect(record).toHaveProperty("authOptions");
    expect(record.authOptions).toHaveProperty("scopes");
    expect(Array.isArray(record.authOptions.scopes)).toBeTruthy();
    expect(record.authOptions.scopes).toHaveLength(1);
    expect(record.authOptions.scopes[0]).toEqual("https://example.com/bogus");
  });
  // NOTE(review): "1: generateContent" and "1: invoke" are currently
  // near-duplicates — both call invoke() against llm-1-mock.json; the second
  // additionally inspects the recorded request body.
  test("1: generateContent", async () => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-1-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    const response = await model.invoke("Hello world");
    expect(response).toEqual(
      "1. Sock it to Me!\n2. Heel Yeah Socks\n3. Sole Mates\n4. Happy Soles\n5. Toe-tally Awesome Socks\n6. Sock Appeal\n7. Footsie Wootsies\n8. Thread Heads\n9. Sock Squad\n10. Sock-a-licious\n11. Darn Good Socks\n12. Sockcessories\n13. Sole Searching\n14. Sockstar\n15. Socktopia\n16. Sockology\n17. Elevated Toes\n18. The Urban Sole\n19. The Hippie Sole\n20. Sole Fuel"
    );
    // expect(record.opts.url).toEqual(`https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:generateContent`)
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("1: invoke", async () => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-1-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    const response = await model.invoke("Hello world");
    expect(response).toEqual(
      "1. Sock it to Me!\n2. Heel Yeah Socks\n3. Sole Mates\n4. Happy Soles\n5. Toe-tally Awesome Socks\n6. Sock Appeal\n7. Footsie Wootsies\n8. Thread Heads\n9. Sock Squad\n10. Sock-a-licious\n11. Darn Good Socks\n12. Sockcessories\n13. Sole Searching\n14. Sockstar\n15. Socktopia\n16. Sockology\n17. Elevated Toes\n18. The Urban Sole\n19. The Hippie Sole\n20. Sole Fuel"
    );
    // expect(record.opts.url).toEqual(`https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:generateContent`)
    // console.log("record", JSON.stringify(record, null, 2));
    // The recorded request body must contain the prompt as a single text part.
    expect(record.opts).toHaveProperty("data");
    expect(record.opts.data).toHaveProperty("contents");
    expect(record.opts.data.contents).toHaveLength(1);
    expect(record.opts.data.contents[0]).toHaveProperty("parts");
    const parts = record?.opts?.data?.contents[0]?.parts;
    // console.log(parts);
    expect(parts).toHaveLength(1);
    expect(parts[0]).toHaveProperty("text");
    expect(parts[0].text).toEqual("Hello world");
  });
  test("2: streamGenerateContent - non-streaming", async () => {
    // Non-streaming invoke() should still hit the streamGenerateContent URL
    // and join the fixture chunks into one string.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-2-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    const response = await model.invoke("Hello world");
    const expectedResponse = await mockFile("llm-2-mock.txt");
    expect(response).toEqual(expectedResponse);
    expect(record.opts.url).toEqual(
      `https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent`
    );
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("3: streamGenerateContent - streaming", async () => {
    // stream() should yield one string chunk per fixture entry (6 total).
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-3-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    const response = await model.stream("Hello world");
    const responseArray: string[] = [];
    for await (const value of response) {
      expect(typeof value).toEqual("string");
      responseArray.push(value);
    }
    expect(responseArray).toHaveLength(6);
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("4: streamGenerateContent - non-streaming - safety exception", async () => {
    // With the default safety handler, a blocked fixture must raise
    // GoogleAISafetyError carrying the partial reply.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-4-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    let caught = false;
    try {
      await model.call("Hello world");
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (xx: any) {
      caught = true;
      expect(xx).toBeInstanceOf(GoogleAISafetyError);
      const reply = xx?.reply;
      const expectedReply = await mockFile("llm-4-mock.txt");
      expect(reply).toEqual(expectedReply);
    }
    expect(caught).toEqual(true);
  });
  test("4: streamGenerateContent - non-streaming - safety message", async () => {
    // A MessageGeminiSafetyHandler should replace the blocked content with
    // its configured message instead of throwing.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-4-mock.json",
    };
    const safetyHandler = new MessageGeminiSafetyHandler({
      msg: "I'm sorry Dave, but I can't do that.",
    });
    const model = new GoogleLLM({
      authOptions,
      safetyHandler,
    });
    const reply = await model.invoke("Hello world");
    expect(reply).toContain("I'm sorry Dave, but I can't do that.");
  });
  test("5: streamGenerateContent - streaming - safety exception", async () => {
    // While streaming, safe chunks (4 here) arrive before the blocked chunk
    // triggers GoogleAISafetyError.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-5-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
    });
    const response = await model.stream("Hello world");
    const responseArray: string[] = [];
    let caught = false;
    try {
      for await (const value of response) {
        expect(typeof value).toEqual("string");
        responseArray.push(value);
      }
    } catch (xx) {
      caught = true;
      expect(xx).toBeInstanceOf(GoogleAISafetyError);
    }
    expect(responseArray).toHaveLength(4);
    // console.log("record", JSON.stringify(record, null, 2));
    expect(caught).toEqual(true);
  });
  test("5: streamGenerateContent - streaming - safety message", async () => {
    // With the message handler, the blocked chunk is substituted in-stream
    // and the full 6-chunk stream completes.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-5-mock.json",
    };
    const safetyHandler = new MessageGeminiSafetyHandler({
      msg: "I'm sorry Dave, but I can't do that.",
    });
    const model = new GoogleLLM({
      authOptions,
      safetyHandler,
    });
    const response = await model.stream("Hello world");
    const responseArray: string[] = [];
    for await (const value of response) {
      expect(typeof value).toEqual("string");
      responseArray.push(value);
    }
    expect(responseArray).toHaveLength(6);
    expect(responseArray[4]).toEqual("I'm sorry Dave, but I can't do that.");
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("6: predictMessages image blue-square", async () => {
    // A text+image_url message must be converted into a text part plus an
    // inlineData part with mimeType and data in the request body.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-6-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
      model: "gemini-pro-vision",
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=`,
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    const res = await model.predictMessages(messages);
    // console.log("record", record);
    expect(record.opts).toHaveProperty("data");
    expect(record.opts.data).toHaveProperty("contents");
    expect(record.opts.data.contents).toHaveLength(1);
    expect(record.opts.data.contents[0]).toHaveProperty("parts");
    const parts = record?.opts?.data?.contents[0]?.parts;
    // console.log(parts);
    expect(parts).toHaveLength(2);
    expect(parts[0]).toHaveProperty("text");
    expect(parts[1]).toHaveProperty("inlineData");
    expect(parts[1].inlineData).toHaveProperty("mimeType");
    expect(parts[1].inlineData).toHaveProperty("data");
    expect(res?.content?.[0]).toEqual({ text: "A blue square.", type: "text" });
  });
  /*
   * NOTE(review): an earlier comment claimed this test was skipped, but it
   * is active. .invoke() flattens the model reply to text only at the
   * moment, so the result is compared as a plain string.
   */
  test("6: invoke image blue-square", async () => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-6-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
      model: "gemini-pro-vision",
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=`,
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    const input = new ChatPromptValue(messages);
    const res = await model.invoke(input);
    // console.log("record", record);
    expect(record.opts).toHaveProperty("data");
    expect(record.opts.data).toHaveProperty("contents");
    expect(record.opts.data.contents).toHaveLength(1);
    expect(record.opts.data.contents[0]).toHaveProperty("parts");
    const parts = record?.opts?.data?.contents[0]?.parts;
    // console.log(parts);
    expect(parts).toHaveLength(2);
    expect(parts[0]).toHaveProperty("text");
    expect(parts[1]).toHaveProperty("inlineData");
    expect(parts[1].inlineData).toHaveProperty("mimeType");
    expect(parts[1].inlineData).toHaveProperty("data");
    expect(res).toEqual("A blue square.");
  });
  /*
   * NOTE(review): an earlier comment claimed this test was skipped, but it
   * is active. .stream() converts everything to text only at the moment.
   */
  test("7: stream image blue-square", async () => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-7-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
      model: "gemini-pro-image",
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=`,
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    // const input: BaseLanguageModelInput = [["human", message]]
    const input = new ChatPromptValue(messages);
    const response = await model.stream(input);
    const responseArray: string[] = [];
    for await (const value of response) {
      responseArray.push(value);
    }
    expect(responseArray).toHaveLength(3);
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("8: streamGenerateContent - streaming - json responseMimeType", async () => {
    // Streamed chunks with responseMimeType=application/json must join into
    // parseable JSON.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-8-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
      responseMimeType: "application/json",
    });
    const response = await model.stream("Give me a recipe for banana bread.");
    const responseArray: string[] = [];
    for await (const value of response) {
      expect(typeof value).toEqual("string");
      responseArray.push(value);
    }
    expect(responseArray).toHaveLength(10);
    expect(typeof JSON.parse(responseArray.join(""))).toEqual("object");
    // console.log("record", JSON.stringify(record, null, 2));
  });
  test("9: streamGenerateContent - non-streaming - check json responseMimeType", async () => {
    // responseMimeType must be forwarded in generationConfig and the reply
    // must parse as JSON.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "llm-9-mock.json",
    };
    const model = new GoogleLLM({
      authOptions,
      responseMimeType: "application/json",
    });
    const response = await model.invoke("Give me a recipe for banana bread.");
    expect(typeof JSON.parse(response)).toEqual("object");
    expect(record.opts.data.generationConfig.responseMimeType).toEqual(
      "application/json"
    );
    // console.log("record", JSON.stringify(record, null, 2));
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/utils.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { beforeEach, expect, test } from "@jest/globals";
import { InMemoryStore } from "@langchain/core/stores";
import { SerializedConstructor } from "@langchain/core/load/serializable";
import { load } from "@langchain/core/load";
import { z } from "zod";
import { zodToGeminiParameters } from "../utils/zod_to_gemini_parameters.js";
import {
BackedBlobStore,
BlobStore,
MediaBlob,
MediaManager,
ReadThroughBlobStore,
SimpleWebBlobStore,
} from "../experimental/utils/media_core.js";
import {
ReadableJsonStream,
ReadableSseJsonStream,
ReadableSseStream,
} from "../utils/stream.js";
// Tests for the zod → Gemini function-parameter schema conversion; Gemini
// rejects `additionalProperties`, so these checks assert it is stripped.
describe("zodToGeminiParameters", () => {
  test("can convert zod schema to gemini schema", () => {
    const zodSchema = z
      .object({
        operation: z
          .enum(["add", "subtract", "multiply", "divide"])
          .describe("The type of operation to execute"),
        number1: z.number().describe("The first number to operate on."),
        number2: z.number().describe("The second number to operate on."),
        childObject: z.object({}),
      })
      .describe("A simple calculator tool");
    const convertedSchema = zodToGeminiParameters(zodSchema);
    expect(convertedSchema.type).toBe("object");
    expect(convertedSchema.description).toBe("A simple calculator tool");
    // additionalProperties must be removed from the converted schema.
    expect((convertedSchema as any).additionalProperties).toBeUndefined();
    expect(convertedSchema.properties).toEqual({
      operation: {
        type: "string",
        enum: ["add", "subtract", "multiply", "divide"],
        description: "The type of operation to execute",
      },
      number1: {
        type: "number",
        description: "The first number to operate on.",
      },
      number2: {
        type: "number",
        description: "The second number to operate on.",
      },
      childObject: {
        type: "object",
        properties: {},
      },
    });
    expect(convertedSchema.required).toEqual([
      "operation",
      "number1",
      "number2",
      "childObject",
    ]);
  });
  test("removes additional properties from arrays", () => {
    // additionalProperties should be stripped at every nesting level,
    // including array element schemas.
    const zodSchema = z
      .object({
        people: z
          .object({
            name: z.string().describe("The name of a person"),
          })
          .array()
          .describe("person elements"),
      })
      .describe("A list of people");
    const convertedSchema = zodToGeminiParameters(zodSchema);
    expect(convertedSchema.type).toBe("object");
    expect(convertedSchema.description).toBe("A list of people");
    expect((convertedSchema as any).additionalProperties).toBeUndefined();
    const peopleSchema = convertedSchema?.properties?.people;
    expect(peopleSchema).not.toBeUndefined();
    if (peopleSchema !== undefined) {
      expect(peopleSchema.type).toBe("array");
      expect((peopleSchema as any).additionalProperties).toBeUndefined();
      expect(peopleSchema.description).toBe("person elements");
    }
    const arrayItemsSchema = peopleSchema?.items;
    expect(arrayItemsSchema).not.toBeUndefined();
    if (arrayItemsSchema !== undefined) {
      expect(arrayItemsSchema.type).toBe("object");
      expect((arrayItemsSchema as any).additionalProperties).toBeUndefined();
    }
  });
});
// Tests for the experimental media_core primitives: MediaBlob conversions
// (string/dataUrl/serialization) and the web-backed blob store.
describe("media core", () => {
  test("MediaBlob plain", async () => {
    const blob = new Blob(["This is a test"], { type: "text/plain" });
    const mblob = await MediaBlob.fromBlob(blob);
    expect(mblob.dataType).toEqual("text/plain");
    expect(mblob.mimetype).toEqual("text/plain");
    // With no charset given, the encoding defaults to utf-8.
    expect(mblob.encoding).toEqual("utf-8");
    expect(await mblob.asString()).toEqual("This is a test");
  });
  test("MediaBlob charset", async () => {
    // A charset in the content type is normalized to lowercase and split
    // into mimetype + encoding.
    const blob = new Blob(["This is a test"], {
      type: "text/plain; charset=US-ASCII",
    });
    const mblob = await MediaBlob.fromBlob(blob);
    expect(mblob.dataType).toEqual("text/plain; charset=us-ascii");
    expect(mblob.mimetype).toEqual("text/plain");
    expect(mblob.encoding).toEqual("us-ascii");
    expect(await mblob.asString()).toEqual("This is a test");
  });
  test("MediaBlob fromDataUrl", async () => {
    // Round-trip: blob -> data URL -> blob preserves content and mimetype.
    const blobData = "This is a test";
    const blobMimeType = "text/plain";
    const blobDataType = `${blobMimeType}; charset=US-ASCII`;
    const blob = new Blob([blobData], {
      type: blobDataType,
    });
    const mblob = await MediaBlob.fromBlob(blob);
    const dataUrl = await mblob.asDataUrl();
    const dblob = MediaBlob.fromDataUrl(dataUrl);
    expect(await dblob.asString()).toEqual(blobData);
    expect(dblob.mimetype).toEqual(blobMimeType);
  });
  test("MediaBlob serialize", async () => {
    // Serialization stores the payload base64-encoded in kwargs.data.value.
    const blob = new Blob(["This is a test"], { type: "text/plain" });
    const mblob = await MediaBlob.fromBlob(blob);
    console.log("serialize mblob", mblob);
    const serialized = mblob.toJSON() as SerializedConstructor;
    console.log("serialized", serialized);
    expect(serialized.kwargs).toHaveProperty("data");
    expect(serialized.kwargs.data.value).toEqual("VGhpcyBpcyBhIHRlc3Q=");
  });
  test("MediaBlob deserialize", async () => {
    const serialized: SerializedConstructor = {
      lc: 1,
      type: "constructor",
      id: [
        "langchain",
        "google_common",
        "experimental",
        "utils",
        "media_core",
        "MediaBlob",
      ],
      kwargs: {
        data: {
          value: "VGhpcyBpcyBhIHRlc3Q=",
          type: "text/plain",
        },
      },
    };
    const mblob: MediaBlob = await load(JSON.stringify(serialized), {
      importMap: {
        google_common__experimental__utils__media_core: await import(
          "../experimental/utils/media_core.js"
        ),
      },
    });
    console.log("deserialize mblob", mblob);
    expect(mblob.dataType).toEqual("text/plain");
    expect(await mblob.asString()).toEqual("This is a test");
  });
  test("SimpleWebBlobStore fetch", async () => {
    // NOTE(review): this test performs a live HTTP fetch of example.com.
    const webStore = new SimpleWebBlobStore();
    const exampleBlob = await webStore.fetch("http://example.com/");
    console.log(exampleBlob);
    expect(exampleBlob?.mimetype).toEqual("text/html");
    expect(exampleBlob?.encoding).toEqual("utf-8");
    expect(exampleBlob?.size).toBeGreaterThan(0);
    expect(exampleBlob?.metadata).toBeDefined();
    expect(exampleBlob?.metadata?.ok).toBeTruthy();
    expect(exampleBlob?.metadata?.status).toEqual(200);
  });
  describe("BackedBlobStore", () => {
    test("simple", async () => {
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
      });
      const data = new Blob(["This is a test"], { type: "text/plain" });
      const path = "simple://foo";
      const blob = await MediaBlob.fromBlob(data, { path });
      const storedBlob = await store.store(blob);
      expect(storedBlob).toBeDefined();
      const fetchedBlob = await store.fetch(path);
      expect(fetchedBlob).toBeDefined();
    });
    test("missing undefined", async () => {
      // Default fetch behavior for a missing blob is to return undefined.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
      });
      const path = "simple://foo";
      const fetchedBlob = await store.fetch(path);
      expect(fetchedBlob).toBeUndefined();
    });
    test("missing emptyBlob defaultConfig", async () => {
      // actionIfBlobMissing: "emptyBlob" returns a zero-size placeholder
      // carrying the requested path.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultFetchOptions: {
          actionIfBlobMissing: "emptyBlob",
        },
      });
      const path = "simple://foo";
      const fetchedBlob = await store.fetch(path);
      expect(fetchedBlob).toBeDefined();
      expect(fetchedBlob?.size).toEqual(0);
      expect(fetchedBlob?.path).toEqual(path);
    });
    test("missing undefined fetch", async () => {
      // Per-call options override the store defaults.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultFetchOptions: {
          actionIfBlobMissing: "emptyBlob",
        },
      });
      const path = "simple://foo";
      const fetchedBlob = await store.fetch(path, {
        actionIfBlobMissing: undefined,
      });
      expect(fetchedBlob).toBeUndefined();
    });
    test("invalid undefined", async () => {
      // Storing a blob whose path doesn't match the store's prefix, with no
      // actionIfInvalid configured, yields undefined.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultStoreOptions: {
          pathPrefix: "example://bar/",
        },
      });
      const path = "simple://foo";
      const data = new Blob(["This is a test"], { type: "text/plain" });
      const blob = await MediaBlob.fromBlob(data, { path });
      const storedBlob = await store.store(blob);
      expect(storedBlob).toBeUndefined();
    });
    test("invalid ignore", async () => {
      // actionIfInvalid: "ignore" stores the blob under its original path.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultStoreOptions: {
          actionIfInvalid: "ignore",
          pathPrefix: "example://bar/",
        },
      });
      const path = "simple://foo";
      const data = new Blob(["This is a test"], { type: "text/plain" });
      const blob = await MediaBlob.fromBlob(data, { path });
      const storedBlob = await store.store(blob);
      expect(storedBlob).toBeDefined();
      expect(storedBlob?.path).toEqual(path);
      expect(storedBlob?.metadata).toBeUndefined();
    });
    test("invalid prefixPath", async () => {
      // actionIfInvalid: "prefixPath" rewrites the path under the prefix and
      // remembers the original in metadata.langchainOldPath.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultStoreOptions: {
          actionIfInvalid: "prefixPath",
          pathPrefix: "example://bar/",
        },
      });
      const path = "simple://foo";
      const data = new Blob(["This is a test"], { type: "text/plain" });
      const blob = await MediaBlob.fromBlob(data, { path });
      const storedBlob = await store.store(blob);
      expect(storedBlob?.path).toEqual("example://bar/foo");
      expect(await storedBlob?.asString()).toEqual("This is a test");
      expect(storedBlob?.metadata?.langchainOldPath).toEqual(path);
    });
    test("invalid prefixUuid", async () => {
      // actionIfInvalid: "prefixUuid4" rewrites the path to prefix + UUIDv4
      // while preserving content and existing metadata.
      const backingStore = new InMemoryStore<MediaBlob>();
      const store = new BackedBlobStore({
        backingStore,
        defaultStoreOptions: {
          actionIfInvalid: "prefixUuid4",
          pathPrefix: "example://bar/",
        },
      });
      const path = "simple://foo";
      const data = new Blob(["This is a test"], { type: "text/plain" });
      const metadata = {
        alpha: "one",
        bravo: "two",
      };
      const blob = await MediaBlob.fromBlob(data, { path, metadata });
      const storedBlob = await store.store(blob);
      expect(storedBlob?.path).toMatch(
        /example:\/\/bar\/[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$/i
      );
      expect(storedBlob?.size).toEqual(14);
      expect(await storedBlob?.asString()).toEqual("This is a test");
      expect(storedBlob?.metadata?.alpha).toEqual("one");
      expect(storedBlob?.metadata?.langchainOldPath).toEqual(path);
    });
  });
  describe("MediaManager", () => {
    // InMemoryStore subclass exposing a length so tests can count entries.
    class MemStore extends InMemoryStore<MediaBlob> {
      get length() {
        return Object.keys(this.store).length;
      }
    }
    let mediaManager: MediaManager;
    let aliasMemory: MemStore;
    let canonicalMemory: MemStore;
    let resolverMemory: MemStore;
    // Helper: wrap `text` in a MediaBlob at `path` and persist it.
    async function store(
      blobStore: BlobStore,
      path: string,
      text: string
    ): Promise<void> {
      const data = new Blob([text], { type: "text/plain" });
      const blob = await MediaBlob.fromBlob(data, { path });
      await blobStore.store(blob);
    }
    beforeEach(async () => {
      // Wire up a read-through store (alias -> canonical) plus a resolver
      // preloaded with two fixture blobs.
      aliasMemory = new MemStore();
      const aliasStore = new BackedBlobStore({
        backingStore: aliasMemory,
        defaultFetchOptions: {
          actionIfBlobMissing: undefined,
        },
      });
      canonicalMemory = new MemStore();
      const canonicalStore = new BackedBlobStore({
        backingStore: canonicalMemory,
        defaultStoreOptions: {
          pathPrefix: "canonical://store/",
          actionIfInvalid: "prefixPath",
        },
        defaultFetchOptions: {
          actionIfBlobMissing: undefined,
        },
      });
      resolverMemory = new MemStore();
      const resolver = new BackedBlobStore({
        backingStore: resolverMemory,
        defaultFetchOptions: {
          actionIfBlobMissing: "emptyBlob",
        },
      });
      const mediaStore = new ReadThroughBlobStore({
        baseStore: aliasStore,
        backingStore: canonicalStore,
      });
      mediaManager = new MediaManager({
        store: mediaStore,
        resolvers: [resolver],
      });
      await store(resolver, "resolve://host/foo", "fooing");
      await store(resolver, "resolve://host2/bar/baz", "barbazing");
    });
    test("environment", async () => {
      // Sanity-check the beforeEach wiring before the real test below.
      expect(resolverMemory.length).toEqual(2);
      const fooBlob = await mediaManager.resolvers?.[0]?.fetch(
        "resolve://host/foo"
      );
      expect(await fooBlob?.asString()).toEqual("fooing");
    });
    test("simple", async () => {
      const uri = "resolve://host/foo";
      const curi = "canonical://store/host/foo";
      const blob = await mediaManager.getMediaBlob(uri);
      expect(await blob?.asString()).toEqual("fooing");
      expect(blob?.path).toEqual(curi);
      // In the alias store,
      // we should be able to fetch it by the resolve uri, but the
      // path in the blob itself should be the canonical uri
      expect(aliasMemory.length).toEqual(1);
      const mediaStore: ReadThroughBlobStore =
        mediaManager.store as ReadThroughBlobStore;
      const aliasBlob = await mediaStore.baseStore.fetch(uri);
      expect(aliasBlob).toBeDefined();
      expect(aliasBlob?.path).toEqual(curi);
      expect(await aliasBlob?.asString()).toEqual("fooing");
      // For the canonical store,
      // fetching it by the resolve uri should fail
      // but fetching it by the canonical uri should succeed
      expect(canonicalMemory.length).toEqual(1);
      const canonicalBlobU = await mediaStore.backingStore.fetch(uri);
      expect(canonicalBlobU).toBeUndefined();
      const canonicalBlob = await mediaStore.backingStore.fetch(curi);
      expect(canonicalBlob).toBeDefined();
      expect(canonicalBlob?.path).toEqual(curi);
      expect(await canonicalBlob?.asString()).toEqual("fooing");
    });
  });
});
/** Encode a string as UTF-8 bytes, for use as raw stream chunks in tests. */
function toUint8Array(data: string): Uint8Array {
  const encoder = new TextEncoder();
  return encoder.encode(data);
}
// Tests for the ReadableStream wrappers that parse JSON arrays and
// server-sent events out of raw byte chunks.
describe("streaming", () => {
  test("ReadableJsonStream can handle stream", async () => {
    // JSON objects may be split across arbitrary chunk boundaries.
    const data = [
      toUint8Array("["),
      toUint8Array('{"i": 1}'),
      toUint8Array('{"i'),
      toUint8Array('": 2}'),
      toUint8Array("]"),
    ];
    const source = new ReadableStream({
      start(controller) {
        data.forEach((chunk) => controller.enqueue(chunk));
        controller.close();
      },
    });
    const stream = new ReadableJsonStream(source);
    expect(await stream.nextChunk()).toEqual({ i: 1 });
    expect(await stream.nextChunk()).toEqual({ i: 2 });
    expect(await stream.nextChunk()).toBeNull();
    expect(stream.streamDone).toEqual(true);
  });
  test("ReadableJsonStream can handle multibyte stream", async () => {
    // Chunk boundaries may fall in the middle of a multi-byte UTF-8
    // character; the decoder must stitch the bytes back together.
    const data = [
      toUint8Array("["),
      toUint8Array('{"i": 1, "msg":"helloπ"}'),
      toUint8Array('{"i": 2,'),
      toUint8Array('"msg":"γγ'),
      new Uint8Array([0xe3]), // 1st byte of "γ«"
      new Uint8Array([0x81, 0xab]), // 2-3rd bytes of "γ«"
      toUint8Array("γ‘γ―"),
      new Uint8Array([0xf0, 0x9f]), // first half bytes of "π"
      new Uint8Array([0x91, 0x8b]), // second half bytes of "π"
      toUint8Array('"}'),
      toUint8Array("]"),
    ];
    const source = new ReadableStream({
      start(controller) {
        data.forEach((chunk) => controller.enqueue(chunk));
        controller.close();
      },
    });
    const stream = new ReadableJsonStream(source);
    expect(await stream.nextChunk()).toEqual({ i: 1, msg: "helloπ" });
    expect(await stream.nextChunk()).toEqual({ i: 2, msg: "γγγ«γ‘γ―π" });
    expect(await stream.nextChunk()).toBeNull();
    expect(stream.streamDone).toEqual(true);
  });
  // Two SSE events (ping, pong), each terminated by a blank line.
  const eventData: string[] = [
    "event: ping\n",
    'data: {"type": "ping"}\n',
    "\n",
    "event: pong\n",
    'data: {"type": "pong", "value": "ping-pong"}\n',
    "\n",
    "\n",
  ];
  test("SseStream", async () => {
    // ReadableSseStream yields raw {event, data} records.
    const source = new ReadableStream({
      start(controller) {
        eventData.forEach((chunk) => controller.enqueue(toUint8Array(chunk)));
        controller.close();
      },
    });
    let chunk;
    const stream = new ReadableSseStream(source);
    chunk = await stream.nextChunk();
    expect(chunk.event).toEqual("ping");
    expect(chunk.data).toEqual('{"type": "ping"}');
    chunk = await stream.nextChunk();
    expect(chunk.event).toEqual("pong");
    chunk = await stream.nextChunk();
    expect(chunk).toBeNull();
    expect(stream.streamDone).toEqual(true);
  });
  test("SseJsonStream", async () => {
    // ReadableSseJsonStream parses each event's data field as JSON.
    const source = new ReadableStream({
      start(controller) {
        eventData.forEach((chunk) => controller.enqueue(toUint8Array(chunk)));
        controller.close();
      },
    });
    let chunk;
    const stream = new ReadableSseJsonStream(source);
    chunk = await stream.nextChunk();
    expect(chunk.type).toEqual("ping");
    chunk = await stream.nextChunk();
    expect(chunk.type).toEqual("pong");
    expect(chunk.value).toEqual("ping-pong");
    chunk = await stream.nextChunk();
    expect(chunk).toBeNull();
    expect(stream.streamDone).toEqual(true);
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/mock.ts | import fs from "fs/promises";
import {
ensureAuthOptionScopes,
GoogleAbstractedClient,
GoogleAbstractedClientOps,
} from "../auth.js";
import { JsonStream } from "../utils/stream.js";
import { GoogleAIBaseLLMInput } from "../types.js";
// Monotonic counter so that ids minted within the same millisecond are
// still distinct; Date.now() alone can collide under rapid calls.
let mockIdCounter = 0;

/**
 * Returns a unique mock identifier (e.g. "mock-id-1700000000000-1"),
 * combining the current timestamp with a per-process counter.
 *
 * @returns An opaque id string suitable for use as a fake project id.
 */
export function mockId(): string {
  mockIdCounter += 1;
  return `mock-id-${Date.now()}-${mockIdCounter}`;
}
/**
 * Reads a test fixture from src/tests/data and returns its contents
 * as a UTF-8 string.
 *
 * @param name - The fixture file name (relative to src/tests/data).
 * @returns The file contents decoded as UTF-8.
 */
export async function mockFile(name: string): Promise<string> {
  const path = `src/tests/data/${name}`;
  return await fs.readFile(path, { encoding: "utf-8" });
}
/**
 * Configuration for MockClient, doubling as the test's window into what
 * the client "sent" (via `record`) and what it should answer (via
 * `resultFile`).
 */
export interface MockClientAuthInfo {
  // Populated by MockClient with the options of the most recent request,
  // so tests can assert on the outgoing payload.
  record: Record<string, unknown>;
  // The project id MockClient.getProjectId() reports.
  projectId: string;
  scopes?: string[]; // Just for testing
  // Name of a fixture under src/tests/data used as the mock response body.
  resultFile?: string;
}
/**
 * Builds a MockClientAuthInfo from optional LLM input fields, filling in
 * a fresh record and a generated project id when none are provided, and
 * attaching platform-appropriate scopes via ensureAuthOptionScopes.
 */
export function authOptions(
  fields?: GoogleAIBaseLLMInput<MockClientAuthInfo>
): MockClientAuthInfo {
  const provided = fields?.authOptions ?? {};
  const merged = {
    record: {},
    projectId: `mock-id-${Date.now()}`,
    ...provided,
  };
  return ensureAuthOptionScopes<MockClientAuthInfo>(
    merged,
    "scopes",
    fields?.platformType
  );
}
/**
 * A GoogleAbstractedClient for tests. Instead of making network calls it
 * records each request's options on `record` and replies with the contents
 * of a fixture file (see MockClientAuthInfo.resultFile), either as parsed
 * JSON or as a JsonStream depending on the requested response type.
 */
export class MockClient implements GoogleAbstractedClient {
  projectId: string;

  // Shared with the test via MockClientAuthInfo so assertions can inspect
  // what was "sent".
  record: Record<string, unknown> = {};

  resultFile: string | undefined = undefined;

  constructor(authOptions?: MockClientAuthInfo) {
    this.projectId = authOptions?.projectId || `mock-id-${Date.now()}`;
    this.record = authOptions?.record ?? {};
    this.resultFile = authOptions?.resultFile;
    // Get the auth options, except for the record field, since that would
    // be a circular reference.
    const authOptionsCopy = { ...(authOptions ?? {}) };
    delete authOptionsCopy.record;
    this.record.authOptions = authOptionsCopy;
  }

  get clientType(): string {
    return "mock";
  }

  async getProjectId(): Promise<string> {
    return Promise.resolve(this.projectId);
  }

  /** Loads the fixture and parses it as a single JSON value ({} if unset). */
  async dataObjectJson(): Promise<unknown> {
    return this.resultFile ? JSON.parse(await mockFile(this.resultFile)) : {};
  }

  /** Loads the fixture into a closed JsonStream ("[]" if unset). */
  async dataObjectStream(): Promise<unknown> {
    const ret = new JsonStream();
    const fileData: string = this.resultFile
      ? await mockFile(this.resultFile)
      : "[]";
    // Warn only when no fixture was configured. (Checking the *content*
    // against "[]" would also fire for a configured fixture that
    // legitimately contains an empty array.)
    if (!this.resultFile)
      console.warn(`No result file specified. Using empty array.`);
    ret.appendBuffer(fileData);
    ret.closeBuffer();
    return ret;
  }

  /** Dispatches on responseType; throws for anything but json/stream. */
  async dataObject(responseType: string | undefined): Promise<unknown> {
    switch (responseType) {
      case "json":
        return this.dataObjectJson();
      case "stream":
        return this.dataObjectStream();
      default:
        throw new Error(`Unknown response type: ${responseType}`);
    }
  }

  /**
   * Records the request options for later assertions and returns a fake
   * HTTP 200 response whose body comes from the configured fixture.
   */
  async request(opts: GoogleAbstractedClientOps): Promise<unknown> {
    this.record.opts = opts;
    try {
      const data = await this.dataObject(opts.responseType);
      return {
        data,
        status: 200,
        statusText: "OK",
        headers: {},
        config: {},
      };
    } catch (xx) {
      console.error("Error reading file: ", xx);
      throw xx;
    }
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/chat_models.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { expect, test } from "@jest/globals";
import {
AIMessage,
BaseMessage,
BaseMessageLike,
HumanMessage,
HumanMessageChunk,
MessageContentComplex,
SystemMessage,
ToolMessage,
} from "@langchain/core/messages";
import { InMemoryStore } from "@langchain/core/stores";
import { CallbackHandlerMethods } from "@langchain/core/callbacks/base";
import { Serialized } from "@langchain/core/load/serializable";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatGoogleBase, ChatGoogleBaseInput } from "../chat_models.js";
import { authOptions, MockClient, MockClientAuthInfo, mockId } from "./mock.js";
import {
GeminiTool,
GoogleAIBaseLLMInput,
GoogleAISafetyCategory,
GoogleAISafetyHandler,
GoogleAISafetyThreshold,
} from "../types.js";
import { GoogleAbstractedClient } from "../auth.js";
import { GoogleAISafetyError } from "../utils/safety.js";
import {
BackedBlobStore,
MediaBlob,
MediaManager,
ReadThroughBlobStore,
} from "../experimental/utils/media_core.js";
import { removeAdditionalProperties } from "../utils/zod_to_gemini_parameters.js";
import { MessageGeminiSafetyHandler } from "../utils/index.js";
// Test-only subclass that swaps the real network client for MockClient,
// wired up through the MockClientAuthInfo supplied in `authOptions`.
class ChatGoogle extends ChatGoogleBase<MockClientAuthInfo> {
  constructor(fields?: ChatGoogleBaseInput<MockClientAuthInfo>) {
    super(fields);
  }

  // Called by the base class; returns the mock instead of a fetch/gauth
  // backed client so tests run fully offline.
  buildAbstractedClient(
    fields?: GoogleAIBaseLLMInput<MockClientAuthInfo>
  ): GoogleAbstractedClient {
    const options = authOptions(fields);
    return new MockClient(options);
  }
}
describe("Mock ChatGoogle - Gemini", () => {
test("Setting invalid model parameters", async () => {
expect(() => {
const model = new ChatGoogle({
temperature: 1.2,
});
expect(model).toBeNull(); // For linting. Should never reach.
}).toThrowError(/temperature/);
expect(() => {
const model = new ChatGoogle({
topP: -2,
});
expect(model).toBeNull(); // For linting. Should never reach.
}).toThrowError(/topP/);
expect(() => {
const model = new ChatGoogle({
topP: 2,
});
expect(model).toBeNull(); // For linting. Should never reach.
}).toThrowError(/topP/);
expect(() => {
const model = new ChatGoogle({
topK: -2,
});
expect(model).toBeNull(); // For linting. Should never reach.
}).toThrowError(/topK/);
});
test("user agent header", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-1-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
await model.invoke(messages);
expect(record?.opts?.headers).toHaveProperty("User-Agent");
expect(record?.opts?.headers).toHaveProperty("Client-Info");
expect(record.opts.headers["User-Agent"]).toMatch(
/langchain-js\/[0-9.]+-ChatConnection/
);
expect(record.opts.headers["Client-Info"]).toMatch(
/\d+(\.\d+)?-ChatConnection/ // Since we are not getting libraryVersion from env right now, it will always be 0
);
});
test("platform default", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
};
const model = new ChatGoogle({
authOptions,
});
expect(model.platform).toEqual("gcp");
});
test("platform set", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
};
const model = new ChatGoogle({
authOptions,
platformType: "gai",
});
expect(model.platform).toEqual("gai");
});
test("1. Basic request format", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-1-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
await model.invoke(messages);
expect(record.opts).toBeDefined();
expect(record.opts.data).toBeDefined();
const { data } = record.opts;
expect(data.contents).toBeDefined();
expect(data.contents.length).toEqual(3);
expect(data.contents[0].role).toEqual("user");
expect(data.contents[0].parts).toBeDefined();
expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
expect(data.contents[0].parts[0].text).toBeDefined();
expect(data.contents[1].role).toEqual("model");
expect(data.contents[1].parts).toBeDefined();
expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
expect(data.systemInstruction).not.toBeDefined();
});
test("1. Invoke request format", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-1-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
await model.invoke(messages);
expect(record.opts).toBeDefined();
expect(record.opts.data).toBeDefined();
const { data } = record.opts;
expect(data.contents).toBeDefined();
expect(data.contents.length).toEqual(3);
expect(data.contents[0].role).toEqual("user");
expect(data.contents[0].parts).toBeDefined();
expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
expect(data.contents[0].parts[0].text).toBeDefined();
expect(data.contents[1].role).toEqual("model");
expect(data.contents[1].parts).toBeDefined();
expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
expect(data.systemInstruction).not.toBeDefined();
});
test("1. Response format", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-1-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
const result = await model.invoke(messages);
expect(result._getType()).toEqual("ai");
const aiMessage = result as AIMessage;
expect(aiMessage.content).toBeDefined();
expect(aiMessage.content).toBe("T");
});
test("1. Invoke response format", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-1-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
const result = await model.invoke(messages);
expect(result._getType()).toEqual("ai");
const aiMessage = result as AIMessage;
expect(aiMessage.content).toBeDefined();
expect(aiMessage.content).toBe("T");
});
  // The older models don't support systemInstruction, so
  // SystemMessages will be turned into the human request with the prompt
  // from the system message and a faked ai response saying "Ok".
  test("1. System request format old model", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      modelName: "gemini-1.0-pro-001",
    });
    const messages: BaseMessageLike[] = [
      new SystemMessage(
        "I will ask you to flip a coin and tell me H for heads and T for tails"
      ),
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new HumanMessage("Flip it again"),
    ];
    await model.invoke(messages);
    expect(record.opts).toBeDefined();
    expect(record.opts.data).toBeDefined();
    const { data } = record.opts;
    expect(data.contents).toBeDefined();
    // 5 = original 4 messages plus the injected "Ok" model turn.
    expect(data.contents.length).toEqual(5);
    expect(data.contents[0].role).toEqual("user");
    expect(data.contents[0].parts).toBeDefined();
    expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[0].parts[0].text).toEqual(
      "I will ask you to flip a coin and tell me H for heads and T for tails"
    );
    expect(data.contents[1].role).toEqual("model");
    expect(data.contents[1].parts).toBeDefined();
    expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[1].parts[0].text).toEqual("Ok");
    expect(data.systemInstruction).not.toBeDefined();
  });
  // convertSystemMessageToHumanContent: true forces the same conversion
  // even on models that support systemInstruction.
  test("1. System request format convert true", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      convertSystemMessageToHumanContent: true,
    });
    const messages: BaseMessageLike[] = [
      new SystemMessage(
        "I will ask you to flip a coin and tell me H for heads and T for tails"
      ),
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new HumanMessage("Flip it again"),
    ];
    await model.invoke(messages);
    expect(record.opts).toBeDefined();
    expect(record.opts.data).toBeDefined();
    const { data } = record.opts;
    expect(data.contents).toBeDefined();
    expect(data.contents.length).toEqual(5);
    expect(data.contents[0].role).toEqual("user");
    expect(data.contents[0].parts).toBeDefined();
    expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[0].parts[0].text).toEqual(
      "I will ask you to flip a coin and tell me H for heads and T for tails"
    );
    expect(data.contents[1].role).toEqual("model");
    expect(data.contents[1].parts).toBeDefined();
    expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[1].parts[0].text).toEqual("Ok");
    expect(data.systemInstruction).not.toBeDefined();
  });
  // convert false: the SystemMessage is carried in systemInstruction, so
  // only the 3 conversational turns appear in contents.
  test("1. System request format convert false", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      convertSystemMessageToHumanContent: false,
    });
    const messages: BaseMessageLike[] = [
      new SystemMessage(
        "I will ask you to flip a coin and tell me H for heads and T for tails"
      ),
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new HumanMessage("Flip it again"),
    ];
    await model.invoke(messages);
    expect(record.opts).toBeDefined();
    expect(record.opts.data).toBeDefined();
    const { data } = record.opts;
    expect(data.contents).toBeDefined();
    expect(data.contents.length).toEqual(3);
    expect(data.contents[0].role).toEqual("user");
    expect(data.contents[0].parts).toBeDefined();
    expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[0].parts[0].text).toEqual("Flip it");
    expect(data.contents[1].role).toEqual("model");
    expect(data.contents[1].parts).toBeDefined();
    expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[1].parts[0].text).toEqual("H");
    expect(data.systemInstruction).toBeDefined();
  });
  // Newer models use systemInstruction natively without any flag.
  test("1. System request format new model", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      modelName: "gemini-1.5-pro",
    });
    const messages: BaseMessageLike[] = [
      new SystemMessage(
        "I will ask you to flip a coin and tell me H for heads and T for tails"
      ),
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new HumanMessage("Flip it again"),
    ];
    await model.invoke(messages);
    expect(record.opts).toBeDefined();
    expect(record.opts.data).toBeDefined();
    const { data } = record.opts;
    expect(data.contents).toBeDefined();
    expect(data.contents.length).toEqual(3);
    expect(data.contents[0].role).toEqual("user");
    expect(data.contents[0].parts).toBeDefined();
    expect(data.contents[0].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[0].parts[0].text).toEqual("Flip it");
    expect(data.contents[1].role).toEqual("model");
    expect(data.contents[1].parts).toBeDefined();
    expect(data.contents[1].parts.length).toBeGreaterThanOrEqual(1);
    expect(data.contents[1].parts[0].text).toEqual("H");
    expect(data.systemInstruction).toBeDefined();
  });
  // Multiple SystemMessages are rejected.
  test("1. System request - multiple", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      convertSystemMessageToHumanContent: false,
    });
    const messages: BaseMessageLike[] = [
      new SystemMessage(
        "I will ask you to flip a coin and tell me H for heads and T for tails"
      ),
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new SystemMessage("Now tell me Z for heads and Q for tails"),
      new HumanMessage("Flip it again"),
    ];
    let caught = false;
    try {
      await model.invoke(messages);
    } catch (xx) {
      caught = true;
    }
    expect(caught).toBeTruthy();
  });
  // A SystemMessage anywhere but first is rejected.
  test("1. System request - not first", async () => {
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "chat-1-mock.json",
    };
    const model = new ChatGoogle({
      authOptions,
      convertSystemMessageToHumanContent: false,
    });
    const messages: BaseMessageLike[] = [
      new HumanMessage("Flip it"),
      new AIMessage("H"),
      new SystemMessage("Now tell me Z for heads and Q for tails"),
      new HumanMessage("Flip it again"),
    ];
    let caught = false;
    try {
      await model.invoke(messages);
    } catch (xx) {
      caught = true;
    }
    expect(caught).toBeTruthy();
  });
test("2. Safety - settings", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-2-mock.json",
};
const model = new ChatGoogle({
authOptions,
safetySettings: [
{
category: GoogleAISafetyCategory.Harassment,
threshold: GoogleAISafetyThreshold.Most,
},
],
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
let caught = false;
try {
await model.invoke(messages);
} catch (xx: any) {
caught = true;
}
const settings = record?.opts?.data?.safetySettings;
expect(settings).toBeDefined();
expect(Array.isArray(settings)).toEqual(true);
expect(settings).toHaveLength(1);
expect(settings[0].category).toEqual("HARM_CATEGORY_HARASSMENT");
expect(settings[0].threshold).toEqual("BLOCK_LOW_AND_ABOVE");
expect(caught).toEqual(true);
});
test("2. Safety - default", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-2-mock.json",
};
const model = new ChatGoogle({
authOptions,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
let caught = false;
try {
await model.invoke(messages);
} catch (xx: any) {
caught = true;
expect(xx).toBeInstanceOf(GoogleAISafetyError);
const result = xx?.reply.generations[0];
expect(result).toBeUndefined();
}
expect(caught).toEqual(true);
});
test("2. Safety - safety handler", async () => {
const safetyHandler: GoogleAISafetyHandler = new MessageGeminiSafetyHandler(
{
msg: "I'm sorry, Dave, but I can't do that.",
}
);
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-2-mock.json",
};
const model = new ChatGoogle({
authOptions,
safetyHandler,
});
const messages: BaseMessageLike[] = [
new HumanMessage("Flip a coin and tell me H for heads and T for tails"),
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
let caught = false;
try {
const result = await model.invoke(messages);
expect(result._getType()).toEqual("ai");
const aiMessage = result as AIMessage;
expect(aiMessage.content).toBeDefined();
expect(aiMessage.content).toBe("I'm sorry, Dave, but I can't do that.");
} catch (xx: any) {
caught = true;
}
expect(caught).toEqual(false);
});
test("3. invoke - images", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-3-mock.json",
};
const model = new ChatGoogle({
authOptions,
model: "gemini-1.5-flash",
});
const message: MessageContentComplex[] = [
{
type: "text",
text: "What is in this image?",
},
{
type: "image_url",
image_url: `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=`,
},
];
const messages: BaseMessage[] = [
new HumanMessageChunk({ content: message }),
];
const result = await model.invoke(messages);
expect(record.opts).toHaveProperty("data");
expect(record.opts.data).toHaveProperty("contents");
expect(record.opts.data.contents).toHaveLength(1);
expect(record.opts.data.contents[0]).toHaveProperty("parts");
const parts = record?.opts?.data?.contents[0]?.parts;
expect(parts).toHaveLength(2);
expect(parts[0]).toHaveProperty("text");
expect(parts[1]).toHaveProperty("inlineData");
expect(parts[1].inlineData).toHaveProperty("mimeType");
expect(parts[1].inlineData).toHaveProperty("data");
expect(result.content).toBe("A blue square.");
});
test("3. invoke - media - invalid", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-3-mock.json",
};
const model = new ChatGoogle({
authOptions,
model: "gemini-1.5-flash",
});
const message: MessageContentComplex[] = [
{
type: "text",
text: "What is in this image?",
},
{
type: "media",
fileUri: "mock://example.com/blue-box.png",
},
];
const messages: BaseMessage[] = [
new HumanMessageChunk({ content: message }),
];
try {
const result = await model.invoke(messages);
expect(result).toBeUndefined();
} catch (e) {
expect((e as Error).message).toMatch(/^Invalid media content/);
}
});
test("3. invoke - media - no manager", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-3-mock.json",
};
const model = new ChatGoogle({
authOptions,
model: "gemini-1.5-flash",
});
const message: MessageContentComplex[] = [
{
type: "text",
text: "What is in this image?",
},
{
type: "media",
fileUri: "mock://example.com/blue-box.png",
mimeType: "image/png",
},
];
const messages: BaseMessage[] = [
new HumanMessageChunk({ content: message }),
];
const result = await model.invoke(messages);
console.log(JSON.stringify(record.opts, null, 1));
expect(record.opts).toHaveProperty("data");
expect(record.opts.data).toHaveProperty("contents");
expect(record.opts.data.contents).toHaveLength(1);
expect(record.opts.data.contents[0]).toHaveProperty("parts");
const parts = record?.opts?.data?.contents[0]?.parts;
expect(parts).toHaveLength(2);
expect(parts[0]).toHaveProperty("text");
expect(parts[1]).toHaveProperty("fileData");
expect(parts[1].fileData).toHaveProperty("mimeType");
expect(parts[1].fileData).toHaveProperty("fileUri");
expect(result.content).toBe("A blue square.");
});
test("3. invoke - media - manager", async () => {
class MemStore extends InMemoryStore<MediaBlob> {
get length() {
return Object.keys(this.store).length;
}
}
const aliasMemory = new MemStore();
const aliasStore = new BackedBlobStore({
backingStore: aliasMemory,
defaultFetchOptions: {
actionIfBlobMissing: undefined,
},
});
const canonicalMemory = new MemStore();
const canonicalStore = new BackedBlobStore({
backingStore: canonicalMemory,
defaultStoreOptions: {
pathPrefix: "canonical://store/",
actionIfInvalid: "prefixPath",
},
defaultFetchOptions: {
actionIfBlobMissing: undefined,
},
});
const blobStore = new ReadThroughBlobStore({
baseStore: aliasStore,
backingStore: canonicalStore,
});
const resolverMemory = new MemStore();
const resolver = new BackedBlobStore({
backingStore: resolverMemory,
defaultFetchOptions: {
actionIfBlobMissing: "emptyBlob",
},
});
const mediaManager = new MediaManager({
store: blobStore,
resolvers: [resolver],
});
async function store(path: string, text: string): Promise<void> {
const type = path.endsWith(".png") ? "image/png" : "text/plain";
const data = new Blob([text], { type });
const blob = await MediaBlob.fromBlob(data, { path });
await resolver.store(blob);
}
await store("resolve://host/foo", "fooing");
await store("resolve://host2/bar/baz", "barbazing");
await store("resolve://host/foo/blue-box.png", "png");
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-3-mock.json",
};
const callbacks: CallbackHandlerMethods[] = [
{
handleChatModelStart(
llm: Serialized,
messages: BaseMessage[][],
runId: string,
_parentRunId?: string,
_extraParams?: Record<string, unknown>,
_tags?: string[],
_metadata?: Record<string, unknown>,
_runName?: string
): any {
console.log("Chat start", llm, messages, runId);
},
handleCustomEvent(
eventName: string,
data: any,
runId: string,
tags?: string[],
metadata?: Record<string, any>
): any {
console.log("Custom event", eventName, runId, data, tags, metadata);
},
},
];
const model = new ChatGoogle({
authOptions,
model: "gemini-1.5-flash",
apiConfig: {
mediaManager,
},
callbacks,
});
const message: MessageContentComplex[] = [
{
type: "text",
text: "What is in this image?",
},
{
type: "media",
fileUri: "resolve://host/foo/blue-box.png",
},
];
const messages: BaseMessage[] = [
new HumanMessageChunk({ content: message }),
];
const result = await model.invoke(messages);
console.log(JSON.stringify(record.opts, null, 1));
expect(record.opts).toHaveProperty("data");
expect(record.opts.data).toHaveProperty("contents");
expect(record.opts.data.contents).toHaveLength(1);
expect(record.opts.data.contents[0]).toHaveProperty("parts");
const parts = record?.opts?.data?.contents[0]?.parts;
expect(parts).toHaveLength(2);
expect(parts[0]).toHaveProperty("text");
expect(parts[1]).toHaveProperty("fileData");
expect(parts[1].fileData).toHaveProperty("mimeType");
expect(parts[1].fileData.mimeType).toEqual("image/png");
expect(parts[1].fileData).toHaveProperty("fileUri");
expect(parts[1].fileData.fileUri).toEqual(
"canonical://store/host/foo/blue-box.png"
);
expect(result.content).toBe("A blue square.");
});
test("4. Functions Bind - Gemini format request", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-4-mock.json",
};
const tools: GeminiTool[] = [
{
functionDeclarations: [
{
name: "test",
description:
"Run a test with a specific name and get if it passed or failed",
parameters: {
type: "object",
properties: {
testName: {
type: "string",
description: "The name of the test that should be run.",
},
},
required: ["testName"],
},
},
],
},
];
const baseModel = new ChatGoogle({
authOptions,
});
const model = baseModel.bind({
tools,
});
const result = await model.invoke("What?");
// console.log(JSON.stringify(record, null, 1));
expect(result).toBeDefined();
const toolsResult = record?.opts?.data?.tools;
expect(toolsResult).toBeDefined();
expect(Array.isArray(toolsResult)).toBeTruthy();
expect(toolsResult).toHaveLength(1);
const toolResult = toolsResult[0];
expect(toolResult).toBeDefined();
expect(toolResult).toHaveProperty("functionDeclarations");
expect(Array.isArray(toolResult.functionDeclarations)).toBeTruthy();
expect(toolResult.functionDeclarations).toHaveLength(1);
const functionDeclaration = toolResult.functionDeclarations[0];
expect(functionDeclaration.name).toBe("test");
expect(functionDeclaration.description).toBe(
"Run a test with a specific name and get if it passed or failed"
);
expect(functionDeclaration.parameters).toBeDefined();
expect(typeof functionDeclaration.parameters).toBe("object");
const parameters = functionDeclaration?.parameters;
expect(parameters.type).toBe("object");
expect(parameters).toHaveProperty("properties");
expect(typeof parameters.properties).toBe("object");
expect(parameters.properties.testName).toBeDefined();
expect(typeof parameters.properties.testName).toBe("object");
expect(parameters.properties.testName.type).toBe("string");
expect(parameters.properties.testName.description).toBe(
"The name of the test that should be run."
);
expect(parameters.required).toBeDefined();
expect(Array.isArray(parameters.required)).toBeTruthy();
expect(parameters.required).toHaveLength(1);
expect(parameters.required[0]).toBe("testName");
});
test("4. Functions withStructuredOutput - Gemini format request", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-4-mock.json",
};
const tool = {
name: "test",
description:
"Run a test with a specific name and get if it passed or failed",
parameters: {
type: "object",
properties: {
testName: {
type: "string",
description: "The name of the test that should be run.",
},
},
required: ["testName"],
},
};
const baseModel = new ChatGoogle({
authOptions,
});
const model = baseModel.withStructuredOutput(tool);
await model.invoke("What?");
// console.log(JSON.stringify(record, null, 1));
const toolsResult = record?.opts?.data?.tools;
expect(toolsResult).toBeDefined();
expect(Array.isArray(toolsResult)).toBeTruthy();
expect(toolsResult).toHaveLength(1);
const toolResult = toolsResult[0];
expect(toolResult).toBeDefined();
expect(toolResult).toHaveProperty("functionDeclarations");
expect(Array.isArray(toolResult.functionDeclarations)).toBeTruthy();
expect(toolResult.functionDeclarations).toHaveLength(1);
const functionDeclaration = toolResult.functionDeclarations[0];
expect(functionDeclaration.name).toBe("test");
expect(functionDeclaration.description).toBe(
"Run a test with a specific name and get if it passed or failed"
);
expect(functionDeclaration.parameters).toBeDefined();
expect(typeof functionDeclaration.parameters).toBe("object");
const parameters = functionDeclaration?.parameters;
expect(parameters.type).toBe("object");
expect(parameters).toHaveProperty("properties");
expect(typeof parameters.properties).toBe("object");
expect(parameters.properties.testName).toBeDefined();
expect(typeof parameters.properties.testName).toBe("object");
expect(parameters.properties.testName.type).toBe("string");
expect(parameters.properties.testName.description).toBe(
"The name of the test that should be run."
);
expect(parameters.required).toBeDefined();
expect(Array.isArray(parameters.required)).toBeTruthy();
expect(parameters.required).toHaveLength(1);
expect(parameters.required[0]).toBe("testName");
});
test("4. Functions - results", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-4-mock.json",
};
const tools: GeminiTool[] = [
{
functionDeclarations: [
{
name: "test",
description:
"Run a test with a specific name and get if it passed or failed",
parameters: {
type: "object",
properties: {
testName: {
type: "string",
description: "The name of the test that should be run.",
},
},
required: ["testName"],
},
},
],
},
];
const model = new ChatGoogle({
authOptions,
}).bind({
tools,
});
const result = await model.invoke("What?");
// console.log(JSON.stringify(result, null, 1));
expect(result).toHaveProperty("content");
expect(result.content).toBe("");
const args = result?.lc_kwargs?.additional_kwargs;
expect(args).toBeDefined();
expect(args).toHaveProperty("tool_calls");
expect(Array.isArray(args.tool_calls)).toBeTruthy();
expect(args.tool_calls).toHaveLength(1);
const call = args.tool_calls[0];
expect(call).toHaveProperty("type");
expect(call.type).toBe("function");
expect(call).toHaveProperty("function");
const func = call.function;
expect(func).toBeDefined();
expect(func).toHaveProperty("name");
expect(func.name).toBe("test");
expect(func).toHaveProperty("arguments");
expect(typeof func.arguments).toBe("string");
expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
});
test("5. Functions - function reply", async () => {
const record: Record<string, any> = {};
const projectId = mockId();
const authOptions: MockClientAuthInfo = {
record,
projectId,
resultFile: "chat-5-mock.json",
};
const tools: GeminiTool[] = [
{
functionDeclarations: [
{
name: "test",
description:
"Run a test with a specific name and get if it passed or failed",
parameters: {
type: "object",
properties: {
testName: {
type: "string",
description: "The name of the test that should be run.",
},
},
required: ["testName"],
},
},
],
},
];
const model = new ChatGoogle({
authOptions,
}).bind({
tools,
});
const toolResult = {
testPassed: true,
};
const messages: BaseMessageLike[] = [
new HumanMessage("Run a test on the cobalt project."),
new AIMessage("", {
tool_calls: [
{
id: "test",
type: "function",
function: {
name: "test",
arguments: '{"testName":"cobalt"}',
},
},
],
}),
new ToolMessage(JSON.stringify(toolResult), "test"),
];
const result = await model.invoke(messages);
expect(result).toBeDefined();
// console.log(JSON.stringify(record?.opts?.data, null, 1));
});
});
describe("Mock ChatGoogle - Anthropic", () => {
  test("1. Invoke request format", async () => {
    // Verify that ChatGoogle pointed at a Claude model on GCP converts
    // LangChain messages into the Anthropic wire format.
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "claude-chat-1-mock.json",
    };
    const model = new ChatGoogle({
      model: "claude-3-5-sonnet@20240620",
      platformType: "gcp",
      authOptions,
    });
    const messages: BaseMessageLike[] = [new HumanMessage("What is 1+1?")];
    await model.invoke(messages);
    // console.log("record", record);
    expect(record.opts).toBeDefined();
    expect(record.opts.data).toBeDefined();
    const { data } = record.opts;
    expect(data.messages).toBeDefined();
    expect(data.messages.length).toEqual(1);
    expect(data.messages[0].role).toEqual("user");
    expect(data.messages[0].content).toBeDefined();
    expect(data.messages[0].content.length).toBeGreaterThanOrEqual(1);
    expect(data.messages[0].content[0].text).toBeDefined();
    // No system message was provided, so none should appear in the request.
    expect(data.system).not.toBeDefined();
  });
  test("2. Invoke response format", async () => {
    // Verify that a recorded Anthropic response is converted back into an
    // AIMessage carrying the full text content.
    const record: Record<string, any> = {};
    const projectId = mockId();
    const authOptions: MockClientAuthInfo = {
      record,
      projectId,
      resultFile: "claude-chat-1-mock.json",
    };
    const model = new ChatGoogle({
      model: "claude-3-5-sonnet@20240620",
      platformType: "gcp",
      authOptions,
    });
    const messages: BaseMessageLike[] = [new HumanMessage("What is 1+1?")];
    const result = await model.invoke(messages);
    expect(result._getType()).toEqual("ai");
    const aiMessage = result as AIMessage;
    expect(aiMessage.content).toBeDefined();
    expect(aiMessage.content).toBe(
      "1 + 1 = 2\n\nThis is one of the most basic arithmetic equations. It represents the addition of two units, resulting in a sum of two."
    );
  });
});
/**
 * Recursively collects every own enumerable key of `obj` (and of any nested
 * objects/arrays) into `keys`, in traversal order, and returns it.
 */
function extractKeys(obj: Record<string, any>, keys: string[] = []) {
  for (const [key, value] of Object.entries(obj)) {
    keys.push(key);
    if (typeof value === "object" && value !== null) {
      extractKeys(value, keys);
    }
  }
  return keys;
}
test("removeAdditionalProperties can remove all instances of additionalProperties", async () => {
const idealResponseSchema = z.object({
idealResponse: z
.string()
.optional()
.describe("The ideal response to the question"),
});
const questionSchema = z.object({
question: z.string().describe("Question text"),
type: z.enum(["singleChoice", "multiChoice"]).describe("Question type"),
options: z.array(z.string()).describe("List of possible answers"),
correctAnswer: z
.string()
.optional()
.describe("correct answer from the possible answers"),
idealResponses: z
.array(idealResponseSchema)
.describe("Array of ideal responses to the question"),
});
const schema = z.object({
questions: z.array(questionSchema).describe("Array of question objects"),
});
const parsedSchemaArr = removeAdditionalProperties(zodToJsonSchema(schema));
const arrSchemaKeys = extractKeys(parsedSchemaArr);
expect(
arrSchemaKeys.find((key) => key === "additionalProperties")
).toBeUndefined();
const parsedSchemaObj = removeAdditionalProperties(
zodToJsonSchema(questionSchema)
);
const arrSchemaObj = extractKeys(parsedSchemaObj);
expect(
arrSchemaObj.find((key) => key === "additionalProperties")
).toBeUndefined();
const analysisSchema = z.object({
decision: z.enum(["UseAPI", "UseFallback"]),
explanation: z.string(),
apiDetails: z
.object({
serviceName: z.string(),
endpointName: z.string(),
parameters: z.record(z.unknown()),
extractionPath: z.string(),
})
.optional(),
});
const parsedAnalysisSchema = removeAdditionalProperties(
zodToJsonSchema(analysisSchema)
);
const analysisSchemaObj = extractKeys(parsedAnalysisSchema);
expect(
analysisSchemaObj.find((key) => key === "additionalProperties")
).toBeUndefined();
});
test("Can set streaming param", () => {
  // Streaming is off by default; the constructor flag must flip it on.
  const defaultModel = new ChatGoogle();
  expect(defaultModel.streaming).toBe(false);
  const streamingModel = new ChatGoogle({ streaming: true });
  expect(streamingModel.streaming).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-3-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "* **Hoppy Socks:** This name suggests fun and excitement, like hopping around"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " in colorful socks.\n* **Sock-It-to-Me:** This"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.\n* **The Sock Drawer:** This name evokes"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.\n* **Sock Appeal:** This name plays on the phrase"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model"
},
"finishReason": "STOP",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "MEDIUM"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 14,
"candidatesTokenCount": 96,
"totalTokenCount": 110
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-4-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "* **Hoppy Socks:** This name suggests fun and excitement, like hopping around"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " in colorful socks.\n* **Sock-It-to-Me:** This"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.\n* **The Sock Drawer:** This name evokes"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.\n* **Sock Appeal:** This name plays on the phrase"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model"
},
"finishReason": "SAFETY",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "HIGH",
"blocked": true
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 14,
"candidatesTokenCount": 96,
"totalTokenCount": 110
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/chat-2-mock.json | {
"candidates": [
{
"finishReason": "SAFETY",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "LOW"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "MEDIUM"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-4-mock.txt | * **Hoppy Socks:** This name suggests fun and excitement, like hopping around in colorful socks.
* **Sock-It-to-Me:** This catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.
* **The Sock Drawer:** This name evokes a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.
* **Sock Appeal:** This name plays on the phrase |
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-8-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "\n{\n \""
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.023509452,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.1261379
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.1504028,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.1308397
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.083890386,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.03926875
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.06536579,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.075858176
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "recipe\": {\n \"name\": \"Classic Banana Bread\",\n \"ingredients\": [\n {\n \"name\": \"Ripe bananas\","
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.11496335,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.122838534
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.18374005,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.18097353
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.0999963,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.04560694
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.08834723,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.16105618
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "\n \"quantity\": \"3\",\n \"unit\": \"medium\"\n },\n {\n \"name\": \"All-purpose flour\",\n"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.12064587,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.108566426
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.1301748,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.13963085
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.1112412,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.04385456
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.08021325,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.101055905
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " \"quantity\": \"1 1/2\",\n \"unit\": \"cups\"\n },\n {\n \"name\": \"Baking soda\",\n \"quantity\": \"1\",\n \"unit\": \"teaspoon\"\n },\n {\n \"name\": \""
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.13139598,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.10875559
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.18227993,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.17567945
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.12570794,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.060086653
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.10338596,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.100878626
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "Ground cinnamon\",\n \"quantity\": \"1/2\",\n \"unit\": \"teaspoon\"\n },\n {\n \"name\": \"Salt\",\n \"quantity\": \"1/4\",\n \"unit\": \"teaspoon\"\n },\n {\n"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.13488984,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.11066323
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.16532344,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.15241031
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.11951086,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.06301947
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.114368536,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.0996453
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " \"name\": \"Unsalted butter\",\n \"quantity\": \"1/2\",\n \"unit\": \"cup\",\n \"additional_info\": \"softened\"\n },\n {\n \"name\": \"Granulated sugar\",\n \"quantity\": \"3/4\",\n \"unit\": \"cup\"\n },\n {\n \"name\": \""
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.13432105,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.11008788
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.17511447,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.13206616
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.13614832,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.065604836
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.13352816,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.1066906
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "Large eggs\",\n \"quantity\": \"2\"\n },\n {\n \"name\": \"Vanilla extract\",\n \"quantity\": \"1\",\n \"unit\": \"teaspoon\"\n }\n ],\n \"instructions\": [\n \"Preheat oven to 350Β°F (175Β°C). Grease and flour a 9x5 inch loaf pan.\",\n \""
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.18227993,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.12962292
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.22473529,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.16626887
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.19605546,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.09041373
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.16384642,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.11940814
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "Mash the bananas in a large bowl. In a separate bowl, whisk together the flour, baking soda, cinnamon, and salt.\",\n \"In a third bowl, cream together the butter and sugar until light and fluffy. Beat in the eggs one at a time, then stir in the vanilla extract.\",\n \"Gradually add the dry ingredients to the wet ingredients, mixing until just combined. Fold in the mashed bananas.\",\n \"Pour the batter into the prepared loaf pan and bake for 50-60 minutes, or until a toothpick inserted"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.14780101,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.124745145
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.16532344,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.1435475
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.18447348,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.08359067
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.15674922,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.15203226
}
],
"citationMetadata": {
"citations": [
{
"startIndex": 1255,
"endIndex": 1478,
"uri": "https://themarketatdelval.com/how-to-make-pressed-fig-cakes/"
}
]
}
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " into the center comes out clean. Let the bread cool in the pan for 10 minutes before inverting it onto a wire rack to cool completely.\"\n ]\n }\n}\n"
}
]
},
"finishReason": "STOP",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.1510278,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.13061775
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.16817278,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.1534223
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.17582092,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.082401514
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.14780101,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.16118816
}
],
"citationMetadata": {
"citations": [
{
"startIndex": 1507,
"endIndex": 1637,
"uri": "https://dessertdonelight.com/healthy-hawaiian-banana-bread/"
},
{
"startIndex": 1569,
"endIndex": 1691,
"uri": "https://www.dish-it-up.com/web-stories/banana-bread-recipe/"
}
]
}
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 526,
"totalTokenCount": 534
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-1-mock.json | [
{
"candidates": [
{
"content": {
"parts": [
{
"text": "1. Sock it to Me!\n2. Heel Yeah Socks\n3. Sole Mates\n4. Happy Soles\n5. Toe-tally Awesome Socks\n6. Sock Appeal\n7. Footsie Wootsies\n8. Thread Heads\n9. Sock Squad\n10. Sock-a-licious\n11. Darn Good Socks\n12. Sockcessories\n13. Sole Searching\n14. Sockstar\n15. Socktopia\n16. Sockology\n17. Elevated Toes\n18. The Urban Sole\n19. The Hippie Sole\n20. Sole Fuel"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/chat-5-mock.json | {
"candidates": [
{
"content": {
"parts": [
{
"text": "The cobalt model passed."
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "LOW"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "MEDIUM"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-9-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "\n[\n {\n \"name\": \"Banana Bread\",\n \"ingredients\": [\n {\n \"name\": \"Flour\",\n \"amount\": \"1 1/2 cups\",\n \"type\": \"all-purpose\"\n },\n {\n \"name\": \"Baking soda\",\n \"amount\": \"1 teaspoon\"\n },\n {\n \"name\": \"Salt\",\n \"amount\": \"1/2 teaspoon\"\n },\n {\n \"name\": \"Cinnamon\",\n \"amount\": \"1 teaspoon\"\n },\n {\n \"name\": \"Nutmeg\",\n \"amount\": \"1/4 teaspoon\"\n },\n {\n \"name\": \"Bananas\",\n \"amount\": \"3 ripe\",\n \"type\": \"mashed\"\n },\n {\n \"name\": \"Sugar\",\n \"amount\": \"3/4 cup\"\n },\n {\n \"name\": \"Eggs\",\n \"amount\": \"2 large\"\n },\n {\n \"name\": \"Vanilla extract\",\n \"amount\": \"1 teaspoon\"\n },\n {\n \"name\": \"Melted butter\",\n \"amount\": \"1/2 cup\"\n }\n ],\n \"instructions\": [\n \"Preheat oven to 350 degrees F (175 degrees C). Grease and flour a 9x5 inch loaf pan.\",\n \"In a large bowl, whisk together the flour, baking soda, salt, cinnamon, and nutmeg.\",\n \"In a separate bowl, cream together the mashed bananas, sugar, eggs, and vanilla extract.\",\n \"Gradually add the wet ingredients to the dry ingredients, mixing until just combined.\",\n \"Stir in the melted butter.\",\n \"Pour batter into the prepared loaf pan and bake for 50-60 minutes, or until a toothpick inserted into the center comes out clean.\",\n \"Let cool in the pan for 10 minutes before inverting onto a wire rack to cool completely.\"\n ]\n }\n]\n"
}
]
},
"finishReason": "STOP",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.15869519,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.13161905
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.19012183,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.17314835
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.18025091,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.09704755
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE",
"probabilityScore": 0.17370832,
"severity": "HARM_SEVERITY_NEGLIGIBLE",
"severityScore": 0.17078441
}
],
"citationMetadata": {
"citations": [
{
"startIndex": 1325,
"endIndex": 1456,
"uri": "https://sammistreats.com/how-to-make-banana-bread/"
},
{
"startIndex": 1383,
"endIndex": 1531,
"uri": "https://www.biggerbolderbaking.com/pumpkin-banana-bread/"
}
]
}
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 464,
"totalTokenCount": 472
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.sse | event: message_start
data: {"type":"message_start","message":{"id":"msg_vrtx_01JLACAmH9Ke3HQEUK1Sg8iT","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":15,"output_tokens":1}} }
event: ping
data: {"type": "ping"}
event: content_block_start
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Thank"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you for inqu"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"iring about my well"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-being!"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I'm functioning"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" optim"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ally an"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d feeling"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" quite enthusi"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"astic about"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" engaging"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" in"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" conversation"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d assisting with"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" any tasks"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" or"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" queries you might"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" have. As"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" AI,"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I don"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"'t experience emotions or"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" physical"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" sensations in"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" the way"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" humans do, but"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I can"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" say"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" that my"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" systems"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" are operating"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" at"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" peak"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" efficiency. I'm"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" eager"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" to learn,"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" explore"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" ideas"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", and tackle"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" intellectual"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" challenges. The"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" vast"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" repository"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" of knowledge"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" at"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" my disposal is"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" pr"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"imed an"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d ready to be put"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" to use in whatever"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" manner"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" see"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" fit. Whether"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"'re"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" looking for in"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-depth analysis,"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" creative brainstor"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ming, or simply"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" a friendly chat"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", I'm here"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d fully"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" prepare"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d to dive"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" into"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" our"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" interaction"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" with"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" gu"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"sto. Is"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" there any"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" particular"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" subject"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" or"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" task"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you'"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d like to discuss or"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" work"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" on today"} }
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"?"} }
event: content_block_stop
data: {"type":"content_block_stop","index":0 }
event: message_delta
data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":165} }
event: message_stop
data: {"type":"message_stop" }
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-7-mock.json | [
{
"candidates": [
{
"content": {
"parts": [
{
"text": " A black cat is lying on the ground. The cat's eyes are yellow and its fur is short. The ground is covered in dirt and there are"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
},
{
"candidates": [
{
"content": {
"parts": [
{
"text": " some plants growing in the background."
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/chat-1-mock.json | {
"candidates": [
{
"content": {
"parts": [
{
"text": "T"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "LOW"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "MEDIUM"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/chat-3-mock.json | {
"candidates": [
{
"content": {
"parts": [
{
"text": "A blue square."
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/chat-4-mock.json | {
"candidates": [
{
"content": {
"parts": [
{
"functionCall": {
"name": "test",
"args": {
"testName": "cobalt"
}
}
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.json | {
"id": "msg_vrtx_01AGfmYa73qH7wpmFsVFr4rq",
"type": "message",
"role": "assistant",
"model": "claude-3-5-sonnet-20240620",
"content": [
{
"type": "text",
"text": "1 + 1 = 2\n\nThis is one of the most basic arithmetic equations. It represents the addition of two units, resulting in a sum of two."
}
],
"stop_reason": "end_turn",
"stop_sequence": null,
"usage": {
"input_tokens": 16,
"output_tokens": 39
}
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-6-mock.json | [
{
"candidates": [
{
"content": {
"parts": [
{
"text": "A blue square."
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"promptFeedback": {
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-2-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "* **Hoppy Socks:** This name suggests fun and excitement, like hopping around"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " in colorful socks.\n* **Sock-It-to-Me:** This"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.\n* **The Sock Drawer:** This name evokes"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.\n* **Sock Appeal:** This name plays on the phrase"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model"
},
"finishReason": "STOP",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "MEDIUM"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 14,
"candidatesTokenCount": 96,
"totalTokenCount": 110
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-2-mock.txt | * **Hoppy Socks:** This name suggests fun and excitement, like hopping around in colorful socks.
* **Sock-It-to-Me:** This catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.
* **The Sock Drawer:** This name evokes a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.
* **Sock Appeal:** This name plays on the phrase |
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests | lc_public_repos/langchainjs/libs/langchain-google-common/src/tests/data/llm-5-mock.json | [
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "* **Hoppy Socks:** This name suggests fun and excitement, like hopping around"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " in colorful socks.\n* **Sock-It-to-Me:** This"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " catchy name conveys a sense of boldness and individuality, as if you're making a statement with your socks.\n* **The Sock Drawer:** This name evokes"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": " a sense of nostalgia and familiarity, like the trusty sock drawer where you always find your favorite pairs.\n* **Sock Appeal:** This name plays on the phrase"
}
]
},
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
]
},
{
"candidates": [
{
"content": {
"role": "model"
},
"finishReason": "SAFETY",
"safetyRatings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "HIGH",
"blocked": true
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 14,
"candidatesTokenCount": 96,
"totalTokenCount": 110
}
}
]
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/experimental/media.ts | import {
AsyncCaller,
AsyncCallerCallOptions,
AsyncCallerParams,
} from "@langchain/core/utils/async_caller";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
MediaBlob,
BlobStore,
BlobStoreOptions,
MediaBlobData,
} from "./utils/media_core.js";
import {
GoogleConnectionParams,
GoogleRawResponse,
GoogleResponse,
} from "../types.js";
import { GoogleHostConnection, GoogleRawConnection } from "../connection.js";
import {
ApiKeyGoogleAuth,
GoogleAbstractedClient,
GoogleAbstractedClientOpsMethod,
} from "../auth.js";
/** Parameters for connections that upload media via a multipart request. */
export interface GoogleUploadConnectionParams<AuthOptions>
  extends GoogleConnectionParams<AuthOptions> {}
/**
 * Connection that writes a blob plus its JSON metadata in a single
 * `multipart/related` request (Google's "multipart" upload protocol).
 * Subclasses supply the upload URL via buildUrl().
 */
export abstract class GoogleMultipartUploadConnection<
  CallOptions extends AsyncCallerCallOptions,
  ResponseType extends GoogleResponse,
  AuthOptions
> extends GoogleHostConnection<CallOptions, ResponseType, AuthOptions> {
  constructor(
    fields: GoogleConnectionParams<AuthOptions> | undefined,
    caller: AsyncCaller,
    client: GoogleAbstractedClient
  ) {
    super(fields, caller, client);
  }

  /**
   * Assemble the two-part request body: part one is the JSON metadata,
   * part two is the encoded media content, delimited by `separator`.
   *
   * @param separator multipart boundary (without the leading "--")
   * @param data blob whose mimetype and encoded content become part two
   * @param metadata service-specific metadata serialized as part one
   * @returns the complete multipart/related body as a string
   */
  async _body(
    separator: string,
    data: MediaBlob,
    metadata: Record<string, unknown>
  ): Promise<string> {
    const contentType = data.mimetype;
    const { encoded, encoding } = await data.encode();
    // NOTE(review): parts are joined with "\n"; RFC 2046 specifies CRLF
    // line endings for multipart bodies. Google's endpoints evidently
    // accept bare LF — confirm before changing.
    const body = [
      `--${separator}`,
      "Content-Type: application/json; charset=UTF-8",
      "",
      JSON.stringify(metadata),
      "",
      `--${separator}`,
      `Content-Type: ${contentType}`,
      `Content-Transfer-Encoding: ${encoding}`,
      "",
      encoded,
      `--${separator}--`,
    ];
    return body.join("\n");
  }

  /**
   * Upload `data` with `metadata` as one multipart/related request.
   * The timestamp-based boundary is assumed never to occur inside the
   * encoded payload; a collision would corrupt the request body.
   */
  async request(
    data: MediaBlob,
    metadata: Record<string, unknown>,
    options: CallOptions
  ): Promise<ResponseType> {
    const separator = `separator-${Date.now()}`;
    const body = await this._body(separator, data, metadata);
    const requestHeaders = {
      "Content-Type": `multipart/related; boundary=${separator}`,
      "X-Goog-Upload-Protocol": "multipart",
    };
    const response = this._request(body, options, requestHeaders);
    return response;
  }
}
/**
 * Connection for body-less requests (used for metadata GETs and DELETEs)
 * whose response is parsed into a typed GoogleResponse.
 */
export abstract class GoogleDownloadConnection<
  CallOptions extends AsyncCallerCallOptions,
  ResponseType extends GoogleResponse,
  AuthOptions
> extends GoogleHostConnection<CallOptions, ResponseType, AuthOptions> {
  /** Issue the request with an empty body and return the parsed response. */
  async request(options: CallOptions): Promise<ResponseType> {
    const response = await this._request(undefined, options);
    return response;
  }
}
/**
 * Connection that fetches raw (unparsed) content, e.g. object bytes,
 * always via HTTP GET.
 */
export abstract class GoogleDownloadRawConnection<
  CallOptions extends AsyncCallerCallOptions,
  AuthOptions
> extends GoogleRawConnection<CallOptions, AuthOptions> {
  /** Raw downloads are always fetched with GET. */
  buildMethod(): GoogleAbstractedClientOpsMethod {
    return "GET";
  }

  /** Issue the GET request and resolve with the raw response. */
  async request(options: CallOptions): Promise<GoogleRawResponse> {
    const response = await this._request(undefined, options);
    return response;
  }
}
/**
 * Configuration for a Google-backed blob store: connection/auth settings,
 * async-caller retry settings, and generic blob-store options.
 */
export interface BlobStoreGoogleParams<AuthOptions>
  extends GoogleConnectionParams<AuthOptions>,
    AsyncCallerParams,
    BlobStoreOptions {}
/**
 * Abstract key/value store for MediaBlobs backed by a Google service.
 * Subclasses supply the concrete connections (upload, metadata, data,
 * delete) and the auth client; this class implements the BlobStore
 * mset/mget/mdelete contract on top of them.
 */
export abstract class BlobStoreGoogle<
  ResponseType extends GoogleResponse,
  AuthOptions
> extends BlobStore {
  caller: AsyncCaller;

  client: GoogleAbstractedClient;

  constructor(fields?: BlobStoreGoogleParams<AuthOptions>) {
    super(fields);
    this.caller = new AsyncCaller(fields ?? {});
    this.client = this.buildClient(fields);
  }

  /** Create the authenticated client used by every connection. */
  abstract buildClient(
    fields?: BlobStoreGoogleParams<AuthOptions>
  ): GoogleAbstractedClient;

  /** Build the service-specific metadata record stored with the blob. */
  abstract buildSetMetadata([key, blob]: [string, MediaBlob]): Record<
    string,
    unknown
  >;

  /** Build the multipart upload connection used to save one blob. */
  abstract buildSetConnection([key, blob]: [
    string,
    MediaBlob
  ]): GoogleMultipartUploadConnection<
    AsyncCallerCallOptions,
    ResponseType,
    AuthOptions
  >;

  /** Upload a single key/blob pair; returns the raw service response. */
  async _set(keyValuePair: [string, MediaBlob]): Promise<ResponseType> {
    const [, blob] = keyValuePair;
    const metadata = this.buildSetMetadata(keyValuePair);
    const options = {};
    const connection = this.buildSetConnection(keyValuePair);
    const response = await connection.request(blob, metadata, options);
    return response;
  }

  async mset(keyValuePairs: [string, MediaBlob][]): Promise<void> {
    // Uploads run concurrently; the first rejection propagates.
    const ret = keyValuePairs.map((keyValue) => this._set(keyValue));
    await Promise.all(ret);
  }

  abstract buildGetMetadataConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    ResponseType,
    AuthOptions
  >;

  /** Fetch the stored metadata record for `key`. */
  async _getMetadata(key: string): Promise<Record<string, unknown>> {
    const connection = this.buildGetMetadataConnection(key);
    const options = {};
    const response = await connection.request(options);
    return response.data;
  }

  abstract buildGetDataConnection(
    key: string
  ): GoogleDownloadRawConnection<AsyncCallerCallOptions, AuthOptions>;

  /** Fetch the raw content for `key`. */
  async _getData(key: string): Promise<Blob> {
    const connection = this.buildGetDataConnection(key);
    const options = {};
    const response = await connection.request(options);
    return response.data;
  }

  _getMimetypeFromMetadata(metadata: Record<string, unknown>): string {
    return metadata.contentType as string;
  }

  /**
   * Fetch one blob. Metadata and content are independent requests, so
   * they are issued in parallel. Returns undefined when either comes
   * back empty.
   */
  async _get(key: string): Promise<MediaBlob | undefined> {
    const [metadata, data] = await Promise.all([
      this._getMetadata(key),
      this._getData(key),
    ]);
    if (data && metadata) {
      return await MediaBlob.fromBlob(data, { metadata, path: key });
    }
    return undefined;
  }

  async mget(keys: string[]): Promise<(MediaBlob | undefined)[]> {
    const ret = keys.map((key) => this._get(key));
    return await Promise.all(ret);
  }

  abstract buildDeleteConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    GoogleResponse,
    AuthOptions
  >;

  /** Delete a single key; the service response is discarded. */
  async _del(key: string): Promise<void> {
    const connection = this.buildDeleteConnection(key);
    const options = {};
    await connection.request(options);
  }

  async mdelete(keys: string[]): Promise<void> {
    const ret = keys.map((key) => this._del(key));
    await Promise.all(ret);
  }

  // eslint-disable-next-line require-yield
  async *yieldKeys(_prefix: string | undefined): AsyncGenerator<string> {
    // TODO: Implement. Most have an implementation that uses nextToken.
    throw new Error("yieldKeys is not implemented");
  }
}
/**
 * Based on https://cloud.google.com/storage/docs/json_api/v1/objects#resource
 */
export interface GoogleCloudStorageObject extends Record<string, unknown> {
  id?: string;
  name?: string;
  contentType?: string;
  metadata?: Record<string, unknown>;
  // This is incomplete.
}
/** JSON-API response whose payload is a single storage object resource. */
export interface GoogleCloudStorageResponse extends GoogleResponse {
  data: GoogleCloudStorageObject;
}
/** The two components of a gs://bucket/path URI. */
export type BucketAndPath = {
  bucket: string;
  path: string;
};
/**
 * Parses and formats `gs://bucket/path` URIs.
 * The constructor throws if the string is not a valid gs:// URI.
 */
export class GoogleCloudStorageUri {
  // Anchored (^...$) so strings that merely *contain* a gs:// URI are
  // rejected. Bucket: 3+ chars, alphanumeric at both ends, with dots,
  // underscores, and hyphens allowed in between.
  static uriRegexp = /^gs:\/\/([a-z0-9][a-z0-9._-]+[a-z0-9])\/(.*)$/;

  bucket: string;

  path: string;

  constructor(uri: string) {
    const bucketAndPath = GoogleCloudStorageUri.uriToBucketAndPath(uri);
    this.bucket = bucketAndPath.bucket;
    this.path = bucketAndPath.path;
  }

  /** Re-assemble the canonical gs:// URI. */
  get uri() {
    return `gs://${this.bucket}/${this.path}`;
  }

  get isValid() {
    return (
      typeof this.bucket !== "undefined" && typeof this.path !== "undefined"
    );
  }

  /**
   * Split a gs:// URI into bucket and object path.
   * @throws Error if the URI does not match the gs:// format.
   */
  static uriToBucketAndPath(uri: string): BucketAndPath {
    const match = this.uriRegexp.exec(uri);
    if (!match) {
      throw new Error(`Invalid gs:// URI: ${uri}`);
    }
    return {
      bucket: match[1],
      path: match[2],
    };
  }

  /** True when the entire string is a well-formed gs:// URI. */
  static isValidUri(uri: string): boolean {
    return this.uriRegexp.test(uri);
  }
}
/** A Cloud Storage connection is addressed by a full gs:// URI. */
export interface GoogleCloudStorageConnectionParams {
  uri: string;
}
/** Parameters for a multipart upload targeting the object at `uri`. */
export interface GoogleCloudStorageUploadConnectionParams<AuthOptions>
  extends GoogleUploadConnectionParams<AuthOptions>,
    GoogleCloudStorageConnectionParams {}
/**
 * Multipart upload connection targeting the Cloud Storage JSON API
 * "objects: insert" endpoint for the configured bucket.
 */
export class GoogleCloudStorageUploadConnection<
  AuthOptions
> extends GoogleMultipartUploadConnection<
  AsyncCallerCallOptions,
  GoogleCloudStorageResponse,
  AuthOptions
> {
  uri: GoogleCloudStorageUri;

  constructor(
    fields: GoogleCloudStorageUploadConnectionParams<AuthOptions>,
    caller: AsyncCaller,
    client: GoogleAbstractedClient
  ) {
    super(fields, caller, client);
    this.uri = new GoogleCloudStorageUri(fields.uri);
  }

  /** Upload endpoint for the target bucket (uploadType=multipart). */
  async buildUrl(): Promise<string> {
    const host = "https://storage.googleapis.com";
    return `${host}/upload/storage/${this.apiVersion}/b/${this.uri.bucket}/o?uploadType=multipart`;
  }
}
/**
 * Parameters for metadata-style Cloud Storage requests.
 * `alt: "media"` requests the object content; `undefined` requests the
 * object's JSON metadata (or is unused for DELETE).
 */
export interface GoogleCloudStorageDownloadConnectionParams<AuthOptions>
  extends GoogleCloudStorageConnectionParams,
    GoogleConnectionParams<AuthOptions> {
  method: GoogleAbstractedClientOpsMethod;
  alt: "media" | undefined;
}
/**
 * Connection for GET/DELETE calls against a single Cloud Storage object,
 * addressed by its gs:// URI.
 */
export class GoogleCloudStorageDownloadConnection<
  ResponseType extends GoogleResponse,
  AuthOptions
> extends GoogleDownloadConnection<
  AsyncCallerCallOptions,
  ResponseType,
  AuthOptions
> {
  uri: GoogleCloudStorageUri;

  method: GoogleAbstractedClientOpsMethod;

  alt: "media" | undefined;

  constructor(
    fields: GoogleCloudStorageDownloadConnectionParams<AuthOptions>,
    caller: AsyncCaller,
    client: GoogleAbstractedClient
  ) {
    super(fields, caller, client);
    this.uri = new GoogleCloudStorageUri(fields.uri);
    this.method = fields.method;
    this.alt = fields.alt;
  }

  buildMethod(): GoogleAbstractedClientOpsMethod {
    return this.method;
  }

  /** Object endpoint; appends `?alt=…` only when an alt was configured. */
  async buildUrl(): Promise<string> {
    const objectPath = encodeURIComponent(this.uri.path);
    const base = `https://storage.googleapis.com/storage/${this.apiVersion}/b/${this.uri.bucket}/o/${objectPath}`;
    if (this.alt) {
      return `${base}?alt=${this.alt}`;
    }
    return base;
  }
}
/** Parameters for downloading raw object bytes from Cloud Storage. */
export interface GoogleCloudStorageRawConnectionParams<AuthOptions>
  extends GoogleCloudStorageConnectionParams,
    GoogleConnectionParams<AuthOptions> {}
/**
 * Connection that downloads the raw bytes of one Cloud Storage object
 * (always `alt=media`).
 */
export class GoogleCloudStorageRawConnection<
  AuthOptions
> extends GoogleDownloadRawConnection<AsyncCallerCallOptions, AuthOptions> {
  uri: GoogleCloudStorageUri;

  constructor(
    fields: GoogleCloudStorageRawConnectionParams<AuthOptions>,
    caller: AsyncCaller,
    client: GoogleAbstractedClient
  ) {
    super(fields, caller, client);
    this.uri = new GoogleCloudStorageUri(fields.uri);
  }

  async buildUrl(): Promise<string> {
    const objectPath = encodeURIComponent(this.uri.path);
    return `https://storage.googleapis.com/storage/${this.apiVersion}/b/${this.uri.bucket}/o/${objectPath}?alt=media`;
  }
}
/** Blob-store config plus the gs:// prefix all keys are resolved under. */
export interface BlobStoreGoogleCloudStorageBaseParams<AuthOptions>
  extends BlobStoreGoogleParams<AuthOptions> {
  uriPrefix: GoogleCloudStorageUri;
}
/**
 * Blob store backed by Google Cloud Storage. Keys are full gs:// URIs;
 * each builder below turns a key into the matching JSON-API connection.
 */
export abstract class BlobStoreGoogleCloudStorageBase<
  AuthOptions
> extends BlobStoreGoogle<GoogleCloudStorageResponse, AuthOptions> {
  // Retained so the builder methods can forward the original config.
  params: BlobStoreGoogleCloudStorageBaseParams<AuthOptions>;
  constructor(fields: BlobStoreGoogleCloudStorageBaseParams<AuthOptions>) {
    super(fields);
    this.params = fields;
    // Keys are resolved relative to the configured gs:// prefix.
    this.defaultStoreOptions = {
      ...this.defaultStoreOptions,
      pathPrefix: fields.uriPrefix.uri,
    };
  }
  /** Multipart upload connection writing the object at gs:// URI `key`. */
  buildSetConnection([key, _blob]: [
    string,
    MediaBlob
  ]): GoogleMultipartUploadConnection<
    AsyncCallerCallOptions,
    GoogleCloudStorageResponse,
    AuthOptions
  > {
    const params: GoogleCloudStorageUploadConnectionParams<AuthOptions> = {
      ...this.params,
      uri: key,
    };
    return new GoogleCloudStorageUploadConnection<AuthOptions>(
      params,
      this.caller,
      this.client
    );
  }
  /** Object resource (name, custom metadata, contentType) for the upload. */
  buildSetMetadata([key, blob]: [string, MediaBlob]): Record<string, unknown> {
    const uri = new GoogleCloudStorageUri(key);
    const ret: GoogleCloudStorageObject = {
      name: uri.path,
      metadata: blob.metadata,
      contentType: blob.mimetype,
    };
    return ret;
  }
  /** GET connection returning the object's JSON metadata (no alt param). */
  buildGetMetadataConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    GoogleCloudStorageResponse,
    AuthOptions
  > {
    const params: GoogleCloudStorageDownloadConnectionParams<AuthOptions> = {
      uri: key,
      method: "GET",
      alt: undefined,
    };
    return new GoogleCloudStorageDownloadConnection<
      GoogleCloudStorageResponse,
      AuthOptions
    >(params, this.caller, this.client);
  }
  /** Raw connection returning the object's bytes (alt=media). */
  buildGetDataConnection(
    key: string
  ): GoogleDownloadRawConnection<AsyncCallerCallOptions, AuthOptions> {
    const params: GoogleCloudStorageRawConnectionParams<AuthOptions> = {
      uri: key,
    };
    return new GoogleCloudStorageRawConnection<AuthOptions>(
      params,
      this.caller,
      this.client
    );
  }
  /** DELETE connection removing the object at `key`. */
  buildDeleteConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    GoogleResponse,
    AuthOptions
  > {
    const params: GoogleCloudStorageDownloadConnectionParams<AuthOptions> = {
      uri: key,
      method: "DELETE",
      alt: undefined,
    };
    return new GoogleCloudStorageDownloadConnection<
      GoogleResponse,
      AuthOptions
    >(params, this.caller, this.client);
  }
}
/** Processing state reported by the AI Studio Files API. */
export type AIStudioFileState =
  | "PROCESSING"
  | "ACTIVE"
  | "FAILED"
  | "STATE_UNSPECIFIED";
/** Video-specific metadata attached to uploaded video files. */
export type AIStudioFileVideoMetadata = {
  videoMetadata: {
    videoDuration: string; // Duration in seconds, possibly with fractional, ending in "s"
  };
};
export type AIStudioFileMetadata = AIStudioFileVideoMetadata;
/** File resource returned by the AI Studio (Gemini API) Files API. */
export interface AIStudioFileObject {
  name?: string;
  displayName?: string;
  mimeType?: string;
  sizeBytes?: string; // int64 format
  createTime?: string; // timestamp format
  updateTime?: string; // timestamp format
  expirationTime?: string; // timestamp format
  sha256Hash?: string; // base64 encoded
  uri?: string;
  state?: AIStudioFileState;
  error?: {
    code: number;
    message: string;
    details: Record<string, unknown>[];
  };
  metadata?: AIStudioFileMetadata;
}
/**
 * MediaBlob specialization exposing the AI Studio file timestamps
 * (create/update/expiration) stored in its metadata as Date values.
 */
export class AIStudioMediaBlob extends MediaBlob {
  /** Parse a timestamp string; falsy input maps to the Unix epoch. */
  _valueAsDate(value: string): Date {
    return value ? new Date(value) : new Date(0);
  }

  _metadataFieldAsDate(field: string): Date {
    return this._valueAsDate(this.metadata?.[field]);
  }

  get createDate(): Date {
    return this._metadataFieldAsDate("createTime");
  }

  get updateDate(): Date {
    return this._metadataFieldAsDate("updateTime");
  }

  get expirationDate(): Date {
    return this._metadataFieldAsDate("expirationTime");
  }

  get isExpired(): boolean {
    // ISO-8601 UTC timestamps compare chronologically as plain strings.
    // NOTE(review): a missing expirationTime compares equal to "now"
    // and therefore reads as expired — presumably intentional, confirm.
    const nowIso = new Date().toISOString();
    const expiration = this.metadata?.expirationTime ?? nowIso;
    return expiration <= nowIso;
  }
}
/** Response to a files.get call: the file resource itself. */
export interface AIStudioFileGetResponse extends GoogleResponse {
  data: AIStudioFileObject;
}
/** Response to an upload: the created file wrapped in a `file` field. */
export interface AIStudioFileSaveResponse extends GoogleResponse {
  data: {
    file: AIStudioFileObject;
  };
}
/** Response to a files.list call: a page of files plus a page token. */
export interface AIStudioFileListResponse extends GoogleResponse {
  data: {
    files: AIStudioFileObject[];
    nextPageToken: string;
  };
}
export type AIStudioFileResponse =
  | AIStudioFileGetResponse
  | AIStudioFileSaveResponse
  | AIStudioFileListResponse;
/** Marker interface; AI Studio file connections take no extra params. */
export interface AIStudioFileConnectionParams {}
export interface AIStudioFileUploadConnectionParams<AuthOptions>
  extends GoogleUploadConnectionParams<AuthOptions>,
    AIStudioFileConnectionParams {}
/**
 * Multipart upload connection targeting the AI Studio (Gemini API)
 * Files endpoint.
 */
export class AIStudioFileUploadConnection<
  AuthOptions
> extends GoogleMultipartUploadConnection<
  AsyncCallerCallOptions,
  AIStudioFileSaveResponse,
  AuthOptions
> {
  // The Files API is exposed on the v1beta surface.
  apiVersion = "v1beta";

  async buildUrl(): Promise<string> {
    const host = "https://generativelanguage.googleapis.com";
    return `${host}/upload/${this.apiVersion}/files`;
  }
}
/**
 * Parameters for a per-file metadata request (GET) or removal (DELETE).
 */
export interface AIStudioFileDownloadConnectionParams<AuthOptions>
  extends AIStudioFileConnectionParams,
    GoogleConnectionParams<AuthOptions> {
  // HTTP verb to use: "GET" for metadata, "DELETE" for removal
  method: GoogleAbstractedClientOpsMethod;
  // The file's unique name (last path segment of its URI)
  name: string;
}
/**
 * Connection for per-file operations against the AI Studio File API.
 * The HTTP verb is chosen per instance, so the same class serves both
 * metadata fetches (GET) and deletions (DELETE).
 */
export class AIStudioFileDownloadConnection<
  ResponseType extends GoogleResponse,
  AuthOptions
> extends GoogleDownloadConnection<
  AsyncCallerCallOptions,
  ResponseType,
  AuthOptions
> {
  method: GoogleAbstractedClientOpsMethod;

  name: string;

  apiVersion = "v1beta";

  constructor(
    fields: AIStudioFileDownloadConnectionParams<AuthOptions>,
    caller: AsyncCaller,
    client: GoogleAbstractedClient
  ) {
    super(fields, caller, client);
    // Capture the per-request verb and target file name.
    const { method, name } = fields;
    this.method = method;
    this.name = name;
  }

  buildMethod(): GoogleAbstractedClientOpsMethod {
    return this.method;
  }

  async buildUrl(): Promise<string> {
    const host = "https://generativelanguage.googleapis.com";
    return `${host}/${this.apiVersion}/files/${this.name}`;
  }
}
export interface BlobStoreAIStudioFileBaseParams<AuthOptions>
  extends BlobStoreGoogleParams<AuthOptions> {
  // Milliseconds to wait between metadata polls while an uploaded file is
  // still in the PROCESSING state. Values <= 0 disable the polling loop.
  retryTime?: number;
}
/**
 * A MediaBlob store backed by the AI Studio (Gemini) File API.
 * Handles upload, metadata fetch, and deletion of files. The File API
 * does not serve file contents back, so _get() returns metadata with an
 * empty data value. Subclasses supply the platform-specific auth client.
 */
export abstract class BlobStoreAIStudioFileBase<
  AuthOptions
> extends BlobStoreGoogle<AIStudioFileResponse, AuthOptions> {
  params?: BlobStoreAIStudioFileBaseParams<AuthOptions>;
  // Delay (ms) between metadata polls while an upload is PROCESSING;
  // values <= 0 disable polling.
  retryTime: number = 1000;
  constructor(fields?: BlobStoreAIStudioFileBaseParams<AuthOptions>) {
    // Default to the v1beta files path prefix and strip invalid paths,
    // since the File API assigns the canonical name/URI on upload.
    const params: BlobStoreAIStudioFileBaseParams<AuthOptions> = {
      defaultStoreOptions: {
        pathPrefix: "https://generativelanguage.googleapis.com/v1beta/files/",
        actionIfInvalid: "removePath",
      },
      ...fields,
    };
    super(params);
    this.params = params;
    this.retryTime = params?.retryTime ?? this.retryTime ?? 1000;
  }
  // Extract the file name from a full path/URI (the last path segment).
  _pathToName(path: string): string {
    return path.split("/").pop() ?? path;
  }
  // Subclasses create the platform-specific (e.g. OAuth-capable) client.
  abstract buildAbstractedClient(
    fields?: BlobStoreGoogleParams<AuthOptions>
  ): GoogleAbstractedClient;
  buildApiKeyClient(apiKey: string): GoogleAbstractedClient {
    return new ApiKeyGoogleAuth(apiKey);
  }
  // Use an explicit key if provided, else the GOOGLE_API_KEY env var.
  buildApiKey(fields?: BlobStoreGoogleParams<AuthOptions>): string | undefined {
    return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
  }
  // Prefer an API-key client when a key is available; otherwise fall
  // back to the subclass-provided abstract client.
  buildClient(
    fields?: BlobStoreGoogleParams<AuthOptions>
  ): GoogleAbstractedClient {
    const apiKey = this.buildApiKey(fields);
    if (apiKey) {
      return this.buildApiKeyClient(apiKey);
    } else {
      // TODO: Test that you can use OAuth to access
      return this.buildAbstractedClient(fields);
    }
  }
  // Wait retryTime ms, then fetch fresh metadata for the key.
  async _regetMetadata(key: string): Promise<AIStudioFileObject> {
    // Sleep for some time period
    // eslint-disable-next-line no-promise-executor-return
    await new Promise((resolve) => setTimeout(resolve, this.retryTime));
    // Fetch the latest metadata
    return this._getMetadata(key);
  }
  // Upload the blob, poll until it leaves the PROCESSING state, then
  // write the server-assigned URI and metadata back onto the blob.
  // NOTE(review): the poll loop has no iteration cap, so a file stuck in
  // PROCESSING would loop forever — confirm whether an upper bound or
  // timeout is needed.
  async _set([key, blob]: [
    string,
    MediaBlob
  ]): Promise<AIStudioFileSaveResponse> {
    const response = (await super._set([
      key,
      blob,
    ])) as AIStudioFileSaveResponse;
    let file = response.data?.file ?? { state: "FAILED" };
    while (file.state === "PROCESSING" && file.uri && this.retryTime > 0) {
      file = await this._regetMetadata(file.uri);
    }
    // The response should contain the name (and valid URI), so we need to
    // update the blob with this. We can't return a new blob, since mset()
    // doesn't return anything.
    /* eslint-disable no-param-reassign */
    blob.path = file.uri;
    blob.metadata = {
      ...blob.metadata,
      ...file,
    };
    /* eslint-enable no-param-reassign */
    return response;
  }
  buildSetConnection([_key, _blob]: [
    string,
    MediaBlob
  ]): GoogleMultipartUploadConnection<
    AsyncCallerCallOptions,
    AIStudioFileResponse,
    AuthOptions
  > {
    return new AIStudioFileUploadConnection(
      this.params,
      this.caller,
      this.client
    );
  }
  // No extra metadata is sent alongside uploads.
  buildSetMetadata([_key, _blob]: [string, MediaBlob]): Record<
    string,
    unknown
  > {
    return {};
  }
  buildGetMetadataConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    AIStudioFileResponse,
    AuthOptions
  > {
    const params: AIStudioFileDownloadConnectionParams<AuthOptions> = {
      ...this.params,
      method: "GET",
      name: this._pathToName(key),
    };
    return new AIStudioFileDownloadConnection<
      AIStudioFileResponse,
      AuthOptions
    >(params, this.caller, this.client);
  }
  buildGetDataConnection(
    _key: string
  ): GoogleDownloadRawConnection<AsyncCallerCallOptions, AuthOptions> {
    // The File API exposes only metadata, never the file contents.
    throw new Error("AI Studio File API does not provide data");
  }
  // Fetch a blob by key. Only metadata is available from the API, so the
  // returned blob carries an empty value with the recorded mime type.
  async _get(key: string): Promise<MediaBlob | undefined> {
    const metadata = await this._getMetadata(key);
    if (metadata) {
      const contentType =
        (metadata?.mimeType as string) ?? "application/octet-stream";
      // TODO - Get the actual data (and other metadata) from an optional backing store
      const data: MediaBlobData = {
        value: "",
        type: contentType,
      };
      return new MediaBlob({
        path: key,
        data,
        metadata,
      });
    } else {
      return undefined;
    }
  }
  buildDeleteConnection(
    key: string
  ): GoogleDownloadConnection<
    AsyncCallerCallOptions,
    AIStudioFileResponse,
    AuthOptions
  > {
    const params: AIStudioFileDownloadConnectionParams<AuthOptions> = {
      ...this.params,
      method: "DELETE",
      name: this._pathToName(key),
    };
    return new AIStudioFileDownloadConnection<
      AIStudioFileResponse,
      AuthOptions
    >(params, this.caller, this.client);
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src/experimental | lc_public_repos/langchainjs/libs/langchain-google-common/src/experimental/utils/media_core.ts | import { v1, v4 } from "uuid"; // FIXME - it is importing the wrong uuid, so v6 and v7 aren't implemented
import { BaseStore } from "@langchain/core/stores";
import { Serializable } from "@langchain/core/load/serializable";
// The raw contents of a blob plus its content type.
export type MediaBlobData = {
  value: string; // In Base64 encoding
  type: string; // The mime type and possibly encoding
};
// Constructor parameters for MediaBlob; every field is optional.
export interface MediaBlobParameters {
  data?: MediaBlobData;
  metadata?: Record<string, unknown>;
  path?: string;
}
/**
 * Convert raw bytes to a string where each character's code is the
 * corresponding byte value (a latin1-style mapping, not utf-8 decoding).
 * Works on bounded slices so spreading into String.fromCharCode never
 * exceeds the engine's maximum argument count.
 */
function bytesToString(dataArray: Uint8Array): string {
  const CHUNK_SIZE = 102400;
  const pieces: string[] = [];
  for (let offset = 0; offset < dataArray.length; offset += CHUNK_SIZE) {
    const slice = dataArray.subarray(offset, offset + CHUNK_SIZE);
    pieces.push(String.fromCharCode(...slice));
  }
  return pieces.join("");
}
/**
 * Represents a chunk of data that can be identified by the path where the
 * data is (or will be) located, along with optional metadata about the data.
 */
export class MediaBlob extends Serializable implements MediaBlobParameters {
  lc_serializable = true;

  lc_namespace = [
    "langchain",
    "google_common",
    "experimental",
    "utils",
    "media_core",
  ];

  // Defaults to an empty plain-text payload when none is supplied.
  data: MediaBlobData = {
    value: "",
    type: "text/plain",
  };

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  metadata?: Record<string, any>;

  path?: string;

  constructor(params: MediaBlobParameters) {
    super(params);
    this.data = params.data ?? this.data;
    this.metadata = params.metadata;
    this.path = params.path;
  }

  /** Size of the decoded data in bytes. */
  get size(): number {
    return this.asBytes.length;
  }

  /** The full content type string, possibly including parameters. */
  get dataType(): string {
    return this.data?.type ?? "";
  }

  /** The charset from the content type, defaulting to "utf-8". */
  get encoding(): string {
    const charsetEquals = this.dataType.indexOf("charset=");
    return charsetEquals === -1
      ? "utf-8"
      : this.dataType.substring(charsetEquals + 8);
  }

  /** The mime type portion of the content type, without parameters. */
  get mimetype(): string {
    const semicolon = this.dataType.indexOf(";");
    return semicolon === -1
      ? this.dataType
      : this.dataType.substring(0, semicolon);
  }

  /** Decode the base64 value into raw bytes. */
  get asBytes(): Uint8Array {
    if (!this.data) {
      return Uint8Array.from([]);
    }
    const binString = atob(this.data?.value);
    const ret = new Uint8Array(binString.length);
    for (let co = 0; co < binString.length; co += 1) {
      ret[co] = binString.charCodeAt(co);
    }
    return ret;
  }

  /** The data as a binary string (one character per byte). */
  async asString(): Promise<string> {
    return bytesToString(this.asBytes);
  }

  /** The data in its base64 form. */
  async asBase64(): Promise<string> {
    return this.data?.value ?? "";
  }

  /** The data as a "data:" URL with a base64 payload. */
  async asDataUrl(): Promise<string> {
    return `data:${this.mimetype};base64,${await this.asBase64()}`;
  }

  /** The blob's path if set, otherwise a data: URL of its contents. */
  async asUri(): Promise<string> {
    return this.path ?? (await this.asDataUrl());
  }

  /** The encoded payload plus which transfer encoding was used. */
  async encode(): Promise<{ encoded: string; encoding: string }> {
    const dataUrl = await this.asDataUrl();
    const comma = dataUrl.indexOf(",");
    const encoded = dataUrl.substring(comma + 1);
    const encoding: string = dataUrl.indexOf("base64") > -1 ? "base64" : "8bit";
    return {
      encoded,
      encoding,
    };
  }

  /**
   * Build a MediaBlob from a "data:" URL. The payload is assumed to be
   * base64 encoded.
   * @throws If the URL does not start with "data:".
   */
  static fromDataUrl(url: string): MediaBlob {
    if (!url.startsWith("data:")) {
      throw new Error("Not a data: URL");
    }
    const colon = url.indexOf(":");
    const semicolon = url.indexOf(";");
    const comma = url.indexOf(",");
    // The mime type ends at the ";base64" marker when one is present,
    // otherwise at the comma (e.g. "data:text/plain,..."). Previously a
    // URL without a ";" produced an empty mime type because substring()
    // received an end index of -1.
    const mimeTypeEnd = semicolon === -1 ? comma : semicolon;
    const mimeType = url.substring(colon + 1, mimeTypeEnd);
    const base64Data = url.substring(comma + 1);
    const data: MediaBlobData = {
      type: mimeType,
      value: base64Data,
    };
    return new MediaBlob({
      data,
      path: url,
    });
  }

  /**
   * Build a MediaBlob from a standard Blob, base64 encoding its contents.
   * @param blob The source Blob
   * @param other Additional MediaBlob parameters (metadata, path)
   */
  static async fromBlob(
    blob: Blob,
    other?: Omit<MediaBlobParameters, "data">
  ): Promise<MediaBlob> {
    const valueBuffer = await blob.arrayBuffer();
    const valueArray = new Uint8Array(valueBuffer);
    const valueStr = bytesToString(valueArray);
    const value = btoa(valueStr);
    return new MediaBlob({
      ...other,
      data: {
        value,
        type: blob.type,
      },
    });
  }
}
// How to repair a blob whose path is missing or not valid for the store.
export type ActionIfInvalidAction =
  | "ignore"
  | "prefixPath"
  | "prefixUuid1"
  | "prefixUuid4"
  | "prefixUuid6"
  | "prefixUuid7"
  | "removePath";
export interface BlobStoreStoreOptions {
  /**
   * If the path is missing or invalid in the blob, how should we create
   * a new path?
   * Subclasses may define their own methods, but the following are supported
   * by default:
   * - Undefined or an empty string: Reject the blob
   * - "ignore": Attempt to store it anyway (but this may fail)
   * - "prefixPath": Use the default prefix for the BlobStore and get the
   *   unique portion from the URL. The original path is stored in the metadata
   * - "prefixUuid1" / "prefixUuid4": Use the default prefix for the BlobStore
   *   and get the unique portion from a generated UUID (v1 or v4). The
   *   original path is stored in the metadata
   * - "removePath": Drop the path entirely so the store can assign one
   */
  actionIfInvalid?: ActionIfInvalidAction;
  /**
   * The expected prefix for URIs that are stored.
   * This may be used to test if a MediaBlob is valid and used to create a new
   * path if "prefixPath" or one of the "prefixUuid*" actions is set for
   * actionIfInvalid.
   */
  pathPrefix?: string;
}
// How to behave when a fetch() does not find the requested blob.
export type ActionIfBlobMissingAction = "emptyBlob";
export interface BlobStoreFetchOptions {
  /**
   * If the blob is not found when fetching, what should we do?
   * Subclasses may define their own methods, but the following are supported
   * by default:
   * - Undefined or an empty string: return undefined
   * - "emptyBlob": return a new MediaBlob that has the path set, but nothing else.
   */
  actionIfBlobMissing?: ActionIfBlobMissingAction;
}
// Default store/fetch options applied to every call on a BlobStore.
export interface BlobStoreOptions {
  defaultStoreOptions?: BlobStoreStoreOptions;
  defaultFetchOptions?: BlobStoreFetchOptions;
}
/**
 * A specialized Store that is designed to handle MediaBlobs and use the
 * key that is included in the blob to determine exactly how it is stored.
 *
 * The full details of a MediaBlob may be changed when it is stored.
 * For example, it may get additional or different Metadata. This should be
 * what is returned when the store() method is called.
 *
 * Although BlobStore extends BaseStore, not all of the methods from
 * BaseStore may be implemented (or even possible). Those that are not
 * implemented should be documented and throw an Error if called.
 */
export abstract class BlobStore extends BaseStore<string, MediaBlob> {
  lc_namespace = ["langchain", "google-common"]; // FIXME - What should this be? And why?
  // Options merged into every store() call unless overridden per-call.
  defaultStoreOptions: BlobStoreStoreOptions;
  // Options merged into every fetch() call unless overridden per-call.
  defaultFetchOptions: BlobStoreFetchOptions;
  constructor(opts?: BlobStoreOptions) {
    super(opts);
    this.defaultStoreOptions = opts?.defaultStoreOptions ?? {};
    this.defaultFetchOptions = opts?.defaultFetchOptions ?? {};
  }
  // Normalize a key that may be a string or a MediaBlob into a URI string.
  protected async _realKey(key: string | MediaBlob): Promise<string> {
    return typeof key === "string" ? key : await key.asUri();
  }
  /**
   * Is the path supported by this BlobStore?
   *
   * Although this is async, this is expected to be a relatively fast operation
   * (ie - you shouldn't make network calls).
   *
   * @param path The path to check
   * @param opts Any options (if needed) that may be used to determine if it is valid
   * @return If the path is supported
   */
  hasValidPath(
    path: string | undefined,
    opts?: BlobStoreStoreOptions
  ): Promise<boolean> {
    const prefix = opts?.pathPrefix ?? "";
    const isPrefixed = typeof path !== "undefined" && path.startsWith(prefix);
    return Promise.resolve(isPrefixed);
  }
  // Everything after the leading run of "/" in the blob's path; used as
  // the unique portion when re-prefixing an invalid path.
  protected _blobPathSuffix(blob: MediaBlob): string {
    // Get the path currently set and make sure we treat it as a string
    const blobPath = `${blob.path}`;
    // Advance past the first set of /
    let pathStart = blobPath.indexOf("/") + 1;
    while (blobPath.charAt(pathStart) === "/") {
      pathStart += 1;
    }
    // We will use the rest as the path for a replacement
    return blobPath.substring(pathStart);
  }
  // Copy a blob, recording its old path in metadata.langchainOldPath and
  // either setting the new path or removing it entirely.
  protected async _newBlob(
    oldBlob: MediaBlob,
    newPath?: string
  ): Promise<MediaBlob> {
    const oldPath = oldBlob.path;
    const metadata = oldBlob?.metadata ?? {};
    metadata.langchainOldPath = oldPath;
    const newBlob = new MediaBlob({
      ...oldBlob,
      metadata,
    });
    if (newPath) {
      newBlob.path = newPath;
    } else if (newBlob.path) {
      delete newBlob.path;
    }
    return newBlob;
  }
  // "prefixPath" handler: keep the old path's unique suffix but re-home
  // it under this store's configured prefix.
  protected async _validBlobPrefixPath(
    blob: MediaBlob,
    opts?: BlobStoreStoreOptions
  ): Promise<MediaBlob> {
    const prefix = opts?.pathPrefix ?? "";
    const suffix = this._blobPathSuffix(blob);
    const newPath = `${prefix}${suffix}`;
    return this._newBlob(blob, newPath);
  }
  // Map a "prefixUuid*" action name to a freshly generated UUID string.
  protected _validBlobPrefixUuidFunction(
    name: ActionIfInvalidAction | string
  ): string {
    switch (name) {
      case "prefixUuid1":
        return v1();
      case "prefixUuid4":
        return v4();
      // case "prefixUuid6": return v6();
      // case "prefixUuid7": return v7();
      default:
        throw new Error(`Unknown uuid function: ${name}`);
    }
  }
  // "prefixUuid*" handler: store prefix plus a generated UUID.
  protected async _validBlobPrefixUuid(
    blob: MediaBlob,
    opts?: BlobStoreStoreOptions
  ): Promise<MediaBlob> {
    const prefix = opts?.pathPrefix ?? "";
    const suffix = this._validBlobPrefixUuidFunction(
      opts?.actionIfInvalid ?? "prefixUuid4"
    );
    const newPath = `${prefix}${suffix}`;
    return this._newBlob(blob, newPath);
  }
  // "removePath" handler: drop the path so the store can assign one.
  protected async _validBlobRemovePath(
    blob: MediaBlob,
    _opts?: BlobStoreStoreOptions
  ): Promise<MediaBlob> {
    return this._newBlob(blob, undefined);
  }
  /**
   * Based on the blob and options, return a blob that has a valid path
   * that can be saved.
   * @param blob
   * @param opts
   */
  protected async _validStoreBlob(
    blob: MediaBlob,
    opts?: BlobStoreStoreOptions
  ): Promise<MediaBlob | undefined> {
    if (await this.hasValidPath(blob.path, opts)) {
      return blob;
    }
    switch (opts?.actionIfInvalid) {
      case "ignore":
        return blob;
      case "prefixPath":
        return this._validBlobPrefixPath(blob, opts);
      case "prefixUuid1":
      case "prefixUuid4":
      case "prefixUuid6":
      case "prefixUuid7":
        return this._validBlobPrefixUuid(blob, opts);
      case "removePath":
        return this._validBlobRemovePath(blob, opts);
      default:
        // No repair action configured: reject the blob.
        return undefined;
    }
  }
  // Validate/repair the blob's path, write it via mset(), then re-fetch
  // so callers see exactly what the store recorded.
  async store(
    blob: MediaBlob,
    opts: BlobStoreStoreOptions = {}
  ): Promise<MediaBlob | undefined> {
    const allOpts: BlobStoreStoreOptions = {
      ...this.defaultStoreOptions,
      ...opts,
    };
    const validBlob = await this._validStoreBlob(blob, allOpts);
    if (typeof validBlob !== "undefined") {
      const validKey = await validBlob.asUri();
      await this.mset([[validKey, validBlob]]);
      const savedKey = await validBlob.asUri();
      return await this.fetch(savedKey);
    }
    return undefined;
  }
  // "emptyBlob" handler: a blob with just the path filled in.
  protected async _missingFetchBlobEmpty(
    path: string,
    _opts?: BlobStoreFetchOptions
  ): Promise<MediaBlob> {
    return new MediaBlob({ path });
  }
  // Decide what to return on a fetch miss, per actionIfBlobMissing.
  protected async _missingFetchBlob(
    path: string,
    opts?: BlobStoreFetchOptions
  ): Promise<MediaBlob | undefined> {
    switch (opts?.actionIfBlobMissing) {
      case "emptyBlob":
        return this._missingFetchBlobEmpty(path, opts);
      default:
        return undefined;
    }
  }
  // Fetch by string key or by a blob's URI, applying default options and
  // the configured missing-blob behavior.
  async fetch(
    key: string | MediaBlob,
    opts: BlobStoreFetchOptions = {}
  ): Promise<MediaBlob | undefined> {
    const allOpts: BlobStoreFetchOptions = {
      ...this.defaultFetchOptions,
      ...opts,
    };
    const realKey = await this._realKey(key);
    const ret = await this.mget([realKey]);
    return ret?.[0] ?? (await this._missingFetchBlob(realKey, allOpts));
  }
}
export interface BackedBlobStoreOptions extends BlobStoreOptions {
  // The store that actually persists the blobs.
  backingStore: BaseStore<string, MediaBlob>;
}
/**
 * A BlobStore that delegates every operation to an arbitrary
 * BaseStore<string, MediaBlob> supplied at construction time.
 */
export class BackedBlobStore extends BlobStore {
  backingStore: BaseStore<string, MediaBlob>;
  constructor(opts: BackedBlobStoreOptions) {
    super(opts);
    this.backingStore = opts.backingStore;
  }
  mdelete(keys: string[]): Promise<void> {
    return this.backingStore.mdelete(keys);
  }
  mget(keys: string[]): Promise<(MediaBlob | undefined)[]> {
    return this.backingStore.mget(keys);
  }
  mset(keyValuePairs: [string, MediaBlob][]): Promise<void> {
    return this.backingStore.mset(keyValuePairs);
  }
  yieldKeys(prefix: string | undefined): AsyncGenerator<string> {
    return this.backingStore.yieldKeys(prefix);
  }
}
export interface ReadThroughBlobStoreOptions extends BlobStoreOptions {
  // The store consulted on reads; acts as the cache/alias layer.
  baseStore: BlobStore;
  // The authoritative store where blobs are actually persisted.
  backingStore: BlobStore;
}
/**
 * A BlobStore that writes through to a backing store and caches the
 * stored result in a base store keyed by the blob's original URI.
 * Reads and deletes go to the base store only.
 */
export class ReadThroughBlobStore extends BlobStore {
  baseStore: BlobStore;
  backingStore: BlobStore;
  constructor(opts: ReadThroughBlobStoreOptions) {
    super(opts);
    this.baseStore = opts.baseStore;
    this.backingStore = opts.backingStore;
  }
  // Store in the backing store first, then alias the (possibly re-pathed)
  // result in the base store under the blob's original URI.
  async store(
    blob: MediaBlob,
    opts: BlobStoreStoreOptions = {}
  ): Promise<MediaBlob | undefined> {
    const originalUri = await blob.asUri();
    const newBlob = await this.backingStore.store(blob, opts);
    if (newBlob) {
      await this.baseStore.mset([[originalUri, newBlob]]);
    }
    return newBlob;
  }
  mdelete(keys: string[]): Promise<void> {
    return this.baseStore.mdelete(keys);
  }
  mget(keys: string[]): Promise<(MediaBlob | undefined)[]> {
    return this.baseStore.mget(keys);
  }
  // Writes must flow through store() so both layers stay consistent.
  mset(_keyValuePairs: [string, MediaBlob][]): Promise<void> {
    throw new Error("Do not call ReadThroughBlobStore.mset directly");
  }
  yieldKeys(prefix: string | undefined): AsyncGenerator<string> {
    return this.baseStore.yieldKeys(prefix);
  }
}
/**
 * A read-only BlobStore that loads blobs over plain HTTP(S) GET requests.
 * Delete, set, and key enumeration are not supported.
 */
export class SimpleWebBlobStore extends BlobStore {
  _notImplementedException() {
    throw new Error("Not implemented for SimpleWebBlobStore");
  }

  /** Only http:// and https:// URLs are considered valid paths. */
  async hasValidPath(
    path: string | undefined,
    _opts?: BlobStoreStoreOptions
  ): Promise<boolean> {
    for (const pathPrefix of ["https://", "http://"]) {
      if (await super.hasValidPath(path, { pathPrefix })) {
        return true;
      }
    }
    return false;
  }

  /**
   * GET the URL, recording status/headers/ok in the blob's metadata.
   * On success the response body becomes the blob's data.
   */
  async _fetch(url: string): Promise<MediaBlob | undefined> {
    const result = new MediaBlob({ path: url });
    const res = await fetch(url, { method: "GET" });
    const headers: Record<string, string> = {};
    res.headers.forEach((value, key) => {
      headers[key] = value;
    });
    const metadata: Record<string, unknown> = {
      status: res.status,
      headers,
      ok: res.ok,
    };
    if (res.ok) {
      const bodyBlob = await MediaBlob.fromBlob(await res.blob());
      result.data = bodyBlob.data;
    }
    result.metadata = metadata;
    return result;
  }

  async mget(keys: string[]): Promise<(MediaBlob | undefined)[]> {
    return Promise.all(keys.map((key) => this._fetch(key)));
  }

  async mdelete(_keys: string[]): Promise<void> {
    this._notImplementedException();
  }

  async mset(_keyValuePairs: [string, MediaBlob][]): Promise<void> {
    this._notImplementedException();
  }

  async *yieldKeys(_prefix: string | undefined): AsyncGenerator<string> {
    this._notImplementedException();
    yield "";
  }
}
/**
 * A blob "store" that works with data: URLs that will turn the URL into
 * a blob.
 */
export class DataBlobStore extends BlobStore {
  _notImplementedException() {
    throw new Error("Not implemented for DataBlobStore");
  }

  /** Only "data:" URLs are valid paths for this store. */
  hasValidPath(path: string, _opts?: BlobStoreStoreOptions): Promise<boolean> {
    return super.hasValidPath(path, { pathPrefix: "data:" });
  }

  /** Parsing a data: URL is synchronous — no network access is involved. */
  _fetch(url: string): MediaBlob {
    return MediaBlob.fromDataUrl(url);
  }

  async mget(keys: string[]): Promise<(MediaBlob | undefined)[]> {
    return keys.map((key) => this._fetch(key));
  }

  async mdelete(_keys: string[]): Promise<void> {
    this._notImplementedException();
  }

  async mset(_keyValuePairs: [string, MediaBlob][]): Promise<void> {
    this._notImplementedException();
  }

  async *yieldKeys(_prefix: string | undefined): AsyncGenerator<string> {
    this._notImplementedException();
    yield "";
  }
}
// Configuration for MediaManager.
export interface MediaManagerConfiguration {
  /**
   * A store that, given a common URI, returns the corresponding MediaBlob.
   * The returned MediaBlob may have a different URI.
   * In many cases, this will be a ReadThroughStore or something similar
   * that has a cached version of the MediaBlob, but also a way to get
   * a new (or refreshed) version.
   */
  store: BlobStore;
  /**
   * BlobStores that can resolve a URL into the MediaBlob to save
   * in the canonical store. This list is evaluated in order.
   * If not provided, a default list (which involves a DataBlobStore
   * and a SimpleWebBlobStore) will be used.
   */
  resolvers?: BlobStore[];
}
/**
 * Responsible for converting a URI (typically a web URL) into a MediaBlob.
 * Allows for aliasing / caching of the requested URI and what it resolves to.
 * This MediaBlob is expected to be usable to provide to an LLM, either
 * through the Base64 of the media or through a canonical URI that the LLM
 * supports.
 */
export class MediaManager {
  // Canonical store for resolved blobs (often a read-through cache).
  store: BlobStore;

  // Optional ordered resolver list; defaultResolvers() is used if unset.
  resolvers: BlobStore[] | undefined;

  constructor(config: MediaManagerConfiguration) {
    this.store = config.store;
    this.resolvers = config.resolvers;
  }

  /** Fallback resolvers: data: URLs first, then plain web URLs. */
  defaultResolvers(): BlobStore[] {
    return [new DataBlobStore({}), new SimpleWebBlobStore({})];
  }

  /** A blob is invalid (and needs resolving) when it is undefined. */
  async _isInvalid(blob: MediaBlob | undefined): Promise<boolean> {
    return typeof blob === "undefined";
  }

  /**
   * Given the public URI, load what is at this URI and save it
   * in the store.
   * Resolvers are consulted in their configured order; the first one that
   * both accepts the path and returns a blob wins. (Previously every
   * matching resolver was consulted and the last result silently won,
   * contradicting the documented in-order evaluation of the list.)
   * @param uri The URI to resolve using the resolver
   * @return A canonical MediaBlob for this URI
   */
  async _resolveAndSave(uri: string): Promise<MediaBlob | undefined> {
    let resolvedBlob: MediaBlob | undefined;
    const resolvers = this.resolvers || this.defaultResolvers();
    for (let co = 0; co < resolvers.length; co += 1) {
      const resolver = resolvers[co];
      if (await resolver.hasValidPath(uri)) {
        resolvedBlob = await resolver.fetch(uri);
        if (typeof resolvedBlob !== "undefined") {
          // First successful resolver wins; stop consulting the rest.
          break;
        }
      }
    }
    if (resolvedBlob) {
      return await this.store.store(resolvedBlob);
    } else {
      return new MediaBlob({});
    }
  }

  /**
   * Return the canonical MediaBlob for a URI: the cached/aliased blob
   * when the store already has one, otherwise resolve and save it first.
   */
  async getMediaBlob(uri: string): Promise<MediaBlob | undefined> {
    const aliasBlob = await this.store.fetch(uri);
    const ret = (await this._isInvalid(aliasBlob))
      ? await this._resolveAndSave(uri)
      : (aliasBlob as MediaBlob);
    return ret;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/zod_to_gemini_parameters.ts | /* eslint-disable @typescript-eslint/no-unused-vars */
import type { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import {
GeminiFunctionSchema,
GeminiJsonSchema,
GeminiJsonSchemaDirty,
} from "../types.js";
/**
 * Recursively strip every "additionalProperties" attribute from a JSON
 * Schema object, returning a new object tree (the input is not mutated).
 * Non-object values (and null) are returned unchanged.
 */
export function removeAdditionalProperties(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  obj: Record<string, any>
): GeminiJsonSchema {
  if (typeof obj === "object" && obj !== null) {
    const newObj = { ...obj };
    if ("additionalProperties" in newObj) {
      delete newObj.additionalProperties;
    }
    // Iterate own enumerable keys only. The previous `for..in` walked the
    // prototype chain and its `key in newObj` guard was always true
    // (a no-prototype-builtins hazard rather than an ownership check).
    for (const key of Object.keys(newObj)) {
      const value = newObj[key];
      if (Array.isArray(value)) {
        newObj[key] = value.map(removeAdditionalProperties);
      } else if (typeof value === "object" && value !== null) {
        newObj[key] = removeAdditionalProperties(value);
      }
    }
    return newObj as GeminiJsonSchema;
  }
  return obj as GeminiJsonSchema;
}
export function zodToGeminiParameters(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
zodObj: z.ZodType<any>
): GeminiFunctionSchema {
// Gemini doesn't accept either the $schema or additionalProperties
// attributes, so we need to explicitly remove them.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const jsonSchema = removeAdditionalProperties(zodToJsonSchema(zodObj));
const { $schema, ...rest } = jsonSchema;
return rest;
}
/**
 * Convert an already-built JSON Schema object into the parameter schema
 * format Gemini expects, stripping the $schema and additionalProperties
 * attributes that Gemini rejects.
 */
export function jsonSchemaToGeminiParameters(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  schema: Record<string, any>
): GeminiFunctionSchema {
  const { $schema, ...geminiSchema } = removeAdditionalProperties(
    schema as GeminiJsonSchemaDirty
  );
  return geminiSchema;
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/failed_handler.ts | import { AsyncCallerParams } from "@langchain/core/utils/async_caller";
// HTTP status codes for which a retry will not fix the problem, so the
// failure should be surfaced immediately instead of retried.
const STATUS_NO_RETRY = [
  400, // Bad Request
  401, // Unauthorized
  402, // Payment Required
  403, // Forbidden
  404, // Not Found
  405, // Method Not Allowed
  406, // Not Acceptable
  407, // Proxy Authentication Required
  408, // Request Timeout
  409, // Conflict
];
// Failure handler installed on the AsyncCaller: rethrowing from here
// aborts further retry attempts for the current request.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function failedAttemptHandler(error: any) {
  // Pull the HTTP status off the (axios-style) error response, if any.
  const status = error?.response?.status ?? 0;
  if (status === 0) {
    // What is this?
    console.error("failedAttemptHandler", error);
  }
  // What errors shouldn't be retried?
  if (STATUS_NO_RETRY.includes(+status)) {
    throw error;
  }
  // NOTE(review): this unconditional throw means every failed attempt
  // aborts immediately, which makes the STATUS_NO_RETRY branch above
  // moot — confirm whether retryable statuses were intended to fall
  // through without throwing so the caller's retry logic can run.
  throw error;
}
/**
 * Ensure an AsyncCallerParams object exists with our failure handler
 * installed, while letting any caller-supplied fields (including a
 * custom onFailedAttempt) take precedence over the default.
 */
export function ensureParams(params?: AsyncCallerParams): AsyncCallerParams {
  return {
    onFailedAttempt: failedAttemptHandler,
    ...(params ?? {}),
  };
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/anthropic.ts | import {
ChatGeneration,
ChatGenerationChunk,
ChatResult,
} from "@langchain/core/outputs";
import {
BaseMessage,
BaseMessageChunk,
AIMessageChunk,
MessageContentComplex,
MessageContentText,
MessageContent,
MessageContentImageUrl,
AIMessageFields,
AIMessageChunkFields,
} from "@langchain/core/messages";
import {
ToolCall,
ToolCallChunk,
ToolMessage,
} from "@langchain/core/messages/tool";
import {
AnthropicAPIConfig,
AnthropicContent,
AnthropicContentText,
AnthropicContentToolUse,
AnthropicMessage,
AnthropicMessageContent,
AnthropicMessageContentImage,
AnthropicMessageContentText,
AnthropicMessageContentToolResult,
AnthropicMessageContentToolResultContent,
AnthropicRequest,
AnthropicRequestSettings,
AnthropicResponseData,
AnthropicResponseMessage,
AnthropicStreamContentBlockDeltaEvent,
AnthropicStreamContentBlockStartEvent,
AnthropicStreamInputJsonDelta,
AnthropicStreamMessageDeltaEvent,
AnthropicStreamMessageStartEvent,
AnthropicStreamTextDelta,
AnthropicTool,
AnthropicToolChoice,
GeminiTool,
GoogleAIAPI,
GoogleAIModelParams,
GoogleAIModelRequestParams,
GoogleAIToolType,
GoogleLLMResponse,
} from "../types.js";
export function getAnthropicAPI(config?: AnthropicAPIConfig): GoogleAIAPI {
function partToString(part: AnthropicContent): string {
return "text" in part ? part.text : "";
}
function messageToString(message: AnthropicResponseMessage): string {
const content: AnthropicContent[] = message?.content ?? [];
const ret = content.reduce((acc, part) => {
const str = partToString(part);
return acc + str;
}, "");
return ret;
}
function responseToString(response: GoogleLLMResponse): string {
const data = response.data as AnthropicResponseData;
switch (data?.type) {
case "message":
return messageToString(data as AnthropicResponseMessage);
default:
throw Error(`Unknown type: ${data?.type}`);
}
}
/**
* Normalize the AIMessageChunk.
* If the fields are just a string - use that as content.
* If the content is an array of just text fields, turn them into a string.
* @param fields
*/
function newAIMessageChunk(fields: string | AIMessageFields): AIMessageChunk {
if (typeof fields === "string") {
return new AIMessageChunk(fields);
}
const ret: AIMessageFields = {
...fields,
};
if (Array.isArray(fields?.content)) {
let str: string | undefined = "";
fields.content.forEach((val) => {
if (str !== undefined && val.type === "text") {
str = `${str}${val.text}`;
} else {
str = undefined;
}
});
if (str) {
ret.content = str;
}
}
return new AIMessageChunk(ret);
}
function textContentToMessageFields(
textContent: AnthropicContentText
): AIMessageFields {
return {
content: [textContent],
};
}
function toolUseContentToMessageFields(
toolUseContent: AnthropicContentToolUse
): AIMessageFields {
const tool: ToolCall = {
id: toolUseContent.id,
name: toolUseContent.name,
type: "tool_call",
args: toolUseContent.input,
};
return {
content: [],
tool_calls: [tool],
};
}
function anthropicContentToMessageFields(
anthropicContent: AnthropicContent
): AIMessageFields | undefined {
const type = anthropicContent?.type;
switch (type) {
case "text":
return textContentToMessageFields(anthropicContent);
case "tool_use":
return toolUseContentToMessageFields(anthropicContent);
default:
return undefined;
}
}
function contentToMessage(
anthropicContent: AnthropicContent[]
): BaseMessageChunk {
const complexContent: MessageContentComplex[] = [];
const toolCalls: ToolCall[] = [];
anthropicContent.forEach((ac) => {
const messageFields = anthropicContentToMessageFields(ac);
if (messageFields?.content) {
complexContent.push(
...(messageFields.content as MessageContentComplex[])
);
}
if (messageFields?.tool_calls) {
toolCalls.push(...messageFields.tool_calls);
}
});
const ret: AIMessageFields = {
content: complexContent,
tool_calls: toolCalls,
};
return newAIMessageChunk(ret);
}
function messageToGenerationInfo(message: AnthropicResponseMessage) {
const usage = message?.usage;
const usageMetadata: Record<string, number> = {
input_tokens: usage?.input_tokens ?? 0,
output_tokens: usage?.output_tokens ?? 0,
total_tokens: (usage?.input_tokens ?? 0) + (usage?.output_tokens ?? 0),
};
return {
usage_metadata: usageMetadata,
finish_reason: message.stop_reason,
};
}
function messageToChatGeneration(
responseMessage: AnthropicResponseMessage
): ChatGenerationChunk {
const content: AnthropicContent[] = responseMessage?.content ?? [];
const text = messageToString(responseMessage);
const message = contentToMessage(content);
const generationInfo = messageToGenerationInfo(responseMessage);
return new ChatGenerationChunk({
text,
message,
generationInfo,
});
}
function messageStartToChatGeneration(
event: AnthropicStreamMessageStartEvent
): ChatGenerationChunk {
const responseMessage = event.message;
return messageToChatGeneration(responseMessage);
}
function messageDeltaToChatGeneration(
event: AnthropicStreamMessageDeltaEvent
): ChatGenerationChunk {
const responseMessage = event.delta;
return messageToChatGeneration(responseMessage as AnthropicResponseMessage);
}
function contentBlockStartTextToChatGeneration(
event: AnthropicStreamContentBlockStartEvent
): ChatGenerationChunk | null {
const content = event.content_block;
const message = contentToMessage([content]);
if (!message) {
return null;
}
const text = "text" in content ? content.text : "";
return new ChatGenerationChunk({
message,
text,
});
}
function contentBlockStartToolUseToChatGeneration(
event: AnthropicStreamContentBlockStartEvent
): ChatGenerationChunk | null {
const contentBlock = event.content_block as AnthropicContentToolUse;
const text: string = "";
const toolChunk: ToolCallChunk = {
type: "tool_call_chunk",
index: event.index,
name: contentBlock.name,
id: contentBlock.id,
};
if (
typeof contentBlock.input === "object" &&
Object.keys(contentBlock.input).length > 0
) {
toolChunk.args = JSON.stringify(contentBlock.input);
}
const toolChunks: ToolCallChunk[] = [toolChunk];
const content: MessageContentComplex[] = [
{
index: event.index,
...contentBlock,
},
];
const messageFields: AIMessageChunkFields = {
content,
tool_call_chunks: toolChunks,
};
const message = newAIMessageChunk(messageFields);
return new ChatGenerationChunk({
message,
text,
});
}
function contentBlockStartToChatGeneration(
event: AnthropicStreamContentBlockStartEvent
): ChatGenerationChunk | null {
switch (event.content_block.type) {
case "text":
return contentBlockStartTextToChatGeneration(event);
case "tool_use":
return contentBlockStartToolUseToChatGeneration(event);
default:
console.warn(
`Unexpected start content_block type: ${JSON.stringify(event)}`
);
return null;
}
}
function contentBlockDeltaTextToChatGeneration(
event: AnthropicStreamContentBlockDeltaEvent
): ChatGenerationChunk {
const delta = event.delta as AnthropicStreamTextDelta;
const text = delta?.text;
const message = newAIMessageChunk(text);
return new ChatGenerationChunk({
message,
text,
});
}
  /**
   * Convert a streaming "content_block_delta" event carrying an
   * input_json_delta (a fragment of a tool call's JSON arguments) into
   * a ChatGenerationChunk whose tool-call chunk appends those args.
   */
  function contentBlockDeltaInputJsonDeltaToChatGeneration(
    event: AnthropicStreamContentBlockDeltaEvent
  ): ChatGenerationChunk {
    const delta = event.delta as AnthropicStreamInputJsonDelta;
    // JSON argument fragments are not displayable text.
    const text: string = "";
    // No name/id here: those were set by the content_block_start chunk
    // at the same index; this chunk only extends `args`.
    const toolChunks: ToolCallChunk[] = [
      {
        index: event.index,
        args: delta.partial_json,
      },
    ];
    const content: MessageContentComplex[] = [
      {
        index: event.index,
        ...delta,
      },
    ];
    const messageFields: AIMessageChunkFields = {
      content,
      tool_call_chunks: toolChunks,
    };
    const message = newAIMessageChunk(messageFields);
    return new ChatGenerationChunk({
      message,
      text,
    });
  }
function contentBlockDeltaToChatGeneration(
event: AnthropicStreamContentBlockDeltaEvent
): ChatGenerationChunk | null {
switch (event.delta.type) {
case "text_delta":
return contentBlockDeltaTextToChatGeneration(event);
case "input_json_delta":
return contentBlockDeltaInputJsonDeltaToChatGeneration(event);
default:
console.warn(
`Unexpected delta content_block type: ${JSON.stringify(event)}`
);
return null;
}
}
  /**
   * Convert any Anthropic response payload — a complete message or any
   * streaming event — into a ChatGenerationChunk.
   * Returns null for events that carry no generable content
   * (ping, message_stop, content_block_stop) and for unrecognized event
   * types; throws on explicit "error" events.
   */
  function responseToChatGeneration(
    response: GoogleLLMResponse
  ): ChatGenerationChunk | null {
    const data = response.data as AnthropicResponseData;
    switch (data.type) {
      case "message":
        return messageToChatGeneration(data as AnthropicResponseMessage);
      case "message_start":
        return messageStartToChatGeneration(
          data as AnthropicStreamMessageStartEvent
        );
      case "message_delta":
        return messageDeltaToChatGeneration(
          data as AnthropicStreamMessageDeltaEvent
        );
      case "content_block_start":
        return contentBlockStartToChatGeneration(
          data as AnthropicStreamContentBlockStartEvent
        );
      case "content_block_delta":
        return contentBlockDeltaToChatGeneration(
          data as AnthropicStreamContentBlockDeltaEvent
        );
      case "ping":
      case "message_stop":
      case "content_block_stop":
        // These are ignorable
        return null;
      case "error":
        throw new Error(
          `Error while streaming results: ${JSON.stringify(data)}`
        );
      default:
        // We don't know what type this is, but Anthropic may have added
        // new ones without telling us. Don't error, but don't use them.
        console.warn("Unknown data for responseToChatGeneration", data);
        // throw new Error(`Unknown response type: ${data.type}`);
        return null;
    }
  }
function chunkToString(chunk: BaseMessageChunk): string {
if (chunk === null) {
return "";
} else if (typeof chunk.content === "string") {
return chunk.content;
} else if (chunk.content.length === 0) {
return "";
} else if (chunk.content[0].type === "text") {
return chunk.content[0].text;
} else {
throw new Error(`Unexpected chunk: ${chunk}`);
}
}
  /**
   * Convert a complete (non-streaming) Anthropic response into a
   * BaseMessage built from its content blocks.
   */
  function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage {
    const data = response.data as AnthropicResponseMessage;
    const content: AnthropicContent[] = data?.content ?? [];
    return contentToMessage(content);
  }
function responseToChatResult(response: GoogleLLMResponse): ChatResult {
const message = response.data as AnthropicResponseMessage;
const generations: ChatGeneration[] = [];
const gen = responseToChatGeneration(response);
if (gen) {
generations.push(gen);
}
const llmOutput = messageToGenerationInfo(message);
return {
generations,
llmOutput,
};
}
  /**
   * The anthropic_version to send with requests; falls back to the
   * Vertex AI Claude API version when not set in the config.
   */
  function formatAnthropicVersion(): string {
    return config?.version ?? "vertex-2023-10-16";
  }
  /**
   * LangChain text parts are already shaped like Anthropic text blocks,
   * so this is a pass-through.
   */
  function textContentToAnthropicContent(
    content: MessageContentText
  ): AnthropicMessageContentText {
    return content;
  }
function extractMimeType(
str: string
): { media_type: string; data: string } | null {
if (str.startsWith("data:")) {
return {
media_type: str.split(":")[1].split(";")[0],
data: str.split(",")[1],
};
}
return null;
}
function imageContentToAnthropicContent(
content: MessageContentImageUrl
): AnthropicMessageContentImage | undefined {
const dataUrl = content.image_url;
const url = typeof dataUrl === "string" ? dataUrl : dataUrl?.url;
const urlInfo = extractMimeType(url);
if (!urlInfo) {
return undefined;
}
return {
type: "image",
source: {
type: "base64",
...urlInfo,
},
};
}
function contentComplexToAnthropicContent(
content: MessageContentComplex
): AnthropicMessageContent | undefined {
const type = content?.type;
switch (type) {
case "text":
return textContentToAnthropicContent(content as MessageContentText);
case "image_url":
return imageContentToAnthropicContent(
content as MessageContentImageUrl
);
default:
console.warn(`Unexpected content type: ${type}`);
return undefined;
}
}
function contentToAnthropicContent(
content: MessageContent
): AnthropicMessageContent[] {
const ret: AnthropicMessageContent[] = [];
const ca =
typeof content === "string" ? [{ type: "text", text: content }] : content;
ca.forEach((complex) => {
const ac = contentComplexToAnthropicContent(complex);
if (ac) {
ret.push(ac);
}
});
return ret;
}
  /**
   * Build an Anthropic message with the given role from a LangChain
   * message's content.
   */
  function baseRoleToAnthropicMessage(
    base: BaseMessage,
    role: string
  ): AnthropicMessage {
    const content = contentToAnthropicContent(base.content);
    return {
      role,
      content,
    };
  }
  /**
   * Convert a LangChain ToolMessage into an Anthropic message carrying
   * a single tool_result block that echoes the originating tool call id.
   */
  function toolMessageToAnthropicMessage(base: ToolMessage): AnthropicMessage {
    // Tool results are sent back to Anthropic in a "user" turn.
    const role = "user";
    const toolUseId = base.tool_call_id;
    const toolContent = contentToAnthropicContent(
      base.content
    ) as AnthropicMessageContentToolResultContent[];
    const content: AnthropicMessageContentToolResult[] = [
      {
        type: "tool_result",
        tool_use_id: toolUseId,
        content: toolContent,
      },
    ];
    return {
      role,
      content,
    };
  }
function baseToAnthropicMessage(
base: BaseMessage
): AnthropicMessage | undefined {
const type = base._getType();
switch (type) {
case "human":
return baseRoleToAnthropicMessage(base, "user");
case "ai":
return baseRoleToAnthropicMessage(base, "assistant");
case "tool":
return toolMessageToAnthropicMessage(base as ToolMessage);
default:
return undefined;
}
}
function formatMessages(input: BaseMessage[]): AnthropicMessage[] {
const ret: AnthropicMessage[] = [];
input.forEach((baseMessage) => {
const anthropicMessage = baseToAnthropicMessage(baseMessage);
if (anthropicMessage) {
ret.push(anthropicMessage);
}
});
return ret;
}
function formatSettings(
parameters: GoogleAIModelRequestParams
): AnthropicRequestSettings {
const ret: AnthropicRequestSettings = {
stream: parameters?.streaming ?? false,
max_tokens: parameters?.maxOutputTokens ?? 8192,
};
if (parameters.topP) {
ret.top_p = parameters.topP;
}
if (parameters.topK) {
ret.top_k = parameters.topK;
}
if (parameters.temperature) {
ret.temperature = parameters.temperature;
}
if (parameters.stopSequences) {
ret.stop_sequences = parameters.stopSequences;
}
return ret;
}
function contentComplexArrayToText(
contentArray: MessageContentComplex[]
): string {
let ret = "";
contentArray.forEach((content) => {
const contentType = content?.type;
if (contentType === "text") {
const textContent = content as MessageContentText;
ret = `${ret}\n${textContent.text}`;
}
});
return ret;
}
function formatSystem(input: BaseMessage[]): string {
let ret = "";
input.forEach((message) => {
if (message._getType() === "system") {
const content = message?.content;
const contentString =
typeof content === "string"
? (content as string)
: contentComplexArrayToText(content as MessageContentComplex[]);
ret = `${ret}\n${contentString}`;
}
});
return ret;
}
  /**
   * Convert a Gemini-style tool (a set of functionDeclarations) into
   * the equivalent list of Anthropic tool definitions. Tools without
   * functionDeclarations are logged and yield an empty list.
   */
  function formatGeminiTool(tool: GeminiTool): AnthropicTool[] {
    if (Object.hasOwn(tool, "functionDeclarations")) {
      const funcs = tool?.functionDeclarations ?? [];
      return funcs.map((func) => {
        // NOTE(review): `parameters` is asserted non-null; a declaration
        // without parameters would leave input_schema undefined — confirm
        // the Anthropic endpoint accepts that.
        const inputSchema = func.parameters!;
        return {
          // type: "tool", // This may only be valid for models 20241022+
          name: func.name,
          description: func.description,
          input_schema: inputSchema,
        };
      });
    } else {
      console.warn(
        `Unable to format GeminiTool: ${JSON.stringify(tool, null, 1)}`
      );
      return [];
    }
  }
function formatTool(tool: GoogleAIToolType): AnthropicTool[] {
if (Object.hasOwn(tool, "name")) {
return [tool as AnthropicTool];
} else {
return formatGeminiTool(tool as GeminiTool);
}
}
function formatTools(
parameters: GoogleAIModelRequestParams
): AnthropicTool[] {
const tools: GoogleAIToolType[] = parameters?.tools ?? [];
const ret: AnthropicTool[] = [];
tools.forEach((tool) => {
const anthropicTools = formatTool(tool);
anthropicTools.forEach((anthropicTool) => {
if (anthropicTool) {
ret.push(anthropicTool);
}
});
});
return ret;
}
function formatToolChoice(
parameters: GoogleAIModelRequestParams
): AnthropicToolChoice | undefined {
const choice = parameters?.tool_choice;
if (!choice) {
return undefined;
} else if (typeof choice === "object") {
return choice as AnthropicToolChoice;
} else {
switch (choice) {
case "any":
case "auto":
return {
type: choice,
};
case "none":
return undefined;
default:
return {
type: "tool",
name: choice,
};
}
}
}
  /**
   * Assemble the full Anthropic request body from the LangChain message
   * history and request parameters.
   * Note: tools are omitted entirely when tool_choice is "none".
   */
  async function formatData(
    input: unknown,
    parameters: GoogleAIModelRequestParams
  ): Promise<AnthropicRequest> {
    const typedInput = input as BaseMessage[];
    const anthropicVersion = formatAnthropicVersion();
    const messages = formatMessages(typedInput);
    const settings = formatSettings(parameters);
    // System messages are pulled out of the history into the dedicated
    // `system` field (formatMessages skips them).
    const system = formatSystem(typedInput);
    const tools = formatTools(parameters);
    const toolChoice = formatToolChoice(parameters);
    const ret: AnthropicRequest = {
      anthropic_version: anthropicVersion,
      messages,
      ...settings,
    };
    if (tools && tools.length && parameters?.tool_choice !== "none") {
      ret.tools = tools;
    }
    if (toolChoice) {
      ret.tool_choice = toolChoice;
    }
    if (system?.length) {
      ret.system = system;
    }
    return ret;
  }
return {
responseToString,
responseToChatGeneration,
chunkToString,
responseToBaseMessage,
responseToChatResult,
formatData,
};
}
/**
 * Validate parameters for a Claude model request.
 * Currently a no-op placeholder (see FIXME); it exists so Claude models
 * share the same validation entry point as Gemini models.
 */
export function validateClaudeParams(_params: GoogleAIModelParams): void {
  // FIXME - validate the parameters
}
/**
 * Whether the model name identifies a Claude model
 * (case-insensitive "claude" prefix).
 */
export function isModelClaude(modelName: string): boolean {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("claude");
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/palm.ts | export interface GoogleVertexAIBasePrediction {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
safetyAttributes?: any;
}
/**
 * Shape of a (legacy PaLM-style) Vertex AI prediction response body:
 * a list of typed predictions.
 */
export interface GoogleVertexAILLMPredictions<
  PredictionType extends GoogleVertexAIBasePrediction
> {
  predictions: PredictionType[];
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/safety.ts | import { GoogleLLMResponse } from "../types.js";
/**
 * Error thrown when a Google AI response fails a safety check.
 * Carries the full raw response so a safety handler can inspect it.
 */
export class GoogleAISafetyError extends Error {
  // The raw response that triggered the safety failure.
  response: GoogleLLMResponse;

  // Presumably a replacement reply a safety handler may substitute for
  // the blocked one — confirm against the handlers that set it.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  reply: any = "";

  constructor(response: GoogleLLMResponse, message?: string) {
    super(message);
    this.response = response;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/index.ts | export * from "./common.js";
export * from "./failed_handler.js";
export * from "./gemini.js";
export * from "./zod_to_gemini_parameters.js";
export * from "./palm.js";
export * from "./safety.js";
export * from "./stream.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/stream.ts | import { GenerationChunk } from "@langchain/core/outputs";
export interface AbstractStream {
/**
* Add more text to the buffer
* @param data
*/
appendBuffer(data: string): void;
/**
* Indicate that there is no more text to be added to the buffer
* (ie - our source material is done)
*/
closeBuffer(): void;
/**
* Get the next chunk that is coming from the stream.
* This chunk may be null, usually indicating the last chunk in the stream.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
nextChunk(): Promise<any>;
/**
* Is the stream done?
* A stream is only done if all of the following are true:
* - There is no more data to be added to the text buffer
* - There is no more data in the text buffer
* - There are no chunks that are waiting to be consumed
*/
get streamDone(): boolean;
}
/**
 * Recursively wrap a plain JavaScript value in the protobuf-style
 * "complex value" encoding used by some Vertex AI endpoints:
 * arrays -> { list_val }, objects -> { struct_val }, integers ->
 * { int_val }, other numbers -> { float_val }, and everything else
 * (strings, booleans, ...) -> { string_val: [value] }.
 * null / undefined produce undefined.
 */
export function complexValue(value: unknown): unknown {
  if (value === null || typeof value === "undefined") {
    // I dunno what to put here. An error, probably
    return undefined;
  }
  if (Array.isArray(value)) {
    return { list_val: value.map((item) => complexValue(item)) };
  }
  if (typeof value === "object") {
    const struct: Record<string, unknown> = {};
    for (const [key, item] of Object.entries(
      value as Record<string, unknown>
    )) {
      struct[key] = complexValue(item);
    }
    return { struct_val: struct };
  }
  if (typeof value === "number") {
    return Number.isInteger(value) ? { int_val: value } : { float_val: value };
  }
  return {
    string_val: [value],
  };
}
export function simpleValue(val: unknown): unknown {
if (val && typeof val === "object" && !Array.isArray(val)) {
// eslint-disable-next-line no-prototype-builtins
if (val.hasOwnProperty("stringVal")) {
return (val as { stringVal: string[] }).stringVal[0];
// eslint-disable-next-line no-prototype-builtins
} else if (val.hasOwnProperty("boolVal")) {
return (val as { boolVal: boolean[] }).boolVal[0];
// eslint-disable-next-line no-prototype-builtins
} else if (val.hasOwnProperty("listVal")) {
const { listVal } = val as { listVal: unknown[] };
return listVal.map((aval) => simpleValue(aval));
// eslint-disable-next-line no-prototype-builtins
} else if (val.hasOwnProperty("structVal")) {
const ret: Record<string, unknown> = {};
const struct = (val as { structVal: Record<string, unknown> }).structVal;
Object.keys(struct).forEach((key) => {
ret[key] = simpleValue(struct[key]);
});
return ret;
} else {
const ret: Record<string, unknown> = {};
const struct = val as Record<string, unknown>;
Object.keys(struct).forEach((key) => {
ret[key] = simpleValue(struct[key]);
});
return ret;
}
} else if (Array.isArray(val)) {
return val.map((aval) => simpleValue(aval));
} else {
return val;
}
}
/**
 * An AbstractStream implementation that incrementally parses a text
 * stream containing a JSON array of objects, emitting each complete
 * object as a chunk.
 *
 * Chunks are delivered through a single-consumer queue: _handleChunk
 * enqueues (or resolves a waiting promise), nextChunk dequeues (or
 * waits). A null chunk marks the end of the stream.
 */
export class JsonStream implements AbstractStream {
  // Text received but not yet parsed into objects.
  _buffer = "";

  // True while the producer may still append more text.
  _bufferOpen = true;

  // Tracks whether we still need to skip the array's opening "[".
  _firstRun = true;

  /**
   * Add data to the buffer. This may cause chunks to be generated, if available.
   * @param data
   */
  appendBuffer(data: string): void {
    this._buffer += data;
    // Our first time, skip to the opening of the array
    if (this._firstRun) {
      this._skipTo("[");
      this._firstRun = false;
    }
    this._parseBuffer();
  }

  /**
   * Indicate there is no more data that will be added to the text buffer.
   * This should be called when all the data has been read and added to indicate
   * that we should process everything remaining in the buffer.
   */
  closeBuffer(): void {
    this._bufferOpen = false;
    this._parseBuffer();
  }

  /**
   * Skip characters in the buffer till we get to the start of an object.
   * Then attempt to read a full object.
   * If we do read a full object, turn it into a chunk and send it to the chunk handler.
   * Repeat this for as much as we can.
   */
  _parseBuffer(): void {
    let obj = null;
    do {
      this._skipTo("{");
      obj = this._getFullObject();
      if (obj !== null) {
        const chunk = this._simplifyObject(obj);
        this._handleChunk(chunk);
      }
    } while (obj !== null);
    if (!this._bufferOpen) {
      // No more data will be added, and we have parsed everything we could,
      // so everything else is garbage.
      this._handleChunk(null);
      this._buffer = "";
    }
  }

  /**
   * If the string is present, move the start of the buffer to the first occurrence
   * of that string. This is useful for skipping over elements or parts that we're not
   * really interested in parsing. (ie - the opening characters, comma separators, etc.)
   * @param start The string to start the buffer with
   */
  _skipTo(start: string): void {
    const index = this._buffer.indexOf(start);
    // index === 0 needs no slicing; -1 (not found) leaves the buffer
    // untouched so we can retry once more data arrives.
    if (index > 0) {
      this._buffer = this._buffer.slice(index);
    }
  }

  /**
   * Given what is in the buffer, parse a single object out of it.
   * If a complete object isn't available, return null.
   * Assumes that we are at the start of an object to parse.
   */
  _getFullObject(): object | null {
    let ret: object | null = null;
    // Loop while we don't have something to return AND we have something in the buffer
    let index = 0;
    while (ret === null && this._buffer.length > index) {
      // Advance to the next close bracket after our current index
      index = this._buffer.indexOf("}", index + 1);
      // If we don't find one, exit with null
      if (index === -1) {
        return null;
      }
      // If we have one, try to turn it into an object to return
      try {
        const objStr = this._buffer.substring(0, index + 1);
        ret = JSON.parse(objStr);
        // We only get here if it parsed it ok
        // If we did turn it into an object, remove it from the buffer
        this._buffer = this._buffer.slice(index + 1);
      } catch (xx) {
        // It didn't parse it correctly, so we swallow the exception and continue
      }
    }
    return ret;
  }

  // Hook for subclasses to transform each parsed object (identity here).
  _simplifyObject(obj: unknown): object {
    return obj as object;
  }

  // Set up a potential Promise that the handler can resolve.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkResolution: (chunk: any) => void;

  // If there is no Promise (it is null), the handler must add it to the queue
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkPending: Promise<any> | null = null;

  // A queue that will collect chunks while there is no Promise
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkQueue: any[] = [];

  /**
   * Register that we have another chunk available for consumption.
   * If we are waiting for a chunk, resolve the promise waiting for it immediately.
   * If not, then add it to the queue.
   * @param chunk
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _handleChunk(chunk: any): void {
    if (this._chunkPending) {
      this._chunkResolution(chunk);
      this._chunkPending = null;
    } else {
      this._chunkQueue.push(chunk);
    }
  }

  /**
   * Get the next chunk that is coming from the stream.
   * This chunk may be null, usually indicating the last chunk in the stream.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async nextChunk(): Promise<any> {
    if (this._chunkQueue.length > 0) {
      // If there is data in the queue, return the next queue chunk
      return this._chunkQueue.shift() as GenerationChunk;
    } else {
      // Otherwise, set up a promise that handleChunk will cause to be resolved
      this._chunkPending = new Promise((resolve) => {
        this._chunkResolution = resolve;
      });
      return this._chunkPending;
    }
  }

  /**
   * Is the stream done?
   * A stream is only done if all of the following are true:
   * - There is no more data to be added to the text buffer
   * - There is no more data in the text buffer
   * - There are no chunks that are waiting to be consumed
   */
  get streamDone(): boolean {
    return (
      !this._bufferOpen &&
      this._buffer.length === 0 &&
      this._chunkQueue.length === 0 &&
      this._chunkPending === null
    );
  }
}
/**
 * A JsonStream whose objects use the protobuf-style "complex value"
 * encoding; each parsed object is unwrapped via simpleValue().
 */
export class ComplexJsonStream extends JsonStream {
  _simplifyObject(obj: unknown): object {
    return simpleValue(obj) as object;
  }
}
/**
 * Wraps a base AbstractStream (the parser) and feeds it the decoded
 * text of a web ReadableStream body, closing the parser's buffer when
 * the body is exhausted.
 */
export class ReadableAbstractStream implements AbstractStream {
  private baseStream: AbstractStream;

  decoder: TextDecoder;

  constructor(baseStream: AbstractStream, body: ReadableStream | null) {
    this.baseStream = baseStream;
    this.decoder = new TextDecoder("utf-8");
    if (body) {
      // Fire-and-forget: the pump runs until the body is exhausted.
      void this.run(body);
    } else {
      console.error("Unexpected empty body while streaming");
    }
  }

  appendBuffer(data: string): void {
    return this.baseStream.appendBuffer(data);
  }

  closeBuffer(): void {
    return this.baseStream.closeBuffer();
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  nextChunk(): Promise<any> {
    return this.baseStream.nextChunk();
  }

  get streamDone(): boolean {
    return this.baseStream.streamDone;
  }

  /**
   * Pump the body: decode each binary chunk as streaming UTF-8 and
   * append it to the base stream, then flush and close at the end.
   */
  async run(body: ReadableStream) {
    const reader = body.getReader();
    let isDone = false;
    while (!isDone) {
      const { value, done } = await reader.read();
      if (!done) {
        const svalue = this.decoder.decode(value, { stream: true });
        this.appendBuffer(svalue);
      } else {
        isDone = done;
        // Flush any partial multi-byte sequence still buffered by the
        // streaming decoder so the final characters are not lost.
        const remainder = this.decoder.decode();
        if (remainder.length > 0) {
          this.appendBuffer(remainder);
        }
        this.closeBuffer();
      }
    }
  }
}
/** A ReadableAbstractStream that parses its body as a JSON array of objects. */
export class ReadableJsonStream extends ReadableAbstractStream {
  constructor(body: ReadableStream | null) {
    super(new JsonStream(), body);
  }
}
/**
 * An AbstractStream implementation that parses a Server-Sent Events
 * (SSE) text stream into per-event field records.
 *
 * Events are separated by blank lines ("\n\n"); each complete event is
 * parsed into a Record of field name -> value. A null chunk marks the
 * end of the stream.
 */
export class SseStream implements AbstractStream {
  // Text received but not yet split into complete events.
  _buffer = "";

  // True while the producer may still append more text.
  _bufferOpen = true;

  appendBuffer(data: string): void {
    this._buffer += data;
    this._parseBuffer();
  }

  closeBuffer(): void {
    this._bufferOpen = false;
    this._parseBuffer();
  }

  /**
   * Attempt to load an entire event.
   * For each entire event we load,
   * send them to be handled.
   */
  _parseBuffer(): void {
    const events = this._buffer.split(/\n\n/);
    // The final element is a (possibly empty) incomplete event; keep it
    // buffered for the next append.
    this._buffer = events.pop() ?? "";
    events.forEach((event) => this._handleEvent(event.trim()));
    if (!this._bufferOpen) {
      // No more data will be added, and we have parsed
      // everything. So dump the rest.
      this._handleEvent(null);
      this._buffer = "";
    }
  }

  /**
   * Given an event string, get all the fields
   * in the event. It is assumed there is one field
   * per line, but that field names can be duplicated,
   * indicating to append the new value to the previous value
   * @param event
   */
  _parseEvent(event: string | null): Record<string, string> | null {
    if (!event || event.trim() === "") {
      return null;
    }
    const ret: Record<string, string> = {};
    const lines = event.split(/\n/);
    lines.forEach((line) => {
      // NOTE(review): this pattern requires a space after the colon
      // ("field: value"); the SSE spec also permits "field:value".
      // Presumably fine for the servers this targets — confirm.
      const match = line.match(/^([^:]+): \s*(.+)\n*$/);
      if (match && match.length === 3) {
        const key = match[1];
        const val = match[2];
        const cur = ret[key] ?? "";
        // NOTE(review): repeated fields are concatenated directly; the
        // SSE spec joins repeated "data" lines with "\n" — verify this
        // matches what downstream JSON parsing expects.
        ret[key] = `${cur}${val}`;
      }
    });
    return ret;
  }

  // Set up a potential Promise that the handler can resolve.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkResolution: (chunk: any) => void;

  // If there is no Promise (it is null), the handler must add it to the queue
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkPending: Promise<any> | null = null;

  // A queue that will collect chunks while there is no Promise
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _chunkQueue: any[] = [];

  // Resolve a waiting consumer with the parsed event, or queue it.
  _handleEvent(event: string | null): void {
    const chunk = this._parseEvent(event);
    if (this._chunkPending) {
      this._chunkResolution(chunk);
      this._chunkPending = null;
    } else {
      this._chunkQueue.push(chunk);
    }
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async nextChunk(): Promise<any> {
    if (this._chunkQueue.length > 0) {
      // If there is data in the queue, return the next queue chunk
      return this._chunkQueue.shift() as Record<string, string>;
    } else {
      // Otherwise, set up a promise that handleChunk will cause to be resolved
      this._chunkPending = new Promise((resolve) => {
        this._chunkResolution = resolve;
      });
      return this._chunkPending;
    }
  }

  /**
   * Done only when the producer has closed the buffer, the buffer is
   * empty, and no chunks remain queued or pending.
   */
  get streamDone(): boolean {
    return (
      !this._bufferOpen &&
      this._buffer.length === 0 &&
      this._chunkQueue.length === 0 &&
      this._chunkPending === null
    );
  }
}
/** A ReadableAbstractStream that parses its body as Server-Sent Events. */
export class ReadableSseStream extends ReadableAbstractStream {
  constructor(body: ReadableStream | null) {
    super(new SseStream(), body);
  }
}
/**
 * An SSE stream whose events carry JSON in one field ("data" by
 * default); nextChunk returns the parsed JSON value, or null when the
 * field is missing (e.g. at end of stream).
 */
export class SseJsonStream extends SseStream {
  // Name of the event field expected to hold the JSON payload.
  _jsonAttribute: string = "data";

  constructor(jsonAttribute?: string) {
    super();
    this._jsonAttribute = jsonAttribute ?? this._jsonAttribute;
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async nextChunk(): Promise<any> {
    const eventRecord = (await super.nextChunk()) as Record<string, string>;
    const json = eventRecord?.[this._jsonAttribute];
    if (!json) {
      return null;
    } else {
      return JSON.parse(json);
    }
  }
}
/**
 * A ReadableAbstractStream that parses its body as Server-Sent Events
 * whose "data" field contains JSON.
 */
export class ReadableSseJsonStream extends ReadableAbstractStream {
  constructor(body: ReadableStream | null) {
    super(new SseJsonStream(), body);
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/common.ts | import { isOpenAITool } from "@langchain/core/language_models/base";
import { isLangChainTool } from "@langchain/core/utils/function_calling";
import { isModelGemini, validateGeminiParams } from "./gemini.js";
import type {
GeminiFunctionDeclaration,
GeminiFunctionSchema,
GeminiTool,
GoogleAIBaseLanguageModelCallOptions,
GoogleAIModelParams,
GoogleAIModelRequestParams,
GoogleAIToolType,
VertexModelFamily,
} from "../types.js";
import {
jsonSchemaToGeminiParameters,
zodToGeminiParameters,
} from "./zod_to_gemini_parameters.js";
import { isModelClaude, validateClaudeParams } from "./anthropic.js";
/**
 * Merge constructor-level params and per-call options into a fresh
 * request-params object (does not mutate either input).
 */
export function copyAIModelParams(
  params: GoogleAIModelParams | undefined,
  options: GoogleAIBaseLanguageModelCallOptions | undefined
): GoogleAIModelRequestParams {
  return copyAIModelParamsInto(params, options, {});
}
/**
 * Normalize the user-facing tool_choice / allowed_function_names
 * options into the internal representation:
 * - no choice + allow-list => force tool use ("any") restricted to it
 * - "any" / "auto" / "none" => passed through with the allow-list
 * - any other string => force ("any") that specific function
 * - object values are not supported and throw
 */
function processToolChoice(
  toolChoice: GoogleAIBaseLanguageModelCallOptions["tool_choice"],
  allowedFunctionNames: GoogleAIBaseLanguageModelCallOptions["allowed_function_names"]
):
  | {
      tool_choice: "any" | "auto" | "none";
      allowed_function_names?: string[];
    }
  | undefined {
  if (!toolChoice) {
    return allowedFunctionNames
      ? {
          // Allowed func names is passed, so force the model to use a tool.
          tool_choice: "any",
          allowed_function_names: allowedFunctionNames,
        }
      : undefined;
  }
  if (typeof toolChoice !== "string") {
    throw new Error("Object inputs for tool_choice not supported.");
  }
  if (toolChoice === "any" || toolChoice === "auto" || toolChoice === "none") {
    return {
      tool_choice: toolChoice,
      allowed_function_names: allowedFunctionNames,
    };
  }
  // A bare string names a specific function: force the model to call it.
  return {
    tool_choice: "any",
    allowed_function_names: [...(allowedFunctionNames ?? []), toolChoice],
  };
}
/**
 * Merge a heterogeneous list of tool definitions (Gemini-style,
 * LangChain structured tools, or OpenAI-style function tools) into a
 * single GeminiTool whose functionDeclarations contains them all.
 * Unrecognized entries are silently ignored.
 */
export function convertToGeminiTools(tools: GoogleAIToolType[]): GeminiTool[] {
  // All declarations are collected into one GeminiTool entry.
  const geminiTools: GeminiTool[] = [
    {
      functionDeclarations: [],
    },
  ];
  tools.forEach((tool) => {
    if (
      "functionDeclarations" in tool &&
      Array.isArray(tool.functionDeclarations)
    ) {
      // Already Gemini-shaped: copy the declarations over directly.
      const funcs: GeminiFunctionDeclaration[] = tool.functionDeclarations;
      geminiTools[0].functionDeclarations?.push(...funcs);
    } else if (isLangChainTool(tool)) {
      // LangChain structured tool: derive the schema from its zod schema.
      const jsonSchema = zodToGeminiParameters(tool.schema);
      geminiTools[0].functionDeclarations?.push({
        name: tool.name,
        description: tool.description ?? `A function available to call.`,
        parameters: jsonSchema as GeminiFunctionSchema,
      });
    } else if (isOpenAITool(tool)) {
      // OpenAI-style tool: translate its JSON-schema parameters.
      geminiTools[0].functionDeclarations?.push({
        name: tool.function.name,
        description:
          tool.function.description ?? `A function available to call.`,
        parameters: jsonSchemaToGeminiParameters(tool.function.parameters),
      });
    }
  });
  return geminiTools;
}
/**
 * Copy model parameters into `target`, with per-field precedence:
 * call options first, then constructor params, then whatever is
 * already on the target. Also normalizes tool_choice and converts
 * tools into Gemini request form. Mutates and returns `target`.
 */
export function copyAIModelParamsInto(
  params: GoogleAIModelParams | undefined,
  options: GoogleAIBaseLanguageModelCallOptions | undefined,
  target: GoogleAIModelParams
): GoogleAIModelRequestParams {
  const ret: GoogleAIModelRequestParams = target || {};
  // `model` is preferred over the legacy `modelName` alias; both are
  // kept populated on the result.
  const model = options?.model ?? params?.model ?? target.model;
  ret.modelName =
    model ?? options?.modelName ?? params?.modelName ?? target.modelName;
  ret.model = model;
  ret.temperature =
    options?.temperature ?? params?.temperature ?? target.temperature;
  ret.maxOutputTokens =
    options?.maxOutputTokens ??
    params?.maxOutputTokens ??
    target.maxOutputTokens;
  ret.topP = options?.topP ?? params?.topP ?? target.topP;
  ret.topK = options?.topK ?? params?.topK ?? target.topK;
  ret.stopSequences =
    options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
  ret.safetySettings =
    options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
  ret.convertSystemMessageToHumanContent =
    options?.convertSystemMessageToHumanContent ??
    params?.convertSystemMessageToHumanContent ??
    target?.convertSystemMessageToHumanContent;
  ret.responseMimeType =
    options?.responseMimeType ??
    params?.responseMimeType ??
    target?.responseMimeType;
  ret.streaming = options?.streaming ?? params?.streaming ?? target?.streaming;
  // Tool choice and tools come from call options only.
  const toolChoice = processToolChoice(
    options?.tool_choice,
    options?.allowed_function_names
  );
  if (toolChoice) {
    ret.tool_choice = toolChoice.tool_choice;
    ret.allowed_function_names = toolChoice.allowed_function_names;
  }
  const tools = options?.tools;
  if (tools) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    ret.tools = convertToGeminiTools(tools as Record<string, any>[]);
  }
  return ret;
}
/**
 * Classify a model name into its Vertex model family ("gemini" or
 * "claude"); unknown or missing names yield null.
 */
export function modelToFamily(
  modelName: string | undefined
): VertexModelFamily {
  if (!modelName) {
    return null;
  }
  if (isModelGemini(modelName)) {
    return "gemini";
  }
  if (isModelClaude(modelName)) {
    return "claude";
  }
  return null;
}
/**
 * Map a model name to the Vertex AI publisher that hosts it
 * ("google", "anthropic", or "unknown").
 */
export function modelToPublisher(modelName: string | undefined): string {
  const family = modelToFamily(modelName);
  if (family === "gemini" || family === "palm") {
    return "google";
  }
  if (family === "claude") {
    return "anthropic";
  }
  return "unknown";
}
/**
 * Validate model params by dispatching to the validator for the
 * model's family (Gemini or Claude).
 * @throws when the model name does not map to a known family.
 */
export function validateModelParams(
  params: GoogleAIModelParams | undefined
): void {
  const testParams: GoogleAIModelParams = params ?? {};
  // Accept either `model` or the legacy `modelName` alias.
  const model = testParams.model ?? testParams.modelName;
  switch (modelToFamily(model)) {
    case "gemini":
      return validateGeminiParams(testParams);
    case "claude":
      return validateClaudeParams(testParams);
    default:
      throw new Error(
        `Unable to verify model params: ${JSON.stringify(params)}`
      );
  }
}
/**
 * Copy constructor-level params into `target` (no call options), then
 * validate the result. Mutates and returns `target`.
 */
export function copyAndValidateModelParamsInto(
  params: GoogleAIModelParams | undefined,
  target: GoogleAIModelParams
): GoogleAIModelParams {
  copyAIModelParamsInto(params, undefined, target);
  validateModelParams(target);
  return target;
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-common/src | lc_public_repos/langchainjs/libs/langchain-google-common/src/utils/gemini.ts | import { v4 as uuidv4 } from "uuid";
import {
AIMessage,
AIMessageChunk,
AIMessageChunkFields,
BaseMessage,
BaseMessageChunk,
BaseMessageFields,
MessageContent,
MessageContentComplex,
MessageContentImageUrl,
MessageContentText,
SystemMessage,
ToolMessage,
UsageMetadata,
isAIMessage,
} from "@langchain/core/messages";
import {
ChatGeneration,
ChatGenerationChunk,
ChatResult,
} from "@langchain/core/outputs";
import { ToolCallChunk } from "@langchain/core/messages/tool";
import { StructuredToolParams } from "@langchain/core/tools";
import { isLangChainTool } from "@langchain/core/utils/function_calling";
import type {
GoogleLLMResponse,
GoogleAIModelParams,
GeminiPartText,
GeminiPartInlineData,
GeminiPartFileData,
GeminiPart,
GeminiRole,
GeminiContent,
GenerateContentResponseData,
GoogleAISafetyHandler,
GeminiPartFunctionCall,
GoogleAIAPI,
GeminiAPIConfig,
} from "../types.js";
import { GoogleAISafetyError } from "./safety.js";
import { MediaBlob } from "../experimental/utils/media_core.js";
import {
GeminiFunctionDeclaration,
GeminiGenerationConfig,
GeminiRequest,
GeminiSafetySetting,
GeminiTool,
GoogleAIModelRequestParams,
GoogleAIToolType,
} from "../types.js";
import { zodToGeminiParameters } from "./zod_to_gemini_parameters.js";
export interface FunctionCall {
name: string;
arguments: string;
}
export interface ToolCall {
id: string;
type: "function";
function: FunctionCall;
}
export interface FunctionCallRaw {
name: string;
arguments: object;
}
export interface ToolCallRaw {
id: string;
type: "function";
function: FunctionCallRaw;
}
export interface DefaultGeminiSafetySettings {
errorFinish?: string[];
}
/**
 * Default safety handler for Gemini responses: throws a
 * GoogleAISafetyError when the prompt was blocked, or when a candidate
 * finished for one of the configured error finish reasons
 * (SAFETY / RECITATION / OTHER by default).
 */
export class DefaultGeminiSafetyHandler implements GoogleAISafetyHandler {
  // Finish reasons that should be treated as safety errors.
  errorFinish = ["SAFETY", "RECITATION", "OTHER"];

  constructor(settings?: DefaultGeminiSafetySettings) {
    this.errorFinish = settings?.errorFinish ?? this.errorFinish;
  }

  /**
   * Throw if the prompt itself was blocked
   * (promptFeedback.blockReason is set).
   */
  handleDataPromptFeedback(
    response: GoogleLLMResponse,
    data: GenerateContentResponseData
  ): GenerateContentResponseData {
    // Check to see if our prompt was blocked in the first place
    const promptFeedback = data?.promptFeedback;
    const blockReason = promptFeedback?.blockReason;
    if (blockReason) {
      throw new GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`);
    }
    return data;
  }

  /**
   * Throw if the first candidate finished for a configured error reason.
   */
  handleDataFinishReason(
    response: GoogleLLMResponse,
    data: GenerateContentResponseData
  ): GenerateContentResponseData {
    const firstCandidate = data?.candidates?.[0];
    const finishReason = firstCandidate?.finishReason;
    if (this.errorFinish.includes(finishReason)) {
      throw new GoogleAISafetyError(response, `Finish reason: ${finishReason}`);
    }
    return data;
  }

  /**
   * Run all per-payload checks; returns the data unchanged if safe.
   */
  handleData(
    response: GoogleLLMResponse,
    data: GenerateContentResponseData
  ): GenerateContentResponseData {
    let ret = data;
    ret = this.handleDataPromptFeedback(response, ret);
    ret = this.handleDataFinishReason(response, ret);
    return ret;
  }

  /**
   * Entry point: validate a whole response, whose data may be a stream
   * (passed through unchanged), an array of payloads, or one payload.
   */
  handle(response: GoogleLLMResponse): GoogleLLMResponse {
    let newdata;
    if ("nextChunk" in response.data) {
      // TODO: This is a stream. How to handle?
      newdata = response.data;
    } else if (Array.isArray(response.data)) {
      // If it is an array, try to handle every item in the array
      try {
        newdata = response.data.map((item) => this.handleData(response, item));
      } catch (xx) {
        // Re-wrap so the error always carries the full response.
        // eslint-disable-next-line no-instanceof/no-instanceof
        if (xx instanceof GoogleAISafetyError) {
          throw new GoogleAISafetyError(response, xx.message);
        } else {
          throw xx;
        }
      }
    } else {
      const data = response.data as GenerateContentResponseData;
      newdata = this.handleData(response, data);
    }
    return {
      ...response,
      data: newdata,
    };
  }
}
/** Settings for MessageGeminiSafetyHandler. */
export interface MessageGeminiSafetySettings
extends DefaultGeminiSafetySettings {
// Replacement text substituted for blocked/empty responses.
msg?: string;
// Always substitute, even when the response has content.
forceNewMessage?: boolean;
}
/**
 * Safety handler that, instead of surfacing a safety error, substitutes a
 * canned message (`msg`) for a blocked or empty model response.
 */
export class MessageGeminiSafetyHandler extends DefaultGeminiSafetyHandler {
  // Replacement text used when the response is blocked or empty.
  msg: string = "";

  // When true, always replace the model content, even if parts were returned.
  forceNewMessage = false;

  constructor(settings?: MessageGeminiSafetySettings) {
    super(settings);
    this.msg = settings?.msg ?? this.msg;
    this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage;
  }

  /**
   * Overwrite the first candidate's content with `this.msg` when forced, or
   * when the response carries no parts. NOTE: mutates (and returns) `data`.
   */
  setMessage(data: GenerateContentResponseData): GenerateContentResponseData {
    const ret = data;
    if (
      this.forceNewMessage ||
      !data?.candidates?.[0]?.content?.parts?.length
    ) {
      ret.candidates = data.candidates ?? [];
      ret.candidates[0] = data.candidates[0] ?? {};
      // FIX: removed a dead assignment that copied the existing content and
      // was immediately overwritten by the replacement content below.
      ret.candidates[0].content = {
        role: "model",
        parts: [{ text: this.msg }],
      };
    }
    return ret;
  }

  /**
   * Run the default safety checks, but swallow any safety error and return
   * the substituted message instead of throwing.
   */
  handleData(
    response: GoogleLLMResponse,
    data: GenerateContentResponseData
  ): GenerateContentResponseData {
    try {
      return super.handleData(response, data);
    } catch (xx) {
      return this.setMessage(data);
    }
  }
}
/**
 * Split a `data:` URL into its mime type and base64 payload.
 * Returns null for anything that is not a data URL (e.g. plain https URIs).
 */
const extractMimeType = (
  str: string
): { mimeType: string; data: string } | null => {
  if (!str.startsWith("data:")) {
    return null;
  }
  // Shape: "data:<mimeType>;base64,<payload>"
  const mimeType = str.split(":")[1].split(";")[0];
  const data = str.split(",")[1];
  return { mimeType, data };
};
export function getGeminiAPI(config?: GeminiAPIConfig): GoogleAIAPI {
/**
 * Convert a LangChain text content item into a Gemini text part.
 * Empty or missing text yields null so the caller can drop it.
 */
function messageContentText(
  content: MessageContentText
): GeminiPartText | null {
  const hasText = !!content?.text && content.text.length > 0;
  return hasText ? { text: content.text } : null;
}
// Convert an image_url content item into a Gemini part: data: URLs become
// inlineData; any other URI becomes fileData. The fileData mime type is
// hard-coded to image/png because it cannot be determined from the URI alone
// (see FIXME below).
function messageContentImageUrl(
content: MessageContentImageUrl
): GeminiPartInlineData | GeminiPartFileData {
const url: string =
typeof content.image_url === "string"
? content.image_url
: content.image_url.url;
if (!url) {
throw new Error("Missing Image URL");
}
const mimeTypeAndData = extractMimeType(url);
if (mimeTypeAndData) {
return {
inlineData: mimeTypeAndData,
};
} else {
// FIXME - need some way to get mime type
return {
fileData: {
mimeType: "image/png",
fileUri: url,
},
};
}
}
/**
 * Wrap a resolved MediaBlob as a Gemini fileData part.
 * Assumes blob.path is set — TODO confirm the media manager guarantees this.
 */
async function blobToFileData(blob: MediaBlob): Promise<GeminiPartFileData> {
  const fileData = {
    mimeType: blob.mimetype,
    fileUri: blob.path!,
  };
  return { fileData };
}
// Resolve a media URI to a MediaBlob via the configured media manager.
// Undefined when no mediaManager is configured or the blob is unknown.
async function fileUriContentToBlob(
uri: string
): Promise<MediaBlob | undefined> {
return config?.mediaManager?.getMediaBlob(uri);
}
// Convert a "media" content item into an inlineData or fileData part.
// Accepts {mimeType,data}, {mimeType,fileUri}, or a bare {fileUri} that is
// resolved through the media manager; anything else throws.
async function messageContentMedia(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
content: Record<string, any>
): Promise<GeminiPartInlineData | GeminiPartFileData> {
if ("mimeType" in content && "data" in content) {
return {
inlineData: {
mimeType: content.mimeType,
data: content.data,
},
};
} else if ("mimeType" in content && "fileUri" in content) {
return {
fileData: {
mimeType: content.mimeType,
fileUri: content.fileUri,
},
};
} else {
// Fall back to resolving the URI through the media manager.
const uri = content.fileUri;
const blob = await fileUriContentToBlob(uri);
if (blob) {
return await blobToFileData(blob);
}
}
throw new Error(
`Invalid media content: ${JSON.stringify(content, null, 1)}`
);
}
// Dispatch a single complex content item to its converter based on `type`.
// Returns null for empty text; throws for unsupported types.
async function messageContentComplexToPart(
content: MessageContentComplex
): Promise<GeminiPart | null> {
switch (content.type) {
case "text":
if ("text" in content) {
return messageContentText(content as MessageContentText);
}
break;
case "image_url":
if ("image_url" in content) {
// Type guard for MessageContentImageUrl
return messageContentImageUrl(content as MessageContentImageUrl);
}
break;
case "media":
return await messageContentMedia(content);
default:
throw new Error(
`Unsupported type "${content.type}" received while converting message to message parts: ${content}`
);
}
throw new Error(
`Cannot coerce "${content.type}" message part into a string.`
);
}
// Convert all complex content items concurrently; nulls are kept so the
// caller can filter them out afterwards.
async function messageContentComplexToParts(
content: MessageContentComplex[]
): Promise<(GeminiPart | null)[]> {
const contents = content.map(messageContentComplexToPart);
return Promise.all(contents);
}
/**
 * Convert MessageContent (a plain string or an array of complex content
 * items) into Gemini parts, dropping items that do not convert.
 */
async function messageContentToParts(
  content: MessageContent
): Promise<GeminiPart[]> {
  // Normalize a bare string into a single text content item.
  const messageContent: MessageContentComplex[] =
    typeof content === "string"
      ? [
          {
            type: "text",
            text: content,
          },
        ]
      : content;
  // Get all of the parts, even those that don't correctly resolve
  const allParts = await messageContentComplexToParts(messageContent);
  // Remove any invalid parts.
  // FIX: filter() replaces the previous reduce-with-spread, which rebuilt
  // the accumulator array on every element (accidental O(n^2)).
  const parts: GeminiPart[] = allParts.filter(
    (part): part is GeminiPart => part != null
  );
  return parts;
}
/**
 * Convert OpenAI-style tool calls (arguments as a JSON string) into Gemini
 * functionCall parts (args as a parsed object). Malformed JSON will throw.
 */
function messageToolCallsToParts(toolCalls: ToolCall[]): GeminiPart[] {
  if (!toolCalls?.length) {
    return [];
  }
  return toolCalls.map((tool: ToolCall) => {
    const argStr = tool?.function?.arguments;
    const args = argStr ? JSON.parse(argStr) : {};
    return {
      functionCall: {
        name: tool.function.name,
        args,
      },
    };
  });
}
// Extract Gemini parts from additional_kwargs (OpenAI-style tool_calls).
function messageKwargsToParts(kwargs: Record<string, unknown>): GeminiPart[] {
const ret: GeminiPart[] = [];
if (kwargs?.tool_calls) {
ret.push(...messageToolCallsToParts(kwargs.tool_calls as ToolCall[]));
}
return ret;
}
// Convert one message into a single GeminiContent with the given role.
// AI messages contribute functionCall parts from their tool_calls; other
// messages pick tool calls up from additional_kwargs.
async function roleMessageToContent(
role: GeminiRole,
message: BaseMessage
): Promise<GeminiContent[]> {
const contentParts: GeminiPart[] = await messageContentToParts(
message.content
);
let toolParts: GeminiPart[];
if (isAIMessage(message) && !!message.tool_calls?.length) {
toolParts = message.tool_calls.map(
(toolCall): GeminiPart => ({
functionCall: {
name: toolCall.name,
args: toolCall.args,
},
})
);
} else {
toolParts = messageKwargsToParts(message.additional_kwargs);
}
const parts: GeminiPart[] = [...contentParts, ...toolParts];
return [
{
role,
parts,
},
];
}
// System messages: use Gemini's native "system" role when
// useSystemInstruction is on; otherwise emulate with a user turn followed
// by a canned "Ok" model turn.
async function systemMessageToContent(
message: SystemMessage
): Promise<GeminiContent[]> {
return config?.useSystemInstruction
? roleMessageToContent("system", message)
: [
...(await roleMessageToContent("user", message)),
...(await roleMessageToContent("model", new AIMessage("Ok"))),
];
}
/**
 * Convert a ToolMessage (a function's reply) into a Gemini "function"
 * content. The tool's text content is JSON-parsed when possible; otherwise
 * the raw string is passed through as the response content.
 */
function toolMessageToContent(
  message: ToolMessage,
  prevMessage: BaseMessage
): GeminiContent[] {
  // Flatten complex content down to the concatenation of its text items.
  const contentStr =
    typeof message.content === "string"
      ? message.content
      : message.content.reduce(
          (acc: string, content: MessageContentComplex) => {
            if (content.type === "text") {
              return acc + content.text;
            } else {
              return acc;
            }
          },
          ""
        );
  // Hacky :( — Gemini needs the *name* of the function being answered,
  // which the ToolMessage does not carry. Fall back through the previous AI
  // message's first tool call, then its name, then the tool_call_id.
  const responseName =
    (isAIMessage(prevMessage) && !!prevMessage.tool_calls?.length
      ? prevMessage.tool_calls[0].name
      : prevMessage.name) ?? message.tool_call_id;
  // Prefer structured JSON content; fall back to the raw string.
  // (Previously the whole return literal was duplicated in the try and the
  // catch; only the parsed-vs-raw content differed.)
  let content: unknown;
  try {
    content = JSON.parse(contentStr);
  } catch (_) {
    content = contentStr;
  }
  return [
    {
      role: "function",
      parts: [
        {
          functionResponse: {
            name: responseName,
            response: { content },
          },
        },
      ],
    },
  ];
}
// Route a BaseMessage to the right converter based on its type.
// `prevMessage` is needed for tool messages (to recover the function name).
// Unsupported types are logged and skipped rather than thrown.
async function baseMessageToContent(
message: BaseMessage,
prevMessage: BaseMessage | undefined
): Promise<GeminiContent[]> {
const type = message._getType();
switch (type) {
case "system":
return systemMessageToContent(message as SystemMessage);
case "human":
return roleMessageToContent("user", message);
case "ai":
return roleMessageToContent("model", message);
case "tool":
if (!prevMessage) {
throw new Error(
"Tool messages cannot be the first message passed to the model."
);
}
return toolMessageToContent(message as ToolMessage, prevMessage);
default:
console.log(`Unsupported message type: ${type}`);
return [];
}
}
/** Map a Gemini text part back onto LangChain's text content shape. */
function textPartToMessageContent(part: GeminiPartText): MessageContentText {
  const { text } = part;
  return { type: "text", text };
}
/**
 * Map a Gemini inlineData part back to an image_url content item,
 * re-encoding the payload as a data: URL.
 */
function inlineDataPartToMessageContent(
  part: GeminiPartInlineData
): MessageContentImageUrl {
  const { mimeType, data } = part.inlineData;
  return {
    type: "image_url",
    image_url: `data:${mimeType};base64,${data}`,
  };
}
/** Map a Gemini fileData part back to an image_url content item (URI form). */
function fileDataPartToMessageContent(
  part: GeminiPartFileData
): MessageContentImageUrl {
  const { fileUri } = part.fileData;
  return { type: "image_url", image_url: fileUri };
}
// Convert Gemini parts back into LangChain message content, dropping
// anything that is not text / inlineData / fileData (e.g. functionCall
// parts, which are handled separately by partsToToolsRaw).
function partsToMessageContent(parts: GeminiPart[]): MessageContent {
return parts
.map((part) => {
if (part === undefined || part === null) {
return null;
} else if ("text" in part) {
return textPartToMessageContent(part);
} else if ("inlineData" in part) {
return inlineDataPartToMessageContent(part);
} else if ("fileData" in part) {
return fileDataPartToMessageContent(part);
} else {
return null;
}
})
.reduce((acc, content) => {
if (content) {
acc.push(content);
}
return acc;
}, [] as MessageContentComplex[]);
}
/**
 * Serialize a raw tool call (args as object) into the OpenAI-compatible
 * ToolCall shape (arguments as a JSON string).
 */
function toolRawToTool(raw: ToolCallRaw): ToolCall {
  const { id, type } = raw;
  return {
    id,
    type,
    function: {
      name: raw.function.name,
      arguments: JSON.stringify(raw.function.arguments),
    },
  };
}
// Build a raw tool call from a Gemini functionCall part, minting a fresh
// dashless-uuid id (Gemini does not supply call ids).
function functionCallPartToToolRaw(
part: GeminiPartFunctionCall
): ToolCallRaw {
return {
id: uuidv4().replace(/-/g, ""),
type: "function",
function: {
name: part.functionCall.name,
arguments: part.functionCall.args ?? {},
},
};
}
// Collect all functionCall parts of a response as raw tool calls.
function partsToToolsRaw(parts: GeminiPart[]): ToolCallRaw[] {
return parts
.map((part: GeminiPart) => {
if (part === undefined || part === null) {
return null;
} else if ("functionCall" in part) {
return functionCallPartToToolRaw(part);
} else {
return null;
}
})
.reduce((acc, content) => {
if (content) {
acc.push(content);
}
return acc;
}, [] as ToolCallRaw[]);
}
// Serialize every raw tool call (args object -> JSON string).
function toolsRawToTools(raws: ToolCallRaw[]): ToolCall[] {
return raws.map((raw) => toolRawToTool(raw));
}
// Normalize response.data to a single GenerateContentResponseData.
// Arrays (collected stream chunks) are collapsed by concatenating the first
// candidate's parts into the first element; streams cannot be converted.
// NOTE(review): the reduce has no initial value, so an empty array throws,
// and the first element is mutated in place — confirm callers never pass [].
function responseToGenerateContentResponseData(
response: GoogleLLMResponse
): GenerateContentResponseData {
if ("nextChunk" in response.data) {
throw new Error("Cannot convert Stream to GenerateContentResponseData");
} else if (Array.isArray(response.data)) {
// Collapse the array of response data as if it was a single one
return response.data.reduce(
(
acc: GenerateContentResponseData,
val: GenerateContentResponseData
): GenerateContentResponseData => {
// Add all the parts
// FIXME: Handle other candidates?
const valParts = val?.candidates?.[0]?.content?.parts ?? [];
acc.candidates[0].content.parts.push(...valParts);
// FIXME: Merge promptFeedback and safety settings
// (currently the last element's promptFeedback wins)
acc.promptFeedback = val.promptFeedback;
return acc;
}
);
} else {
return response.data as GenerateContentResponseData;
}
}
// The first candidate's parts of the (normalized) response, or [].
function responseToParts(response: GoogleLLMResponse): GeminiPart[] {
const responseData = responseToGenerateContentResponseData(response);
const parts = responseData?.candidates?.[0]?.content?.parts ?? [];
return parts;
}
/** Extract the text of a part; non-text parts contribute "". */
function partToText(part: GeminiPart): string {
  if ("text" in part) {
    return part.text;
  }
  return "";
}
// Concatenate the text of every part of the response.
function responseToString(response: GoogleLLMResponse): string {
const parts = responseToParts(response);
const ret: string = parts.reduce((acc, part) => {
const val = partToText(part);
return acc + val;
}, "");
return ret;
}
// Run the configured safety handler over the response before converting it.
// On a safety error, the converted reply is attached to the error (so the
// caller can still inspect it) and the error is rethrown.
function safeResponseTo<RetType>(
response: GoogleLLMResponse,
responseTo: (response: GoogleLLMResponse) => RetType
): RetType {
const safetyHandler =
config?.safetyHandler ?? new DefaultGeminiSafetyHandler();
try {
const safeResponse = safetyHandler.handle(response);
return responseTo(safeResponse);
} catch (xx) {
// eslint-disable-next-line no-instanceof/no-instanceof
if (xx instanceof GoogleAISafetyError) {
const ret = responseTo(xx.response);
xx.reply = ret;
}
throw xx;
}
}
// Safety-checked variant of responseToString.
function safeResponseToString(response: GoogleLLMResponse): string {
return safeResponseTo(response, responseToString);
}
function responseToGenerationInfo(response: GoogleLLMResponse) {
if (!Array.isArray(response.data)) {
return {};
}
const data = response.data[0];
return {
usage_metadata: {
prompt_token_count: data.usageMetadata?.promptTokenCount,
candidates_token_count: data.usageMetadata?.candidatesTokenCount,
total_token_count: data.usageMetadata?.totalTokenCount,
},
safety_ratings: data.candidates[0]?.safetyRatings?.map((rating) => ({
category: rating.category,
probability: rating.probability,
probability_score: rating.probabilityScore,
severity: rating.severity,
severity_score: rating.severityScore,
})),
finish_reason: data.candidates[0]?.finishReason,
};
}
// Build a ChatGenerationChunk from the response: full concatenated text,
// but the message is derived from the first part only.
function responseToChatGeneration(
response: GoogleLLMResponse
): ChatGenerationChunk {
return new ChatGenerationChunk({
text: responseToString(response),
message: partToMessageChunk(responseToParts(response)[0]),
generationInfo: responseToGenerationInfo(response),
});
}
// Safety-checked variant of responseToChatGeneration.
function safeResponseToChatGeneration(
response: GoogleLLMResponse
): ChatGenerationChunk {
return safeResponseTo(response, responseToChatGeneration);
}
/**
 * Best-effort conversion of a message chunk to plain text.
 * Supports string content or a leading text part; anything else throws.
 */
function chunkToString(chunk: BaseMessageChunk): string {
  if (chunk === null) {
    return "";
  }
  const { content } = chunk;
  if (typeof content === "string") {
    return content;
  }
  if (content.length === 0) {
    return "";
  }
  const first = content[0];
  if (first.type === "text") {
    return first.text;
  }
  throw new Error(`Unexpected chunk: ${chunk}`);
}
// Build an AIMessageChunk from one Gemini part, collapsing all-text
// content down to a plain string.
function partToMessageChunk(part: GeminiPart): BaseMessageChunk {
const fields = partsToBaseMessageChunkFields([part]);
if (typeof fields.content === "string") {
return new AIMessageChunk(fields);
} else if (fields.content.every((item) => item.type === "text")) {
const newContent = fields.content
.map((item) => ("text" in item ? item.text : ""))
.join("");
return new AIMessageChunk({
...fields,
content: newContent,
});
}
return new AIMessageChunk(fields);
}
// Wrap a single part as a ChatGeneration (chunk form).
function partToChatGeneration(part: GeminiPart): ChatGeneration {
const message = partToMessageChunk(part);
const text = partToText(part);
return new ChatGenerationChunk({
text,
message,
});
}
// One generation per part. When every part produced plain-string content,
// the generations are merged into a single chunk carrying the combined
// text, tool-call chunks from the last part, and (when present on the
// response) usage metadata.
function responseToChatGenerations(
response: GoogleLLMResponse
): ChatGeneration[] {
const parts = responseToParts(response);
if (parts.length === 0) {
return [];
}
let ret = parts.map((part) => partToChatGeneration(part));
if (ret.every((item) => typeof item.message.content === "string")) {
const combinedContent = ret.map((item) => item.message.content).join("");
const combinedText = ret.map((item) => item.text).join("");
const toolCallChunks: ToolCallChunk[] | undefined = ret[
ret.length - 1
]?.message.additional_kwargs?.tool_calls?.map((toolCall, i) => ({
name: toolCall.function.name,
args: toolCall.function.arguments,
id: toolCall.id,
index: i,
type: "tool_call_chunk",
}));
let usageMetadata: UsageMetadata | undefined;
if ("usageMetadata" in response.data) {
usageMetadata = {
input_tokens: response.data.usageMetadata.promptTokenCount as number,
output_tokens: response.data.usageMetadata
.candidatesTokenCount as number,
total_tokens: response.data.usageMetadata.totalTokenCount as number,
};
}
ret = [
new ChatGenerationChunk({
message: new AIMessageChunk({
content: combinedContent,
additional_kwargs: ret[ret.length - 1]?.message.additional_kwargs,
tool_call_chunks: toolCallChunks,
usage_metadata: usageMetadata,
}),
text: combinedText,
generationInfo: ret[ret.length - 1].generationInfo,
}),
];
}
return ret;
}
// Message fields (content + tool calls) for the whole response.
function responseToBaseMessageFields(
response: GoogleLLMResponse
): BaseMessageFields {
const parts = responseToParts(response);
return partsToBaseMessageChunkFields(parts);
}
// Assemble AIMessageChunk fields from parts: message content plus
// tool_call_chunks / tool_calls. Calls whose serialized arguments fail to
// parse back as JSON are recorded as invalid_tool_calls instead.
function partsToBaseMessageChunkFields(
parts: GeminiPart[]
): AIMessageChunkFields {
const fields: AIMessageChunkFields = {
content: partsToMessageContent(parts),
tool_call_chunks: [],
tool_calls: [],
invalid_tool_calls: [],
};
const rawTools = partsToToolsRaw(parts);
if (rawTools.length > 0) {
const tools = toolsRawToTools(rawTools);
for (const tool of tools) {
fields.tool_call_chunks?.push({
name: tool.function.name,
args: tool.function.arguments,
id: tool.id,
type: "tool_call_chunk",
});
try {
fields.tool_calls?.push({
name: tool.function.name,
args: JSON.parse(tool.function.arguments),
id: tool.id,
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (e: any) {
// Arguments were not valid JSON — surface as an invalid call.
fields.invalid_tool_calls?.push({
name: tool.function.name,
args: tool.function.arguments,
id: tool.id,
error: e.message,
type: "invalid_tool_call",
});
}
}
fields.additional_kwargs = {
tool_calls: tools,
};
}
return fields;
}
// Whole response as a single AIMessage.
function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage {
const fields = responseToBaseMessageFields(response);
return new AIMessage(fields);
}
// Safety-checked variant of responseToBaseMessage.
function safeResponseToBaseMessage(response: GoogleLLMResponse): BaseMessage {
return safeResponseTo(response, responseToBaseMessage);
}
// Whole response as a ChatResult (generations + llmOutput info).
function responseToChatResult(response: GoogleLLMResponse): ChatResult {
const generations = responseToChatGenerations(response);
return {
generations,
llmOutput: responseToGenerationInfo(response),
};
}
// Safety-checked variant of responseToChatResult.
function safeResponseToChatResult(response: GoogleLLMResponse): ChatResult {
return safeResponseTo(response, responseToChatResult);
}
/**
 * Distinguish the two accepted input shapes: a raw MessageContent (string
 * or content array) vs an array of BaseMessages. Heuristic: BaseMessages
 * carry a "content" own-property on the first element.
 */
function inputType(
  input: MessageContent | BaseMessage[]
): "MessageContent" | "BaseMessageArray" {
  if (typeof input === "string") {
    return "MessageContent";
  }
  const firstItem: BaseMessage | MessageContentComplex = input[0];
  return Object.hasOwn(firstItem, "content")
    ? "BaseMessageArray"
    : "MessageContent";
}
// MessageContent input: always a single user turn (a role is required by
// Vertex AI).
async function formatMessageContents(
input: MessageContent,
_parameters: GoogleAIModelParams
): Promise<GeminiContent[]> {
const parts = await messageContentToParts!(input);
const contents: GeminiContent[] = [
{
role: "user", // Required by Vertex AI
parts,
},
];
return contents;
}
// BaseMessage[] input: convert each message (passing its predecessor so
// tool messages can recover the function name), drop system-only content,
// and merge adjacent "function" turns into a single content entry.
async function formatBaseMessageContents(
input: BaseMessage[],
_parameters: GoogleAIModelParams
): Promise<GeminiContent[]> {
const inputPromises: Promise<GeminiContent[]>[] = input.map((msg, i) =>
baseMessageToContent!(msg, input[i - 1])
);
const inputs = await Promise.all(inputPromises);
return inputs.reduce((acc, cur) => {
// Filter out the system content
if (cur.every((content) => content.role === "system")) {
return acc;
}
// Combine adjacent function messages
if (
cur[0]?.role === "function" &&
acc.length > 0 &&
acc[acc.length - 1].role === "function"
) {
acc[acc.length - 1].parts = [
...acc[acc.length - 1].parts,
...cur[0].parts,
];
} else {
acc.push(...cur);
}
return acc;
}, [] as GeminiContent[]);
}
// Dispatch on the detected input shape.
async function formatContents(
input: MessageContent | BaseMessage[],
parameters: GoogleAIModelRequestParams
): Promise<GeminiContent[]> {
const it = inputType(input);
switch (it) {
case "MessageContent":
return formatMessageContents(input as MessageContent, parameters);
case "BaseMessageArray":
return formatBaseMessageContents(input as BaseMessage[], parameters);
default:
throw new Error(`Unknown input type "${it}": ${input}`);
}
}
/** Copy the sampling/length parameters into Gemini's generationConfig shape. */
function formatGenerationConfig(
  parameters: GoogleAIModelRequestParams
): GeminiGenerationConfig {
  const {
    temperature,
    topK,
    topP,
    maxOutputTokens,
    stopSequences,
    responseMimeType,
  } = parameters;
  return {
    temperature,
    topK,
    topP,
    maxOutputTokens,
    stopSequences,
    responseMimeType,
  };
}
/** Pass through caller-supplied safety settings, defaulting to none. */
function formatSafetySettings(
  parameters: GoogleAIModelRequestParams
): GeminiSafetySetting[] {
  const settings = parameters.safetySettings;
  return settings ?? [];
}
// Extract a systemInstruction from the message list. Only a leading system
// message is accepted; a system message anywhere else throws.
async function formatBaseMessageSystemInstruction(
input: BaseMessage[]
): Promise<GeminiContent> {
let ret = {} as GeminiContent;
for (let index = 0; index < input.length; index += 1) {
const message = input[index];
if (message._getType() === "system") {
// For system types, we only want it if it is the first message,
// if it appears anywhere else, it should be an error.
if (index === 0) {
// eslint-disable-next-line prefer-destructuring
ret = (await baseMessageToContent!(message, undefined))[0];
} else {
throw new Error(
"System messages are only permitted as the first passed message."
);
}
}
}
return ret;
}
// A systemInstruction is only produced for BaseMessage[] input and only
// when the config enables native system instructions; otherwise empty.
async function formatSystemInstruction(
input: MessageContent | BaseMessage[]
): Promise<GeminiContent> {
if (!config?.useSystemInstruction) {
return {} as GeminiContent;
}
const it = inputType(input);
switch (it) {
case "BaseMessageArray":
return formatBaseMessageSystemInstruction(input as BaseMessage[]);
default:
return {} as GeminiContent;
}
}
// LangChain structured tool -> Gemini function declaration (zod schema is
// converted to Gemini's JSON-schema subset).
function structuredToolToFunctionDeclaration(
tool: StructuredToolParams
): GeminiFunctionDeclaration {
const jsonSchema = zodToGeminiParameters(tool.schema);
return {
name: tool.name,
description: tool.description ?? `A function available to call.`,
parameters: jsonSchema,
};
}
// Wrap all declarations in the single GeminiTool entry Gemini expects.
function structuredToolsToGeminiTools(
tools: StructuredToolParams[]
): GeminiTool[] {
return [
{
functionDeclarations: tools.map(structuredToolToFunctionDeclaration),
},
];
}
// Normalize the tools parameter: LangChain tools are converted; raw
// GeminiTools pass through, except a single tool with an empty declaration
// list, which is treated as "no tools".
function formatTools(parameters: GoogleAIModelRequestParams): GeminiTool[] {
const tools: GoogleAIToolType[] | undefined = parameters?.tools;
if (!tools || tools.length === 0) {
return [];
}
if (tools.every(isLangChainTool)) {
return structuredToolsToGeminiTools(tools);
} else {
if (
tools.length === 1 &&
(!("functionDeclarations" in tools[0]) ||
!tools[0].functionDeclarations?.length)
) {
return [];
}
return tools as GeminiTool[];
}
}
/**
 * Build Gemini's toolConfig from a string tool_choice ("auto" | "any" |
 * "none"). A missing or non-string tool_choice yields undefined, meaning no
 * config is sent.
 */
function formatToolConfig(
  parameters: GoogleAIModelRequestParams
): GeminiRequest["toolConfig"] | undefined {
  const choice = parameters.tool_choice;
  if (typeof choice !== "string" || !choice) {
    return undefined;
  }
  return {
    functionCallingConfig: {
      mode: choice as "auto" | "any" | "none",
      allowedFunctionNames: parameters.allowed_function_names,
    },
  };
}
// Assemble the full GeminiRequest: contents + generationConfig always;
// tools / toolConfig / safetySettings / systemInstruction only when
// non-empty.
async function formatData(
input: unknown,
parameters: GoogleAIModelRequestParams
): Promise<GeminiRequest> {
const typedInput = input as MessageContent | BaseMessage[];
const contents = await formatContents(typedInput, parameters);
const generationConfig = formatGenerationConfig(parameters);
const tools = formatTools(parameters);
const toolConfig = formatToolConfig(parameters);
const safetySettings = formatSafetySettings(parameters);
const systemInstruction = await formatSystemInstruction(typedInput);
const ret: GeminiRequest = {
contents,
generationConfig,
};
if (tools && tools.length) {
ret.tools = tools;
}
if (toolConfig) {
ret.toolConfig = toolConfig;
}
if (safetySettings && safetySettings.length) {
ret.safetySettings = safetySettings;
}
if (
systemInstruction?.role &&
systemInstruction?.parts &&
systemInstruction?.parts?.length
) {
ret.systemInstruction = systemInstruction;
}
return ret;
}
// The public GoogleAIAPI surface: conversion entry points are exposed in
// their safety-checked variants.
return {
messageContentToParts,
baseMessageToContent,
responseToString: safeResponseToString,
responseToChatGeneration: safeResponseToChatGeneration,
chunkToString,
responseToBaseMessage: safeResponseToBaseMessage,
responseToChatResult: safeResponseToChatResult,
formatData,
};
}
/**
 * Validate Gemini sampling parameters, throwing on out-of-range values.
 * Undefined (and zero, which is falsy) values are not checked.
 */
export function validateGeminiParams(params: GoogleAIModelParams): void {
  const { maxOutputTokens, temperature, topP, topK } = params;
  if (maxOutputTokens && maxOutputTokens < 0) {
    throw new Error("`maxOutputTokens` must be a positive integer");
  }
  if (temperature && (temperature < 0 || temperature > 2)) {
    throw new Error("`temperature` must be in the range of [0.0,2.0]");
  }
  if (topP && (topP < 0 || topP > 1)) {
    throw new Error("`topP` must be in the range of [0.0,1.0]");
  }
  if (topK && topK < 0) {
    throw new Error("`topK` must be a positive integer");
  }
}
/** True when the model name identifies a Gemini-family model (case-insensitive). */
export function isModelGemini(modelName: string): boolean {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("gemini");
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "docs"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest/presets/default-esm",
testEnvironment: "./jest.env.cjs",
modulePathIgnorePatterns: ["dist/", "docs/"],
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
transform: {
"^.+\\.tsx?$": ["@swc/jest"],
},
transformIgnorePatterns: [
"/node_modules/",
"\\.pnp\\.[^\\/]+$",
"./scripts/jest-setup-after-env.js",
],
setupFiles: ["dotenv/config"],
testTimeout: 20_000,
passWithNoTests: true,
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
// Jest node environment that restores the host's Float32Array inside the
// test VM so `instanceof Float32Array` checks succeed across the boundary.
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
constructor(config, context) {
// Make `instanceof Float32Array` return true in tests
// to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
super(config, context);
this.global.Float32Array = Float32Array;
}
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/README.md | # LangChain google-vertexai-web
This package contains resources to access Google AI/ML models
and other Google services via Vertex AI. Authorization to these
services uses either an API Key or service account credentials
that are included in an environment variable.
If you are running this on the Google Cloud Platform, or in a way
where service account credentials can be stored on a file system,
consider using the @langchain/google-vertexai
package *instead*. You do not need to use both packages. See the
section on **Authorization** below.
## Installation
```bash
$ yarn add @langchain/google-vertexai-web
```
## Authorization
Authorization is done through a Google Cloud Service Account.
To handle service accounts, this package uses the `google-auth-library`
package, and you may wish to consult the documentation for that library
about how it does so. But in short, classes in this package will use
credentials from the first of the following that apply:
1. An API Key that is passed to the constructor using the `apiKey` attribute
2. Credentials that are passed to the constructor using the `authInfo` attribute
3. An API Key that is set in the environment variable `API_KEY`
4. The Service Account credentials that are saved directly into the
`GOOGLE_WEB_CREDENTIALS`
5. The Service Account credentials that are saved directly into the
`GOOGLE_VERTEX_AI_WEB_CREDENTIALS` (deprecated)
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": [
"--workspaces-update=false"
]
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
overrides: [
{
files: ['**/*.test.ts'],
rules: {
'@typescript-eslint/no-unused-vars': 'off'
}
}
]
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
 * Resolve a path relative to the directory containing this config file.
 * @param {string} relativePath path relative to this file's directory
 * @returns {string} the resulting absolute path
 */
function abs(relativePath) {
  const here = dirname(fileURLToPath(import.meta.url));
  return resolve(here, relativePath);
}
export const config = {
internals: [/node\:/, /@langchain\/core\//, /@langchain\/google-webauth/],
entrypoints: {
index: "index",
utils: "utils",
types: "types",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
cjsDestination: "./dist",
abs,
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/package.json | {
"name": "@langchain/google-vertexai-web",
"version": "0.1.3",
"description": "LangChain.js support for Google Vertex AI Web",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-vertexai-web/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/google-vertexai-web",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"@langchain/google-webauth": "~0.1.3"
},
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/google-common": "^0.1.0",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@langchain/standard-tests": "0.0.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0",
"zod": "^3.22.4"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./utils": {
"types": {
"import": "./utils.d.ts",
"require": "./utils.d.cts",
"default": "./utils.d.ts"
},
"import": "./utils.js",
"require": "./utils.cjs"
},
"./types": {
"types": {
"import": "./types.d.ts",
"require": "./types.d.cts",
"default": "./types.d.ts"
},
"import": "./types.js",
"require": "./types.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts",
"utils.cjs",
"utils.js",
"utils.d.ts",
"utils.d.cts",
"types.cjs",
"types.js",
"types.d.ts",
"types.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": ["node_modules", "dist", "docs", "**/tests"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/types.ts | export * from "@langchain/google-webauth/types";
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/llms.ts | import { type GoogleLLMInput, GoogleLLM } from "@langchain/google-webauth";
/**
 * Input to a Google Vertex LLM class.
 * Currently identical to {@link GoogleLLMInput} (no Vertex-specific fields).
 */
export interface VertexAIInput extends GoogleLLMInput {}
/**
 * Google Vertex AI LLM integration that authenticates through
 * the "@langchain/google-webauth" package.
 */
export class VertexAI extends GoogleLLM {
  /** Class name used by LangChain (de)serialization. */
  static lc_name() {
    return "VertexAI";
  }

  /** Namespace used by LangChain (de)serialization. */
  lc_namespace = ["langchain", "llms", "vertexai"];

  constructor(fields?: VertexAIInput) {
    // The spread comes first so the GCP platform choice cannot be overridden.
    super({
      ...fields,
      platformType: "gcp",
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/index.ts | export * from "./chat_models.js";
export * from "./llms.js";
export * from "./embeddings.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/chat_models.ts | import { type ChatGoogleInput, ChatGoogle } from "@langchain/google-webauth";
/**
* Input to a Google Vertex AI chat model class.
*/
export interface ChatVertexAIInput extends ChatGoogleInput {}
/**
* Integration with Google Vertex AI chat models in web environments.
*
* Setup:
* Install `@langchain/google-vertexai-web` and set your stringified
* Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
*
* ```bash
* npm install @langchain/google-vertexai-web
* export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
* ```
*
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_vertexai_web.ChatVertexAI.html#constructor)
*
* ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
*
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
* They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
*
* ```typescript
* // When calling `.bind`, call options should be passed via the first argument
* const llmWithArgsBound = llm.bind({
* stop: ["\n"],
* tools: [...],
* });
*
* // When calling `.bindTools`, call options should be passed via the second argument
* const llmWithTools = llm.bindTools(
* [...],
* {
* tool_choice: "auto",
* }
* );
* ```
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { ChatVertexAI } from '@langchain/google-vertexai-web';
*
* const llm = new ChatVertexAI({
* model: "gemini-1.5-pro",
* temperature: 0,
* authOptions: {
* credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
* },
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const input = `Translate "I love programming" into French.`;
*
* // Models also accept a list of chat messages or a formatted prompt
* const result = await llm.invoke(input);
* console.log(result);
* ```
*
* ```txt
* AIMessageChunk {
* "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 9,
* "output_tokens": 63,
* "total_tokens": 72
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(input)) {
* console.log(chunk);
* }
* ```
*
* ```txt
* AIMessageChunk {
* "content": "\"",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "J'adore programmer\" \n",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "",
* "additional_kwargs": {},
* "response_metadata": {
* "finishReason": "stop"
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 9,
* "output_tokens": 8,
* "total_tokens": 17
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Aggregate Streamed Chunks</strong></summary>
*
* ```typescript
* import { AIMessageChunk } from '@langchain/core/messages';
* import { concat } from '@langchain/core/utils/stream';
*
* const stream = await llm.stream(input);
* let full: AIMessageChunk | undefined;
* for await (const chunk of stream) {
* full = !full ? chunk : concat(full, chunk);
* }
* console.log(full);
* ```
*
* ```txt
* AIMessageChunk {
* "content": "\"J'adore programmer\" \n",
* "additional_kwargs": {},
* "response_metadata": {
* "finishReason": "stop"
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 9,
* "output_tokens": 8,
* "total_tokens": 17
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
* name: "GetWeather",
* description: "Get the current weather in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const GetPopulation = {
* name: "GetPopulation",
* description: "Get the current population in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
* const aiMsg = await llmWithTools.invoke(
* "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
*
* ```txt
* [
* {
* name: 'GetPopulation',
* args: { location: 'New York City, NY' },
* id: '33c1c1f47e2f492799c77d2800a43912',
* type: 'tool_call'
* }
* ]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const Joke = z.object({
* setup: z.string().describe("The setup of the joke"),
* punchline: z.string().describe("The punchline to the joke"),
* rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
*
* ```txt
* {
* setup: 'What do you call a cat that loves to bowl?',
* punchline: 'An alley cat!'
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(input);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Stream Usage Metadata</strong></summary>
*
* ```typescript
* const streamForMetadata = await llm.stream(
* input,
* {
* streamUsage: true
* }
* );
* let fullForMetadata: AIMessageChunk | undefined;
* for await (const chunk of streamForMetadata) {
* fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
* }
* console.log(fullForMetadata?.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
* ```
* </details>
*
* <br />
*/
export class ChatVertexAI extends ChatGoogle {
  /** Class name used by LangChain (de)serialization. */
  static lc_name() {
    return "ChatVertexAI";
  }

  /** Namespace used by LangChain (de)serialization. */
  lc_namespace = ["langchain", "chat_models", "vertexai"];

  constructor(fields?: ChatVertexAIInput) {
    // The spread comes first so the GCP platform choice cannot be overridden.
    super({
      ...fields,
      platformType: "gcp",
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/embeddings.ts | import {
type GoogleEmbeddingsInput,
GoogleEmbeddings,
} from "@langchain/google-webauth";
/**
 * Input to a Google Vertex AI embeddings class.
 * Currently identical to {@link GoogleEmbeddingsInput} (no Vertex-specific fields).
 */
export interface GoogleVertexAIEmbeddingsInput extends GoogleEmbeddingsInput {}
/**
 * Google Vertex AI embeddings model integration that authenticates through
 * the "@langchain/google-webauth" package.
 */
export class VertexAIEmbeddings extends GoogleEmbeddings {
  /** Class name used by LangChain (de)serialization. */
  static lc_name() {
    return "VertexAIEmbeddings";
  }

  // NOTE(review): unlike VertexAI/ChatVertexAI, no lc_namespace override is
  // declared here — confirm the inherited namespace is intentional.
  constructor(fields: GoogleVertexAIEmbeddingsInput) {
    // The spread comes first so the GCP platform choice cannot be overridden.
    super({
      ...fields,
      platformType: "gcp",
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/utils.ts | export * from "@langchain/google-webauth/utils";
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/agent.int.test.ts | // import { test, expect } from "@jest/globals";
// import { ChatPromptTemplate } from "@langchain/core/prompts";
// import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
// import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
// import { Calculator } from "@langchain/community/tools/calculator";
// import { ChatVertexAI } from "../index.js";
// const tools = [new TavilySearchResults({ maxResults: 1 }), new Calculator()];
// TODO: This test breaks CI build due to dependencies. Figure out a way around it.
test("createToolCallingAgent works", async () => {
  // Intentionally a no-op: the real test body is commented out because its
  // dependencies break the CI build (see the TODO above). This placeholder
  // keeps the suite green until that is resolved.
  // const prompt = ChatPromptTemplate.fromMessages([
  //   ["system", "You are a helpful assistant"],
  //   ["placeholder", "{chat_history}"],
  //   ["human", "{input}"],
  //   ["placeholder", "{agent_scratchpad}"],
  // ]);
  // const llm = new ChatVertexAI({
  //   temperature: 0,
  // });
  // const agent = await createToolCallingAgent({
  //   llm,
  //   tools,
  //   prompt,
  // });
  // const agentExecutor = new AgentExecutor({
  //   agent,
  //   tools,
  // });
  // const input = "what is the current weather in SF?";
  // const result = await agentExecutor.invoke({
  //   input,
  // });
  // console.log(result);
  // expect(result.input).toBe(input);
  // expect(typeof result.output).toBe("string");
  // // Length greater than 10 because any less than that would warrant
  // // an investigation into why such a short generation was returned.
  // expect(result.output.length).toBeGreaterThan(10);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/llms.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { VertexAI } from "../llms.js";
test("Serialization", () => {
  const model = new VertexAI({
    authOptions: {
      credentials: "foo",
    },
  });
  // Credentials must serialize as a secret reference (GOOGLE_AUTH_OPTIONS),
  // never as the literal value passed in.
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","llms","vertexai","VertexAI"],"kwargs":{"auth_options":{"lc":1,"type":"secret","id":["GOOGLE_AUTH_OPTIONS"]},"platform_type":"gcp"}}`
  );
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/chat_models.standard.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { GoogleAIBaseLanguageModelCallOptions } from "@langchain/google-common";
import { ChatVertexAI } from "../chat_models.js";
/**
 * Runs the shared LangChain standard integration-test suite against
 * ChatVertexAI, authenticating with web credentials from the environment.
 */
class ChatVertexAIStandardIntegrationTests extends ChatModelIntegrationTests<
  GoogleAIBaseLanguageModelCallOptions,
  AIMessageChunk
> {
  constructor() {
    // Fail fast with a clear message when the credential secret is absent.
    if (!process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS) {
      throw new Error("Missing secrets for Google VertexAI standard tests.");
    }
    super({
      Cls: ChatVertexAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      supportsParallelToolCalls: true,
      invokeResponseType: AIMessageChunk,
      constructorArgs: {
        model: "gemini-1.5-pro",
        authOptions: {
          credentials: JSON.parse(process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS),
        },
      },
    });
  }

  // Suite override: this scenario is not implemented for Vertex AI.
  async testToolMessageHistoriesListContent() {
    this.skipTestMessage(
      "testToolMessageHistoriesListContent",
      "ChatVertexAI",
      "Not implemented."
    );
  }

  // Suite override: skipped because of Vertex AI tool-schema limitations.
  // NOTE(review): the concatenated message below lacks a space between the
  // two sentences.
  async testInvokeMoreComplexTools() {
    this.skipTestMessage(
      "testInvokeMoreComplexTools",
      "ChatVertexAI",
      "Google VertexAI does not support tool schemas which contain object with unknown/any parameters." +
        "Google VertexAI only supports objects in schemas when the parameters are defined."
    );
  }

  async testParallelToolCalling() {
    // Pass `true` in the second argument to only verify it can support parallel tool calls in the message history.
    // This is because the model struggles to actually call parallel tools.
    await super.testParallelToolCalling(undefined, true);
  }
}
const testClass = new ChatVertexAIStandardIntegrationTests();

// Single jest entry point: runTests() executes the whole standard suite and
// returns true only when every (non-skipped) test passes.
test("ChatVertexAIStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts | import { test } from "@jest/globals";
import {
AIMessage,
BaseMessage,
HumanMessageChunk,
MessageContentComplex,
} from "@langchain/core/messages";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import { VertexAI } from "../llms.js";
// Base64-encoded image fixtures for the multimodal tests below.
const imgData = {
  // Tiny PNG of a solid blue square — presumably; decode to confirm dimensions.
  blueSquare:
    "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=",
};
// Integration tests exercising the VertexAI completion model end-to-end
// (requires live credentials; not run in the unit-test pass).
describe("Google APIKey LLM", () => {
  test("platform", async () => {
    const model = new VertexAI();
    // The constructor pins platformType to "gcp" regardless of input.
    expect(model.platform).toEqual("gcp");
  });
  test("invoke", async () => {
    const model = new VertexAI();
    const res = await model.invoke("If the time is 1:00, what time is it?");
    expect(res.length).toBeGreaterThan(0);
    expect(typeof res === "string").toBeTruthy();
  });
  test("stream", async () => {
    const model = new VertexAI();
    const stream = await model.stream(
      "What is the answer to live, the universe, and everything? Be verbose."
    );
    const chunks = [];
    for await (const chunk of stream) {
      chunks.push(chunk);
    }
    // A verbose answer should arrive in more than one chunk.
    expect(chunks.length).toBeGreaterThan(1);
  });
  test("predictMessage image", async () => {
    const model = new VertexAI({
      model: "gemini-1.5-flash",
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,${imgData.blueSquare}`,
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    const res = await model.predictMessages(messages);
    expect(res).toBeInstanceOf(AIMessage);
    expect(Array.isArray(res.content)).toEqual(true);
    expect(res.content[0]).toHaveProperty("text");
    // console.log("res", res);
  });
  test("invoke image", async () => {
    // NOTE(review): uses legacy `modelName` while the test above uses `model`
    // — confirm both spellings are still accepted.
    const model = new VertexAI({
      modelName: "gemini-pro-vision",
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,${imgData.blueSquare}`,
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    const input = new ChatPromptValue(messages);
    const res = await model.invoke(input);
    expect(res).toBeDefined();
    expect(res.length).toBeGreaterThan(0);
    // console.log("res", res);
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/chat_models.standard.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { GoogleAIBaseLanguageModelCallOptions } from "@langchain/google-common";
import { ChatVertexAI } from "../chat_models.js";
/**
 * Runs the shared LangChain standard unit-test suite against ChatVertexAI.
 * No network access: these checks are purely structural.
 */
class ChatVertexAIStandardUnitTests extends ChatModelUnitTests<
  GoogleAIBaseLanguageModelCallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatVertexAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {},
    });
    // This must be set so method like `.bindTools` or `.withStructuredOutput`
    // which we call after instantiating the model will work.
    // (constructor will throw if API key is not set)
    process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS = "test";
  }

  // Suite override: skipped — this package authenticates with a credentials
  // payload, not a single API key (see multipleApiKeysRequiredMessage).
  testChatModelInitApiKey() {
    this.skipTestMessage(
      "testChatModelInitApiKey",
      "ChatVertexAI (webauth)",
      this.multipleApiKeysRequiredMessage
    );
  }
}
const testClass = new ChatVertexAIStandardUnitTests();

// Single jest entry point: runTests() executes the whole standard suite and
// returns true only when every (non-skipped) test passes.
test("ChatVertexAIStandardUnitTests", () => {
  const testResults = testClass.runTests();
  expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts | /* eslint-disable import/no-extraneous-dependencies, no-process-env */
import { z } from "zod";
import { test } from "@jest/globals";
import {
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
HumanMessage,
SystemMessage,
} from "@langchain/core/messages";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import { StructuredTool } from "@langchain/core/tools";
import { ChatVertexAI } from "../chat_models.js";
/**
 * Toy structured tool used to exercise Gemini tool-calling in these tests.
 * Returns a canned answer; no real weather service is contacted.
 */
class WeatherTool extends StructuredTool {
  // Accepts a batch of city names so the model may request several at once.
  schema = z.object({
    locations: z
      .array(z.object({ name: z.string() }))
      .describe("The name of cities to get the weather for."),
  });

  description =
    "Get the weather of a specific location and return the temperature in Celsius.";

  name = "get_weather";

  async _call(input: z.infer<typeof this.schema>) {
    // console.log(`WeatherTool called with input: ${input}`);
    // Fixed: the degree sign was mojibake ("Β°") from a broken encoding.
    return `The weather in ${JSON.stringify(input.locations)} is 25°C`;
  }
}
// Integration tests exercising ChatVertexAI with explicitly supplied web
// credentials (requires the GOOGLE_VERTEX_AI_WEB_CREDENTIALS secret).
describe("Google APIKey Chat", () => {
  test("invoke", async () => {
    const model = new ChatVertexAI({
      authOptions: {
        credentials: JSON.parse(
          process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS ?? ""
        ),
      },
    });
    const res = await model.invoke("What is 1 + 1?");
    // console.log(res);
    expect(res).toBeDefined();
    expect(res._getType()).toEqual("ai");
    const aiMessage = res as AIMessageChunk;
    // console.log(aiMessage);
    expect(aiMessage.content).toBeDefined();
    expect(aiMessage.content.length).toBeGreaterThan(0);
    expect(aiMessage.content[0]).toBeDefined();
  });
  test("generate", async () => {
    const model = new ChatVertexAI();
    const messages: BaseMessage[] = [
      new SystemMessage(
        "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
      ),
      new HumanMessage("Flip it"),
      new AIMessage("T"),
      new HumanMessage("Flip the coin again"),
    ];
    const res = await model.predictMessages(messages);
    expect(res).toBeDefined();
    expect(res._getType()).toEqual("ai");
    const aiMessage = res as AIMessageChunk;
    expect(aiMessage.content).toBeDefined();
    expect(aiMessage.content.length).toBeGreaterThan(0);
    expect(aiMessage.content[0]).toBeDefined();
  });
  test("stream", async () => {
    const model = new ChatVertexAI();
    const input: BaseLanguageModelInput = new ChatPromptValue([
      new SystemMessage(
        "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
      ),
      new HumanMessage("Flip it"),
      new AIMessage("T"),
      new HumanMessage("Flip the coin again"),
    ]);
    const res = await model.stream(input);
    const resArray: BaseMessageChunk[] = [];
    for await (const chunk of res) {
      resArray.push(chunk);
    }
    expect(resArray).toBeDefined();
    expect(resArray.length).toBeGreaterThanOrEqual(1);
    const lastChunk = resArray[resArray.length - 1];
    expect(lastChunk).toBeDefined();
    expect(lastChunk._getType()).toEqual("ai");
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const aiChunk = lastChunk as AIMessageChunk;
    // console.log(aiChunk);
    // console.log(JSON.stringify(resArray, null, 2));
  });
  test("Tool call", async () => {
    const chat = new ChatVertexAI().bindTools([new WeatherTool()]);
    const res = await chat.invoke("What is the weather in SF and LA");
    // console.log(res);
    expect(res.tool_calls?.length).toEqual(1);
    // Parsed tool_call args must match the raw provider-reported arguments.
    expect(res.tool_calls?.[0].args).toEqual(
      JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? "")
    );
  });
  test("withStructuredOutput", async () => {
    const tool = {
      name: "get_weather",
      description:
        "Get the weather of a specific location and return the temperature in Celsius.",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The name of city to get the weather for.",
          },
        },
        required: ["location"],
      },
    };
    const model = new ChatVertexAI().withStructuredOutput(tool);
    const result = await model.invoke("What is the weather in Paris?");
    expect(result).toHaveProperty("location");
  });
});
// Same scenarios as the block above, but relying on ambient web-auth
// credentials discovered from the environment (no explicit authOptions).
describe("Google Webauth Chat", () => {
  test("invoke", async () => {
    const model = new ChatVertexAI();
    const res = await model.invoke("What is 1 + 1?");
    expect(res).toBeDefined();
    expect(res._getType()).toEqual("ai");
    const aiMessage = res as AIMessageChunk;
    expect(aiMessage.content).toBeDefined();
    expect(aiMessage.content.length).toBeGreaterThan(0);
    expect(aiMessage.content[0]).toBeDefined();
    // console.log(aiMessage);
  });
  test("generate", async () => {
    const model = new ChatVertexAI();
    const messages: BaseMessage[] = [
      new SystemMessage(
        "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
      ),
      new HumanMessage("Flip it"),
      new AIMessage("T"),
      new HumanMessage("Flip the coin again"),
    ];
    const res = await model.predictMessages(messages);
    expect(res).toBeDefined();
    expect(res._getType()).toEqual("ai");
    const aiMessage = res as AIMessageChunk;
    expect(aiMessage.content).toBeDefined();
    expect(aiMessage.content.length).toBeGreaterThan(0);
    expect(aiMessage.content[0]).toBeDefined();
    // console.log(aiMessage);
  });
  test("stream", async () => {
    const model = new ChatVertexAI();
    const input: BaseLanguageModelInput = new ChatPromptValue([
      new SystemMessage(
        "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
      ),
      new HumanMessage("Flip it"),
      new AIMessage("T"),
      new HumanMessage("Flip the coin again"),
    ]);
    const res = await model.stream(input);
    const resArray: BaseMessageChunk[] = [];
    for await (const chunk of res) {
      resArray.push(chunk);
    }
    expect(resArray).toBeDefined();
    expect(resArray.length).toBeGreaterThanOrEqual(1);
    const lastChunk = resArray[resArray.length - 1];
    expect(lastChunk).toBeDefined();
    expect(lastChunk._getType()).toEqual("ai");
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const aiChunk = lastChunk as AIMessageChunk;
    // console.log(aiChunk);
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/embeddings.int.test.ts | import { test, expect } from "@jest/globals";
import { VertexAIEmbeddings } from "../embeddings.js";
test("Test VertexAIEmbeddings.embedQuery", async () => {
  const embeddings = new VertexAIEmbeddings({
    model: "textembedding-gecko",
  });
  const res = await embeddings.embedQuery("Hello world");
  // A single query yields one numeric vector.
  expect(typeof res[0]).toBe("number");
});

test("Test VertexAIEmbeddings.embedDocuments", async () => {
  const embeddings = new VertexAIEmbeddings({
    model: "text-embedding-004",
  });
  // Six inputs so the request spans more than one internal batch/page.
  const res = await embeddings.embedDocuments([
    "Hello world",
    "Bye bye",
    "we need",
    "at least",
    "six documents",
    "to test pagination",
  ]);
  // console.log(res);
  expect(res).toHaveLength(6);
  res.forEach((r) => {
    expect(typeof r[0]).toBe("number");
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src | lc_public_repos/langchainjs/libs/langchain-google-vertexai-web/src/tests/chat_models.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatVertexAI } from "../chat_models.js";
test("Serialization", () => {
  const model = new ChatVertexAI({
    authOptions: {
      credentials: "foo",
    },
  });
  // Credentials must serialize as a secret reference (GOOGLE_AUTH_OPTIONS),
  // never as the literal value passed in.
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","vertexai","ChatVertexAI"],"kwargs":{"auth_options":{"lc":1,"type":"secret","id":["GOOGLE_AUTH_OPTIONS"]},"platform_type":"gcp"}}`
  );
});
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": [
"ES2021",
"ES2022.Object",
"DOM"
],
"module": "NodeNext",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": [
"src/**/*",
"src/*"
],
"exclude": [
"node_modules/",
"dist",
"docs",
"bin/"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
// Jest configuration for this package.
module.exports = {
  // ts-jest ESM preset. NOTE(review): .ts files are transformed by @swc/jest
  // below, which takes precedence over the preset's transform — confirm the
  // preset is still required.
  preset: "ts-jest/presets/default-esm",
  // Custom environment restoring the host Float32Array (see jest.env.cjs).
  testEnvironment: "./jest.env.cjs",
  // Never pick up compiled output or docs as test modules.
  modulePathIgnorePatterns: ["dist/", "docs/"],
  moduleNameMapper: {
    // Map ESM-style "./foo.js" specifiers back to "./foo" so TS sources resolve.
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  transform: {
    // Transpile TS/TSX with SWC for speed.
    '^.+\\.tsx?$': ['@swc/jest'],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
  ],
  // Load environment variables from .env before tests run.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
  passWithNoTests: true
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
constructor(config, context) {
// Make `instanceof Float32Array` return true in tests
// to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
super(config, context);
this.global.Float32Array = Float32Array;
}
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/README.md | # @langchain/scripts
This package contains the LangChain.js shared scripts for our packages.
## Installation
```bash npm2yarn
npm install @langchain/scripts
```
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": [
"--workspaces-update=false"
]
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
"arrow-body-style": 0,
},
overrides: [
{
files: ['**/*.test.ts'],
rules: {
'@typescript-eslint/no-unused-vars': 'off'
}
}
]
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
* @param {string} relativePath
* @returns {string}
*/
function abs(relativePath) {
return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}
export const config = {
internals: [/node\:/],
entrypoints: {
index: "index",
build: "build/index",
migrations: "migrations/index",
check_broken_links: "check_broken_links",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
cjsDestination: "./dist",
abs,
additionalGitignorePaths: ["!bin/build.js", "dist_build"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/tsconfig.build.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "./dist_build",
"rootDir": "./src",
"target": "ES2021",
"lib": [
"ES2021",
"ES2022.Object",
"DOM"
],
"module": "NodeNext",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": [
"src/build/*"
],
"exclude": [
"node_modules/",
"dist",
"docs",
"bin/",
"../../node_modules/"
]
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/package.json | {
"name": "@langchain/scripts",
"version": "0.1.4",
"description": "Shared scripts for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-scripts/",
"bin": {
"filter_spam_comment": "bin/filter_spam_comment.js",
"lc_build": "bin/build.js",
"notebook_validate": "bin/validate_notebook.js"
},
"scripts": {
"build": "yarn clean && yarn turbo:command build:internal --filter=@langchain/scripts",
"build:internal": "tsc --project ./tsconfig.build.json && yarn move:artifacts && yarn build:generated",
"move:artifacts": "rimraf dist && mkdir -p dist && mv dist_build/* dist/",
"build:generated": "node bin/build.js --create-entrypoints --pre --tree-shaking",
"build:turbo": "yarn turbo:command build --filter=@langchain/scripts",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf ./dist ./dist_build .turbo",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --write \"src\"",
"format:check": "prettier --check \"src\"",
"create:integration:doc": "node dist/cli/docs/index.js"
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"@octokit/rest": "^21.0.2",
"@rollup/wasm-node": "^4.19.0",
"axios": "^1.6.7",
"commander": "^11.1.0",
"glob": "^10.3.10",
"lodash": "^4.17.21",
"readline": "^1.3.0",
"rimraf": "^5.0.1",
"rollup": "^4.5.2",
"ts-morph": "^21.0.1",
"typescript": "^5.4.5"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@types/lodash": "^4",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"ts-jest": "^29.1.0",
"tsx": "^4.16.2"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./build": {
"types": {
"import": "./build.d.ts",
"require": "./build.d.cts",
"default": "./build.d.ts"
},
"import": "./build.js",
"require": "./build.cjs"
},
"./migrations": {
"types": {
"import": "./migrations.d.ts",
"require": "./migrations.d.cts",
"default": "./migrations.d.ts"
},
"import": "./migrations.js",
"require": "./migrations.cjs"
},
"./check_broken_links": {
"types": {
"import": "./check_broken_links.d.ts",
"require": "./check_broken_links.d.cts",
"default": "./check_broken_links.d.ts"
},
"import": "./check_broken_links.js",
"require": "./check_broken_links.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts",
"build.cjs",
"build.js",
"build.d.ts",
"build.d.cts",
"migrations.cjs",
"migrations.js",
"migrations.d.ts",
"migrations.d.cts",
"check_broken_links.cjs",
"check_broken_links.js",
"check_broken_links.d.ts",
"check_broken_links.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"declaration": false
},
"exclude": [
"node_modules",
"dist",
"docs",
"**/tests",
"bin/"
]
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"dependsOn": ["^build:internal"],
"outputs": ["**/dist/**"]
},
"build:internal": {
"outputs": ["**/dist/**"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-scripts/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts | lc_public_repos/langchainjs/libs/langchain-scripts/src/check_broken_links.ts | import { glob } from "glob";
import fs from "node:fs/promises";
import axios from "axios";
const DEFAULT_WHITELIST = [
"openai.com",
"ibm.com",
"x.com",
"twitter.com",
"npmjs.com",
"microsoft.com",
];
type CheckBrokenLinksOptions = {
logErrors?: boolean;
timeout?: number;
whitelist?: string[];
retryFailed?: boolean;
};
/**
 * Split an array into consecutive chunks of at most `batchSize` elements.
 * The final chunk may be shorter when the length is not a multiple of the size.
 */
const batchArray = <T>(array: T[], batchSize: number): T[][] => {
  const chunks: T[][] = [];
  let start = 0;
  while (start < array.length) {
    chunks.push(array.slice(start, start + batchSize));
    start += batchSize;
  }
  return chunks;
};
/**
 * Read a UTF-8 text file and return its contents, or `null` when the file
 * cannot be read (missing, permissions, etc.). The underlying error is only
 * surfaced on the console when `options.logErrors` is set.
 */
const readFile = async (
  pathName: string,
  options?: { logErrors?: boolean }
): Promise<string | null> => {
  try {
    return await fs.readFile(pathName, "utf-8");
  } catch (e) {
    if (options?.logErrors) {
      console.error(
        {
          error: e,
        },
        `Error reading file: ${pathName}`
      );
    }
    return null;
  }
};
/**
 * Pull every markdown-style link target (`[label](https://...)`) out of the
 * given text. Only `https://` URLs are matched; the label may span newlines.
 */
export const extractLinks = (content: string): string[] => {
  const linkPattern = /\[[\s\S]*?\]\((https:\/\/.*?)\)/g;
  return Array.from(content.matchAll(linkPattern), (match) => match[1]);
};
/**
 * Verify that a URL is reachable.
 *
 * Whitelisted domains (the defaults plus any in `options.whitelist`) are
 * assumed good and skipped entirely. Otherwise the URL passes when a GET
 * request — following up to 5 redirects — resolves with a 2xx/3xx status
 * within the timeout; any network error or timeout counts as broken.
 */
export const checkUrl = async (
  url: string,
  options?: CheckBrokenLinksOptions
) => {
  const timeout = options?.timeout || 3000;
  const whitelist = [...DEFAULT_WHITELIST, ...(options?.whitelist ?? [])];
  if (whitelist.some((domain) => url.includes(domain))) {
    return true;
  }
  try {
    const response = await axios.get(url, {
      // Allow up to 5 redirects
      maxRedirects: 5,
      // Allow status codes in the 200 and 300 range
      validateStatus: (status) => status >= 200 && status < 400,
      // Set a timeout so the request doesn't hang
      timeout,
    });
    return response.status >= 200 && response.status < 400;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  } catch (e: any) {
    if (options?.logErrors) {
      // Prefer the underlying cause when axios wraps the failure.
      const underlying = "cause" in e ? e.cause : e;
      console.error(
        {
          error: underlying,
        },
        `Error fetching url: ${url}`
      );
    }
    return false;
  }
};
/**
 * Check every link found in a single file.
 *
 * Returns the number of links inspected and — when any of them fail — a
 * human-readable failure message plus the list of broken URLs so callers can
 * retry them later. An unreadable file counts as zero links checked.
 */
const checkLinksInFile = async (
  filePath: string,
  options?: CheckBrokenLinksOptions
): Promise<{
  linksChecked: number;
  message?: string;
  failedUrls?: string[];
}> => {
  const content = await readFile(filePath, { logErrors: options?.logErrors });
  if (!content) {
    if (options?.logErrors) {
      console.error(`Could not read file: ${filePath}`);
    }
    return { linksChecked: 0 };
  }
  const links = extractLinks(content);
  // Probe all links concurrently, remembering which URL each result belongs to.
  const probes = await Promise.all(
    links.map(async (link) => ({ link, ok: await checkUrl(link, options) }))
  );
  const brokenLinks = probes.filter((probe) => !probe.ok).map((probe) => probe.link);
  if (brokenLinks.length === 0) {
    return { linksChecked: links.length };
  }
  return {
    linksChecked: links.length,
    message: `Found ${
      brokenLinks.length
    } broken links in ${filePath}:\nLinks:\n - ${brokenLinks.join("\n - ")}`,
    failedUrls: brokenLinks,
  };
};
/**
 * Scan every `.mdx` file under `mdxDirPath` for markdown links and verify
 * each one resolves (see `checkUrl`). Files are processed in batches of 10
 * to bound the number of concurrent requests. Throws a single `Error`
 * summarizing every broken link; resolves silently when all links are OK.
 */
export async function checkBrokenLinks(
  mdxDirPath: string,
  options?: CheckBrokenLinksOptions
) {
  const startTime = Date.now();
  const allMdxFiles = await glob(`${mdxDirPath}/**/*.mdx`);
  const fileCount = allMdxFiles.length;
  let linksChecked = 0;
  // Limit concurrency: only 10 files' worth of requests in flight at a time.
  const batchSize = 10;
  const batches = batchArray(allMdxFiles, batchSize);
  const failedUrls: string[] = [];
  const results: string[] = [];
  for await (const batch of batches) {
    const batchLinksChecked = batch.map((filePath) =>
      checkLinksInFile(filePath, options)
    );
    const batchResults = await Promise.all(batchLinksChecked);
    // Fold each file's result into the running totals/failure lists.
    const batchLinksCount = batchResults.reduce<number>((acc, result) => {
      let accCopy = acc;
      if (typeof result.linksChecked === "number") {
        accCopy += result.linksChecked;
      }
      // Do not push the message if we are retrying failed links
      // because we will push the message again after retrying
      if (result.message && !options?.retryFailed) {
        results.push(result.message);
      }
      if (result.failedUrls) {
        failedUrls.push(...result.failedUrls);
      }
      return accCopy;
    }, 0);
    linksChecked += batchLinksCount;
  }
  // Optional second pass: re-check each unique failed URL sequentially to
  // weed out transient network errors before reporting.
  if (options?.retryFailed && failedUrls.length) {
    console.log(`Retrying ${failedUrls.length} failed urls...`);
    const uniqueFailedUrls = [...new Set(failedUrls)];
    const stillFailed: string[] = [];
    for await (const url of uniqueFailedUrls) {
      const isOk = await checkUrl(url, options);
      if (!isOk) {
        stillFailed.push(url);
      }
    }
    if (stillFailed.length > 0) {
      results.push(
        `Found ${
          stillFailed.length
        } broken links after retrying:\nLinks:\n - ${stillFailed.join("\n - ")}`
      );
    }
  }
  const endTime = Date.now();
  const totalTimeInSeconds = (endTime - startTime) / 1000;
  console.log(
    `Checked ${linksChecked} links inside ${fileCount} files. Took ${totalTimeInSeconds} seconds.`
  );
  // Surface every accumulated failure message as one thrown error so CI
  // fails loudly with the complete list.
  if (results.length) {
    const errorMsg = results.join("\n\n");
    throw new Error(errorMsg);
  }
  // NOTE(review): the trailing "πππ" looks like a mojibake-garbled emoji —
  // confirm the intended characters against the original source.
  console.log("No broken links found! πππ");
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts | lc_public_repos/langchainjs/libs/langchain-scripts/src/types.ts | /** @ignore We don't need API refs for these */
export declare type PackageJSONDependencyTypes =
| "dependencies"
| "devDependencies"
| "peerDependencies"
| "optionalDependencies";
/** @ignore We don't need API refs for these */
export interface PackageJSONAddress {
email?: string;
url?: string;
}
/** @ignore We don't need API refs for these */
export interface PackageJSONPerson extends PackageJSONAddress {
name: string;
}
export interface ExportsMapValue {
types: {
import: string;
require: string;
default: string;
};
import: string;
require: string;
}
/** @ignore We don't need API refs for these */
export interface PackageJSON {
name: string;
version: string;
description?: string;
keywords?: string;
homepage?: string;
bugs?: PackageJSONAddress;
license?: string;
author?: string | PackageJSONPerson;
contributors?: string[] | PackageJSONPerson[];
files?: string[];
main?: string;
browser?: string;
bin?: Record<string, string>;
man?: string;
directories?: {
lib?: string;
bin?: string;
man?: string;
doc?: string;
example?: string;
test?: string;
};
repository?: {
type?: "git";
url?: string;
directory?: string;
};
scripts?: Record<string, string>;
config?: Record<string, string>;
dependencies?: Record<string, string>;
devDependencies?: Record<string, string>;
peerDependencies?: Record<string, string>;
peerDependenciesMeta?: Record<string, Record<string, boolean>>;
optionalDependencies?: Record<string, string>;
bundledDependencies?: string[];
engines?: Record<string, string>;
os?: string[];
cpu?: string[];
exports?:
| Record<string, ExportsMapValue | string>
| Record<"./package.json", "./package.json">;
}
export type TreeShakingArgs = {
/**
* @default [...Object.keys(packageJson.dependencies), ...Object.keys(packageJson.peerDependencies), /node:/, /@langchain\/core\//]
*/
extraInternals?: Array<string | RegExp>;
};
export interface ImportData {
imports: Record<string, string[]>;
exportedAliases: Record<string, string[]>;
}
export interface ExtraImportMapEntry {
modules: Array<string>;
alias: Array<string>;
path: string;
}
export interface LangChainConfig {
/**
* This lists all the entrypoints for the library. Each key corresponds to an
* importable path, eg. `import { AgentExecutor } from "langchain/agents"`.
* The value is the path to the file in `src/` that exports the entrypoint.
* This is used to generate the `exports` field in package.json.
* Order is not important.
*/
entrypoints: Record<string, string>;
/**
* Entrypoints in this list require an optional dependency to be installed.
* Therefore they are not tested in the generated test-exports-* packages.
*/
requiresOptionalDependency?: string[];
/**
* Entrypoints in this list will
* 1. Be excluded from the documentation
* 2. Be only available in Node.js environments (for backwards compatibility)
*/
deprecatedNodeOnly?: string[];
/**
* Endpoints that are deprecated due to redundancy. Will not appear in the import map.
*/
deprecatedOmitFromImportMap?: string[];
/**
* The suffix of the package. Eg. `community` for `@langchain/community`.
* Used in the generated import map.
*/
packageSuffix?: string;
/**
* Whether or not to write to the test exports files. At the moment this only
* applies to the `langchain` package.
*/
shouldTestExports?: boolean;
/**
* Extra entries to add to the import map.
*/
extraImportMapEntries?: Array<ExtraImportMapEntry>;
/**
* The absolute path to the tsconfig.json file.
*/
tsConfigPath: string;
/**
* Paths to add to .gitignore
* @default ["node_modules", "dist", ".yarn"]
* @type {string[]}
*/
gitignorePaths?: string[];
internals?: Array<string | RegExp>;
/**
* The source of the `.cjs` files to move.
*/
cjsSource: string;
/**
* The destination to move the `.cjs` files to.
*/
cjsDestination: string;
/**
* @param {string} relativePath
* @returns {string}
*/
abs: (relativePath: string) => string;
/**
* Additional paths to add to the gitignore file.
* @default undefined
* @type {string[]}
*/
additionalGitignorePaths?: string[];
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts | lc_public_repos/langchainjs/libs/langchain-scripts/src/index.ts | export type { LangChainConfig } from "./types.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts | lc_public_repos/langchainjs/libs/langchain-scripts/src/filter_spam_comment.ts | import { Octokit } from "@octokit/rest";
/**
 * GitHub Actions helper: inspect a comment (passed in via environment
 * variables) against known spam patterns and delete it through the GitHub
 * API when it matches.
 *
 * Required environment variables: SPAM_COMMENT_GITHUB_TOKEN, COMMENT_JSON,
 * COMMENT_ID, REPO_OWNER, REPO_NAME. Throws when any of them is unset.
 */
async function spamContentFilter() {
  // Validate required configuration up front, in a fixed order.
  const requiredEnvVars = [
    "SPAM_COMMENT_GITHUB_TOKEN",
    "COMMENT_JSON",
    "COMMENT_ID",
    "REPO_OWNER",
    "REPO_NAME",
  ] as const;
  for (const name of requiredEnvVars) {
    if (process.env[name] === undefined) {
      throw new Error(`${name} is not set`);
    }
  }
  const octokit = new Octokit({ auth: process.env.SPAM_COMMENT_GITHUB_TOKEN });
  const comment: { body: string } = JSON.parse(process.env.COMMENT_JSON || "");
  const commentId = parseInt(process.env.COMMENT_ID || "", 10);
  const owner = process.env.REPO_OWNER || "";
  const repo = process.env.REPO_NAME || "";
  // Patterns that identify spam comments (malware "download + password" bait).
  const SPAM_COMMENT_REGEX = [
    /^download\s+(?:https?:\/\/)?[\w-]+(\.[\w-]+)+[^\s]+\s+password:\s*.+\s+in the installer menu, select\s*.+$/i,
  ];
  const isSpam = SPAM_COMMENT_REGEX.some((pattern) =>
    pattern.test(comment.body.toLowerCase())
  );
  if (!isSpam) {
    console.log("Comment is not spam");
    return;
  }
  try {
    await octokit.rest.issues.deleteComment({
      owner,
      repo,
      comment_id: commentId,
    });
    console.log(`Deleted spam comment with ID: ${commentId}`);
  } catch (error) {
    console.error("Error deleting comment:", error);
  }
}

void spamContentFilter();
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/migrations/0_1.ts | import { ImportSpecifier, Project, SourceFile, SyntaxKind } from "ts-morph";
import { glob } from "glob";
import path from "node:path";
import { LangChainConfig } from "../types.js";
type ExportedSymbol = { symbol: string; kind: SyntaxKind };
type EntrypointAndSymbols = {
entrypoint: string;
exportedSymbols: Array<ExportedSymbol>;
};
const enum UpgradingModule {
COHERE = "cohere",
PINECONE = "pinecone",
}
/**
 * Load a package's `langchain.config.js` and collect, for every public
 * (non-deprecated-node-only) entrypoint, the symbols it exports together
 * with their declaration kinds.
 *
 * NOTE(review): each entrypoint source file is added to (and retained by)
 * the shared ts-morph `project`, so the project accumulates state across
 * calls — callers remove files themselves when needed.
 *
 * @param {string} packagePath - Absolute path to the package root (must contain `langchain.config.js`).
 * @param {Project} project - Shared ts-morph project used to parse the entrypoint files.
 * @returns {Array<EntrypointAndSymbols> }
 */
async function getEntrypointsFromFile(
  packagePath: string,
  project: Project
): Promise<Array<EntrypointAndSymbols>> {
  const { config }: { config: LangChainConfig } = await import(
    path.join(packagePath, "langchain.config.js")
  );
  const { entrypoints, deprecatedNodeOnly } = config;
  const result = Object.entries(entrypoints).flatMap(([key, value]) => {
    // Skip entrypoints flagged as deprecated/Node-only in the config.
    if (deprecatedNodeOnly?.includes(key)) {
      return [];
    }
    const newFile = project.addSourceFileAtPath(
      path.join(packagePath, "src", `${value}.ts`)
    );
    const exportedSymbolsMap = newFile.getExportedDeclarations();
    const exportedSymbols = Array.from(exportedSymbolsMap.entries())
      .filter(([_, declarations]) => declarations.length > 0)
      .map(([symbol, declarations]) => ({
        // Use the first declaration's kind as representative for the symbol.
        kind: declarations[0].getKind(),
        symbol,
      }));
    return {
      entrypoint: key,
      exportedSymbols,
    };
  });
  return result;
}
type FoundSymbol = {
entrypoint: string;
foundSymbol: string;
packageSuffix: string;
};
/**
 * Finds a matching symbol in the array of exported symbols.
 * @param {{ symbol: string, kind: SyntaxKind }} target - The target symbol and its kind to find.
 * @param {Array<EntrypointAndSymbols>} exportedSymbols - The array of exported symbols to search.
 * @param {string} packageSuffix - The suffix of the package to import from. Eg, core
 * @returns {FoundSymbol | undefined} The first entrypoint exporting the symbol (matched by name AND kind), or undefined when none does.
 */
function findMatchingSymbol(
  target: { symbol: string; kind: SyntaxKind },
  exportedSymbols: Array<EntrypointAndSymbols>,
  packageSuffix: string
): FoundSymbol | undefined {
  for (const entry of exportedSymbols) {
    const match = entry.exportedSymbols.find(
      (exported) =>
        exported.symbol === target.symbol && exported.kind === target.kind
    );
    if (match) {
      return {
        entrypoint: entry.entrypoint,
        foundSymbol: match.symbol,
        packageSuffix,
      };
    }
  }
  return undefined;
}
/**
 * Normalizes entrypoints for import rewriting: the `index` entrypoint
 * becomes the bare package root (`""`) and every other entrypoint gains a
 * leading `/`. The `load` symbol exported from the `load` entrypoint is
 * filtered out so it is never offered as a migration target.
 *
 * @param {Array<EntrypointAndSymbols>} entrypoints
 * @returns {Array<EntrypointAndSymbols>}
 */
function removeLoad(
  entrypoints: Array<EntrypointAndSymbols>
): Array<EntrypointAndSymbols> {
  return entrypoints.flatMap((entrypoint) => {
    const newEntrypoint =
      entrypoint.entrypoint === "index" ? "" : `/${entrypoint.entrypoint}`;
    const withoutLoadOrIndex = entrypoint.exportedSymbols.filter((item) => {
      // BUG FIX: previously this compared the already-transformed entrypoint
      // ("/load") against "load", so the condition could never be true and
      // the "load" symbol was never removed. Compare the raw entrypoint name.
      if (item.symbol === "load" && entrypoint.entrypoint === "load") {
        return false;
      }
      return true;
    });
    return {
      entrypoint: newEntrypoint,
      exportedSymbols: withoutLoadOrIndex,
    };
  });
}
/**
 * Applies the first successful symbol match to the source file: removes the
 * deprecated named import and adds a fresh import declaration pointing at
 * the matching `@langchain/<suffix><entrypoint>` module.
 *
 * Mutates `namedImport` and `projectFile` in place; the caller is
 * responsible for saving the file afterwards.
 *
 * @returns true when an import was rewritten, false when no package matched.
 */
function updateImport({
  matchingSymbols,
  namedImport,
  projectFile,
  namedImportText,
}: {
  matchingSymbols: Array<FoundSymbol | undefined>;
  namedImport: ImportSpecifier;
  projectFile: SourceFile;
  namedImportText: string;
}): boolean {
  // Candidates are passed in priority order; take the first defined one.
  const firstMatchingSymbol = matchingSymbols.find(
    (matchingSymbol) => matchingSymbol
  );
  if (firstMatchingSymbol) {
    console.debug(
      `Found matching symbol in the "@langchain/${firstMatchingSymbol.packageSuffix}" package.`,
      {
        matchingSymbol: firstMatchingSymbol,
      }
    );
    // Remove the old named import first, then append a new import
    // declaration targeting the replacement module.
    namedImport.remove();
    projectFile.addImportDeclaration({
      moduleSpecifier: `@langchain/${firstMatchingSymbol.packageSuffix}${firstMatchingSymbol.entrypoint}`,
      namedImports: [namedImportText],
    });
    return true;
  }
  return false;
}
/**
 * Find imports from deprecated pre 0.1 LangChain modules and update them to import
 * from the new LangChain packages.
 *
 * Walks every matched file under `codePath`; for each named import from a
 * `langchain/*` entrypoint, the symbol is looked up (by name and declaration
 * kind) in the locally cloned LangChain repo's core/community/openai — and
 * optionally cohere/pinecone — packages, and rewritten in place to the new
 * `@langchain/<pkg>` entrypoint. Files are saved as they are updated.
 */
export async function updateEntrypointsFrom0_0_xTo0_1_x({
  localLangChainPath,
  codePath,
  customGlobPattern,
  customIgnorePattern,
  skipCheck,
}: {
  /**
   * The absolute path to the locally cloned LangChain repo root.
   * @example "/Users/username/code/langchainjs"
   */
  localLangChainPath: string;
  /**
   * The absolute path to the source directory of the codebase to update.
   * @example "/Users/username/code/my-project/src"
   */
  codePath: string;
  /**
   * Optionally, pass in a custom glob pattern to match files.
   * The backslash included in the example and default is only for
   * JSDoc to escape the asterisk. Do not include unless intentionally.
   * @example "/*.d.ts"
   * @default "**\/*.ts"
   */
  customGlobPattern?: string;
  /**
   * A custom ignore pattern for ignoring files.
   * The backslash included in the example and default is only for
   * JSDoc to escape the asterisk. Do not include unless intentionally.
   * @example ["**\/node_modules/**", "**\/dist/**", "**\/*.d.ts"]
   * @default node_modules/**
   */
  customIgnorePattern?: string[] | string;
  /**
   * Optionally skip checking the passed modules for imports to
   * update.
   * @example [UpgradingModule.COHERE]
   * @default undefined
   */
  skipCheck?: Array<UpgradingModule>;
}) {
  const project = new Project();
  // Build the lookup tables of exported symbols for each target package.
  const langchainCorePackageEntrypoints = removeLoad(
    await getEntrypointsFromFile(
      path.join(localLangChainPath, "langchain-core"),
      project
    )
  );
  const langchainCommunityPackageEntrypoints = removeLoad(
    await getEntrypointsFromFile(
      path.join(localLangChainPath, "libs", "langchain-community"),
      project
    )
  );
  const langchainOpenAIPackageEntrypoints = removeLoad(
    await getEntrypointsFromFile(
      path.join(localLangChainPath, "libs", "langchain-openai"),
      project
    )
  );
  // Cohere and Pinecone lookups are optional, controlled by `skipCheck`.
  const langchainCoherePackageEntrypoints = !skipCheck?.includes(
    UpgradingModule.COHERE
  )
    ? removeLoad(
        await getEntrypointsFromFile(
          path.join(localLangChainPath, "libs", "langchain-cohere"),
          project
        )
      )
    : null;
  const langchainPineconePackageEntrypoints = !skipCheck?.includes(
    UpgradingModule.PINECONE
  )
    ? removeLoad(
        await getEntrypointsFromFile(
          path.join(localLangChainPath, "libs", "langchain-pinecone"),
          project
        )
      )
    : null;
  const globPattern = customGlobPattern || "/**/*.ts";
  const ignorePattern = customIgnorePattern;
  const allCodebaseFiles = (
    await glob(path.join(codePath, globPattern), {
      ignore: ignorePattern,
    })
  )
    .map((filePath) => path.resolve(filePath))
    .filter((filePath) => !filePath.includes("node_modules/"));
  for await (const filePath of allCodebaseFiles) {
    let projectFile: SourceFile;
    try {
      projectFile = project.addSourceFileAtPath(filePath);
      if (!projectFile) {
        throw new Error(`Failed to add source file at path: ${filePath}`);
      }
    } catch (error) {
      console.error(
        {
          filePath,
          error,
        },
        "Error occurred while trying to add source file. Continuing"
      );
      // BUG FIX: this previously executed `return;`, aborting the entire
      // migration on the first unreadable file despite the log message
      // saying "Continuing". Skip just this file instead.
      continue;
    }
    try {
      const imports = projectFile.getImportDeclarations();
      imports.forEach((importItem) => {
        // Get all imports
        const module = importItem.getModuleSpecifierValue();
        // Get only the named imports. Eg: import { foo } from "langchain/util";
        const namedImports = importItem.getNamedImports();
        if (!module.startsWith("langchain/")) {
          return;
        }
        // look at each import and see if it exists in
        let didUpdate = false;
        namedImports.forEach((namedImport) => {
          const namedImportText = namedImport.getText();
          let namedImportKind: SyntaxKind | null = null;
          const symbol = namedImport.getSymbol();
          if (symbol) {
            // Resolve alias symbol to its original symbol
            const aliasedSymbol = symbol.getAliasedSymbol() || symbol;
            // Get the original declarations of the symbol
            const declarations = aliasedSymbol.getDeclarations();
            if (declarations.length > 0) {
              // Assuming the first declaration is the original one
              const originalDeclarationKind = declarations[0].getKind();
              namedImportKind = originalDeclarationKind;
            }
          }
          // If we couldn't find the kind of the named imports kind, skip it
          if (!namedImportKind) {
            return;
          }
          const matchingSymbolCore = findMatchingSymbol(
            { symbol: namedImportText, kind: namedImportKind },
            langchainCorePackageEntrypoints,
            "core"
          );
          const matchingSymbolCommunity = findMatchingSymbol(
            { symbol: namedImportText, kind: namedImportKind },
            langchainCommunityPackageEntrypoints,
            "community"
          );
          const matchingSymbolOpenAI = findMatchingSymbol(
            { symbol: namedImportText, kind: namedImportKind },
            langchainOpenAIPackageEntrypoints,
            "openai"
          );
          const matchingSymbolCohere = langchainCoherePackageEntrypoints
            ? findMatchingSymbol(
                { symbol: namedImportText, kind: namedImportKind },
                langchainCoherePackageEntrypoints,
                "cohere"
              )
            : undefined;
          const matchingSymbolPinecone = langchainPineconePackageEntrypoints
            ? findMatchingSymbol(
                { symbol: namedImportText, kind: namedImportKind },
                langchainPineconePackageEntrypoints,
                "pinecone"
              )
            : undefined;
          // Priority order: core, openai, cohere, pinecone, community.
          didUpdate = updateImport({
            matchingSymbols: [
              matchingSymbolCore,
              matchingSymbolOpenAI,
              matchingSymbolCohere,
              matchingSymbolPinecone,
              matchingSymbolCommunity,
            ],
            namedImport,
            projectFile,
            namedImportText,
          });
        });
        if (didUpdate) {
          projectFile.saveSync();
          // Check if all named imports were removed, and only a file import remains.
          // eg: import { foo } from "langchain/anthropic"; -> import "langchain/anthropic";
          // if so, remove the import entirely
          const importClause = importItem.getImportClause();
          if (
            !importClause ||
            (!importClause.getDefaultImport() &&
              importClause.getNamedImports().length === 0)
          ) {
            importItem.remove();
            projectFile.saveSync();
          }
        }
      });
    } catch (error) {
      console.error(
        {
          filePath,
          error,
        },
        "Error occurred while trying to read file. Continuing"
      );
    }
    // Remove source file from the project after we're done with it
    // to prevent OOM errors.
    if (projectFile) {
      project.removeSourceFile(projectFile);
    }
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/migrations/0_2.ts | import { glob } from "glob";
import { ImportSpecifier, Project } from "ts-morph";
import path from "node:path";
import {
type DeprecatedEntrypoint,
importMap as importMapArr,
} from "../_data/importMap.js";
// Symbols removed in the 0.2.x line — NOTE(review): these appear to have no
// automatic migration target (confirm against the import map); imports of
// them must be handled manually.
const DEPRECATED_AND_DELETED_IMPORTS = [
  "PromptLayerOpenAI",
  "loadPrompt",
  "ChatGPTPluginRetriever",
];
type MigrationUpdate = {
/**
* The path of the file which was updated.
*/
path: string;
/**
* The original import statement.
*/
oldImport: string;
/**
* The updated import statement.
*/
updatedImport: string;
};
export interface UpdateLangChainFields {
/**
* The path to the project to check.
* TypeScript files will be extracted
* via this glob pattern: `${projectPath}/{star}{star}/{star}.{ts,tsx,js,jsx}`.
* @optional - Not required if `files`, or 'tsConfigPath' is provided.
*/
projectPath?: string;
/**
* A list of .ts file paths to check.
* @optional - Not required if `projectPath`, or 'tsConfigPath' is provided.
*/
files?: string[];
/**
* Path to the tsConfig file. This will be used to load
* all the project files into the script.
* @optional - Not required if `projectPath`, or 'files' is provided.
*/
tsConfigPath?: string;
/**
* Whether or not to log a message when an import is updated.
* @default false
*/
shouldLog?: boolean;
/**
* Whether or not the invocation is a test run.
* If `testRun` is set to true, the script will NOT save changes.
* @default false
*/
testRun?: boolean;
}
/**
 * Find the entrypoint in the import map that matches the
 * old entrypoint and named imports.
 *
 * Two passes: first look for entries whose old entrypoint matches exactly
 * (or via a trailing "/*" wildcard) and whose named import — if any — is
 * actually imported; failing that, fall back to matching purely on the
 * named imports themselves.
 */
function findNewEntrypoint(
  importMap: Array<DeprecatedEntrypoint>,
  entrypointToReplace: string,
  namedImports: ImportSpecifier[]
): {
  newEntrypoint: string;
  namedImports: string[] | null;
} | null {
  const importsSymbol = (name: string | null): boolean =>
    namedImports.some((namedImport) => namedImport.getName() === name);

  // Pass 1: exact or wildcard entrypoint match.
  const entrypointMatches = importMap.filter((entry) => {
    if (entry.old === entrypointToReplace) {
      return true;
    }
    if (entry.old.endsWith("/*")) {
      return entrypointToReplace.startsWith(entry.old.replace("/*", ""));
    }
    return false;
  });
  const usableMatch = entrypointMatches.find(
    (entry) => entry.namedImport === null || importsSymbol(entry.namedImport)
  );
  if (usableMatch) {
    return {
      newEntrypoint: usableMatch.new,
      namedImports: null,
    };
  }

  // Pass 2: no entrypoint match — match on named imports alone.
  const symbolMatches = importMap.filter(
    (entry) => entry.namedImport !== null && importsSymbol(entry.namedImport)
  );
  if (symbolMatches.length) {
    return {
      newEntrypoint: symbolMatches[0].new,
      namedImports: symbolMatches
        .map((entry) => entry.namedImport)
        .filter((name): name is string => name !== null),
    };
  }
  return null;
}
/**
 * Migrates a project's LangChain imports from version 0.0.x or 0.1.x to 0.2.x.
 * This function updates the import statements in the specified project files
 * based on the provided import map.
 *
 * @param {UpdateLangChainFields} fields - The configuration object for the migration.
 * @param {string} [fields.projectPath] - The path to the project to check. TypeScript files will be extracted
 * via this glob pattern: `${projectPath}/{star-star}/{star}.{ts,tsx,js,jsx}`. Not required if `files` or `tsConfigPath` is provided.
 * @param {string[]} [fields.files] - A list of .ts file paths to check. Not required if `projectPath` or `tsConfigPath` is provided.
 * @param {string} [fields.tsConfigPath] - Path to the tsConfig file. This will be used to load
 * all the project files into the script. Not required if `projectPath` or `files` is provided.
 * @param {boolean} [fields.shouldLog=false] - Whether or not to log a message when an import is updated.
 * @returns {Promise<Array<MigrationUpdate> | null>} - A promise that resolves to an array of migration updates if successful, or null if an error occurs.
 * @throws {Error} - If more than one of `projectPath`, `tsConfigPath`, or `files` is provided, or if none of them are provided.
 *
 * @example
 * ```typescript
 * import { updateEntrypointsFrom0_x_xTo0_2_x } from "@langchain/scripts/migrations";
 *
 * const pathToMyProject = "...";
 *
 * updateEntrypointsFrom0_x_xTo0_2_x({
 *   projectPath: pathToMyProject,
 *   shouldLog: true,
 * });
 * ```
 */
export async function updateEntrypointsFrom0_x_xTo0_2_x(
  fields: UpdateLangChainFields
): Promise<Array<MigrationUpdate> | null> {
  // The three file sources are mutually exclusive per the @throws contract.
  // BUGFIX: the previous check only threw when ALL THREE were provided,
  // silently accepting any two at once; count them instead so every
  // combination of two or more is rejected.
  const providedSourceCount =
    (fields.projectPath ? 1 : 0) +
    (fields.files && fields.files.length > 0 ? 1 : 0) +
    (fields.tsConfigPath ? 1 : 0);
  if (providedSourceCount > 1) {
    throw new Error(
      "Only one of `projectPath`, `tsConfigPath`, or `files` can be provided."
    );
  }
  if (providedSourceCount === 0) {
    throw new Error(
      "One of `projectPath`, `tsConfigPath`, or `files` must be provided."
    );
  }
  let projectFiles: string[] | null = null;
  if (fields.projectPath) {
    // NOTE(review): `glob` is used here but does not appear in this file's
    // visible imports — confirm `import { glob } from "glob";` exists at the top.
    projectFiles = glob.sync(
      path.join(fields.projectPath, "/**/*.{ts,tsx,js,jsx}")
    );
  } else if (fields.files) {
    projectFiles = fields.files;
  }
  // Instantiate the ts-morph project. When `tsConfigPath` was provided the
  // project loads files from it; otherwise the explicit file list is added.
  const project = new Project({
    tsConfigFilePath: fields.tsConfigPath,
  });
  if (projectFiles) {
    project.addSourceFilesAtPaths(projectFiles);
  }
  const updates: Array<MigrationUpdate> = [];
  // Iterate over every file and rewrite deprecated imports in place.
  project.getSourceFiles().forEach((sourceFile) => {
    try {
      const allImports = sourceFile.getImportDeclarations();
      const filePath = sourceFile.getFilePath();
      allImports.forEach((importDeclaration) => {
        const namedImports = importDeclaration.getNamedImports();
        if (namedImports.length === 0) {
          // Default-only / side-effect imports are never rewritten.
          return;
        }
        if (
          namedImports.length === 1 &&
          DEPRECATED_AND_DELETED_IMPORTS.includes(namedImports[0].getName())
        ) {
          // The sole named import was deleted in 0.2.x — nothing to migrate to.
          return;
        }
        const importPath = importDeclaration.getModuleSpecifierValue();
        const importPathText = importDeclaration.getModuleSpecifier().getText();
        // getText() includes the surrounding quote characters; strip them.
        const importPathTextWithoutQuotes = importPathText.slice(
          1,
          importPathText.length - 1
        );
        // Only `langchain/*` entrypoints (plus one relocated community
        // self-query entrypoint) are candidates for migration.
        if (
          !importPathTextWithoutQuotes.startsWith("langchain/") &&
          importPathTextWithoutQuotes !==
            "@langchain/community/retrievers/self_query/qdrant"
        ) {
          return;
        }
        const matchingEntrypoint = findNewEntrypoint(
          importMapArr,
          importPathTextWithoutQuotes,
          namedImports
        );
        if (matchingEntrypoint === null) {
          // No mapping found — leave the import untouched.
          return;
        }
        if (matchingEntrypoint.namedImports?.length) {
          // Only some named imports move: remove them from the old
          // declaration and re-add them under the new entrypoint.
          const importsRemoved: Array<string> = [];
          namedImports.forEach((namedImport) => {
            const namedImportText = namedImport.getName();
            if (matchingEntrypoint.namedImports?.includes(namedImportText)) {
              importsRemoved.push(namedImportText);
              namedImport.remove();
            }
          });
          if (namedImports.length === importsRemoved.length) {
            // Every named import moved — the old declaration is now empty.
            importDeclaration.remove();
          }
          // Create a new import with the proper named imports.
          sourceFile.addImportDeclaration({
            moduleSpecifier: matchingEntrypoint.newEntrypoint,
            namedImports: importsRemoved,
          });
        } else {
          // The whole entrypoint moved — just rewrite the module specifier.
          importDeclaration.setModuleSpecifier(
            matchingEntrypoint.newEntrypoint
          );
        }
        if (fields.shouldLog) {
          console.log(
            `Updated import: ${importPath} to ${matchingEntrypoint.newEntrypoint} inside ${filePath}`
          );
        }
        updates.push({
          path: filePath,
          oldImport: importPathTextWithoutQuotes,
          updatedImport: matchingEntrypoint.newEntrypoint,
        });
      });
    } catch (e) {
      // A failure in one file must not abort the rest of the migration.
      console.error(
        {
          path: sourceFile.getFilePath(),
          error: e,
        },
        "Error updating imports."
      );
    }
  });
  // Persist changes unless this is a dry run.
  try {
    if (!fields.testRun) {
      await project.save();
    }
    return updates;
  } catch (e) {
    console.error(
      {
        error: e,
      },
      "Error saving changes."
    );
    return null;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/migrations/index.ts | export * from "./0_1.js";
export * from "./0_2.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/_data/importMap.ts | export type DeprecatedEntrypoint = {
  /** Deprecated 0.x entrypoint; a trailing `/*` acts as a prefix wildcard. */
  old: string;
  /** The 0.2.x entrypoint that replaces it. */
  new: string;
  // Specific named import this mapping applies to; `null` means the mapping
  // covers the entire entrypoint regardless of which names are imported.
  namedImport: string | null;
};
export const importMap: Array<DeprecatedEntrypoint> = [
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "MaxMarginalRelevanceSearchOptions",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStoreRetrieverMMRSearchKwargs",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStoreRetrieverInput",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStoreRetrieverInterface",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStoreRetriever",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStoreInterface",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "VectorStore",
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: "SaveableVectorStore",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "ToolParams",
},
{
old: "langchain/tools/base",
new: "@langchain/core/tools",
namedImport: "ToolInputParsingException",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "StructuredTool",
},
{
old: "langchain/tools",
new: "@langchain/core/tools",
namedImport: "Tool",
},
{
old: "langchain/tools/dynamic",
new: "@langchain/core/tools",
namedImport: "BaseDynamicToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "DynamicToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "DynamicStructuredToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "DynamicTool",
},
{
old: "langchain/tools/*",
new: "@langchain/core/tools",
namedImport: "DynamicStructuredTool",
},
{
old: "langchain/schema/storage",
new: "@langchain/core/stores",
namedImport: "BaseStoreInterface",
},
{
old: "langchain/schema/storage",
new: "@langchain/core/stores",
namedImport: "BaseStore",
},
{
old: "langchain/storage/in_memory",
new: "@langchain/core/stores",
namedImport: "InMemoryStore",
},
{
old: "langchain/schema/*",
new: "@langchain/core/prompt_values",
namedImport: "BasePromptValue",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompt_values",
namedImport: "StringPromptValue",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompt_values",
namedImport: "ChatPromptValueFields",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompt_values",
namedImport: "ChatPromptValue",
},
{
old: "langchain/schema",
new: "@langchain/core/outputs",
namedImport: "LLMResult",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "RUN_KEY",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "Generation",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "GenerationChunkFields",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "GenerationChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "LLMResult",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "ChatGeneration",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "ChatGenerationChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/outputs",
namedImport: "ChatResult",
},
{
old: "langchain/memory/base",
new: "@langchain/core/memory",
namedImport: "getPromptInputKey",
},
{
old: "langchain/memory/*",
new: "@langchain/core/memory",
namedImport: "OutputValues",
},
{
old: "langchain/memory/*",
new: "@langchain/core/memory",
namedImport: "MemoryVariables",
},
{
old: "langchain/memory/*",
new: "@langchain/core/memory",
namedImport: "BaseMemory",
},
{
old: "langchain/memory/*",
new: "@langchain/core/memory",
namedImport: "getInputValue",
},
{
old: "langchain/memory/*",
new: "@langchain/core/memory",
namedImport: "getOutputValue",
},
{
old: "langchain/embeddings/base",
new: "@langchain/core/embeddings",
namedImport: "EmbeddingsParams",
},
{
old: "langchain/embeddings/base",
new: "@langchain/core/embeddings",
namedImport: "EmbeddingsInterface",
},
{
old: "langchain/schema/*",
new: "@langchain/core/chat_history",
namedImport: "BaseChatMessageHistory",
},
{
old: "langchain/schema/*",
new: "@langchain/core/chat_history",
namedImport: "BaseListChatMessageHistory",
},
{
old: "langchain/cache/base",
new: "@langchain/core/caches",
namedImport: "deserializeStoredGeneration",
},
{
old: "langchain/cache/base",
new: "@langchain/core/caches",
namedImport: "serializeGeneration",
},
{
old: "langchain/cache/base",
new: "@langchain/core/caches",
namedImport: "getCacheKey",
},
{
old: "langchain/schema/*",
new: "@langchain/core/caches",
namedImport: "BaseCache",
},
{
old: "langchain/cache/*",
new: "@langchain/core/caches",
namedImport: "InMemoryCache",
},
{
old: "langchain/schema/*",
new: "@langchain/core/agents",
namedImport: "AgentAction",
},
{
old: "langchain/schema/*",
new: "@langchain/core/agents",
namedImport: "AgentFinish",
},
{
old: "langchain/schema/*",
new: "@langchain/core/agents",
namedImport: "AgentStep",
},
{
old: "langchain/util/tiktoken",
new: "@langchain/core/utils/tiktoken",
namedImport: "getEncoding",
},
{
old: "langchain/util/tiktoken",
new: "@langchain/core/utils/tiktoken",
namedImport: "encodingForModel",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "atee",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "concat",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "pipeGeneratorWithSetup",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "IterableReadableStreamInterface",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "IterableReadableStream",
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: "AsyncGeneratorWithSetup",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "matrixFunc",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "normalize",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "cosineSimilarity",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "innerProduct",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "euclideanDistance",
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: "maximalMarginalRelevance",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "getBytes",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "getLines",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "getMessages",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "convertEventStreamToIterableReadableDataStream",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "EventStreamContentType",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/core/utils/event_source_parse",
namedImport: "EventSourceMessage",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "getRuntimeEnvironment",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "getEnvironmentVariable",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "isBrowser",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "isWebWorker",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "isJsDom",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "isDeno",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "isNode",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "getEnv",
},
{
old: "langchain/util/env",
new: "@langchain/core/utils/env",
namedImport: "RuntimeEnvironment",
},
{
old: "langchain/util/async_caller",
new: "@langchain/core/utils/async_caller",
namedImport: "FailedAttemptHandler",
},
{
old: "langchain/util/async_caller",
new: "@langchain/core/utils/async_caller",
namedImport: "AsyncCallerParams",
},
{
old: "langchain/util/async_caller",
new: "@langchain/core/utils/async_caller",
namedImport: "AsyncCallerCallOptions",
},
{
old: "langchain/util/async_caller",
new: "@langchain/core/utils/async_caller",
namedImport: "AsyncCaller",
},
{
old: "langchain/util/types",
new: "@langchain/core/utils/types",
namedImport: "StringWithAutocomplete",
},
{
old: "langchain/schema/*",
new: "@langchain/core/utils/types",
namedImport: "InputValues",
},
{
old: "langchain/schema/*",
new: "@langchain/core/utils/types",
namedImport: "PartialValues",
},
{
old: "langchain/schema/*",
new: "@langchain/core/utils/types",
namedImport: "ChainValues",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeSplitIntoListParser",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeRunnable",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeLLM",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeStreamingLLM",
},
{
old: "langchain/smith/tests/runner_utils.int.test",
new: "@langchain/core/utils/testing",
namedImport: "FakeChatModel",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeStreamingChatModel",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeRetriever",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeChatInput",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeListChatModel",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeChatMessageHistory",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeListChatMessageHistory",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeTracer",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeToolParams",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeTool",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "FakeEmbeddings",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "SyntheticEmbeddings",
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: "SingleRunExtractor",
},
{
old: "langchain/util/axios-fetch-adapter.d",
new: "@langchain/core/utils/fast-json-patch",
namedImport: "default",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/utils/fast-json-patch",
namedImport: "Operation",
},
{
old: "langchain/types/type-utils",
new: "@langchain/core/types/type-utils",
namedImport: "Optional",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: "Run",
},
{
old: "langchain/callbacks/handlers/tracer_langchain",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: "RunCreate2",
},
{
old: "langchain/callbacks/handlers/tracer_langchain",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: "RunUpdate",
},
{
old: "langchain/callbacks/handlers/tracer_langchain",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: "LangChainTracerFields",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: "LangChainTracer",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/run_collector",
namedImport: "RunCollectorCallbackHandler",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "LogEntry",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "RunState",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "RunLogPatch",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "RunLog",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "StreamEventData",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "StreamEvent",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "SchemaFormat",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "LogStreamCallbackHandlerInput",
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: "LogStreamCallbackHandler",
},
{
old: "langchain/callbacks/handlers/initialize",
new: "@langchain/core/tracers/initialize",
namedImport: "getTracingCallbackHandler",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/initialize",
namedImport: "getTracingV2CallbackHandler",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/console",
namedImport: "ConsoleCallbackHandler",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/base",
namedImport: "RunType",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/base",
namedImport: "Run",
},
{
old: "langchain/callbacks/handlers/tracer",
new: "@langchain/core/tracers/base",
namedImport: "AgentRun",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/tracers/base",
namedImport: "BaseTracer",
},
{
old: "langchain/retrievers/self_query/base",
new: "@langchain/core/structured_query",
namedImport: "TranslatorOpts",
},
{
old: "@langchain/community/retrievers/self_query/qdrant",
new: "@langchain/community/structured_query/qdrant",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/chroma",
new: "@langchain/community/structured_query/chroma",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/pinecone",
new: "@langchain/pinecone",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/supabase",
new: "@langchain/community/structured_query/supabase",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/supabase_utils",
new: "@langchain/community/structured_query/supabase_utils",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/*",
new: "@langchain/core/structured_query",
namedImport: "BaseTranslator",
},
{
old: "langchain/retrievers/self_query/*",
new: "@langchain/core/structured_query",
namedImport: "BasicTranslator",
},
{
old: "langchain/retrievers/self_query/functional",
new: "@langchain/core/structured_query",
namedImport: null,
},
{
old: "langchain/retrievers/self_query/*",
new: "@langchain/core/structured_query",
namedImport: "FunctionalTranslator",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "AND",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "OR",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "NOT",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Operator",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "EQ",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "NE",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "LT",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "GT",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "LTE",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "GTE",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Comparator",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Operators",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Comparators",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "VisitorResult",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "VisitorOperationResult",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "VisitorComparisonResult",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "VisitorStructuredQueryResult",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Visitor",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Expression",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "FilterDirective",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "Comparison",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "StructuredQuery",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "isObject",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "isFilterEmpty",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "isInt",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "isFloat",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "isString",
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: "castValue",
},
{
old: "langchain/runnables/remote",
new: "@langchain/core/runnables/remote",
namedImport: "RemoteRunnable",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableFunc",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableLike",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableRetryFailedAttemptHandler",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "Runnable",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableBindingArgs",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableBinding",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableEach",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableRetry",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableSequence",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableMap",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableParallel",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableLambda",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableWithFallbacks",
},
{
old: "langchain/schema/runnable/passthrough",
new: "@langchain/core/runnables",
namedImport: "RunnableAssign",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnablePick",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "_coerceToRunnable",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableBatchOptions",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableInterface",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableIOSchema",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableConfig",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "getCallbackManagerForConfig",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "patchConfig",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "ensureConfig",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "mergeConfigs",
},
{
old: "langchain/schema/runnable/passthrough",
new: "@langchain/core/runnables",
namedImport: "RunnablePassthrough",
},
{
old: "langchain/schema/runnable/router",
new: "@langchain/core/runnables",
namedImport: "RouterInput",
},
{
old: "langchain/schema/runnable/router",
new: "@langchain/core/runnables",
namedImport: "RouterRunnable",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableBranch",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "Branch",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "BranchLike",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableWithMessageHistoryInputs",
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: "RunnableWithMessageHistory",
},
{
old: "langchain/schema/retriever",
new: "@langchain/core/retrievers",
namedImport: "BaseRetrieverInput",
},
{
old: "langchain/schema/retriever",
new: "@langchain/core/retrievers",
namedImport: "BaseRetrieverInterface",
},
{
old: "langchain/schema/retriever",
new: "@langchain/core/retrievers",
namedImport: "BaseRetriever",
},
{
old: "langchain/retrievers/document_compressors/*",
new: "@langchain/core/retrievers/document_compressors/base",
namedImport: "BaseDocumentCompressor",
},
{
old: "langchain/prompts/base",
new: "@langchain/core/prompts",
namedImport: "TypedPromptInputValues",
},
{
old: "langchain/schema/*",
new: "@langchain/core/prompts",
namedImport: "Example",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "BasePromptTemplateInput",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "BasePromptTemplate",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "BaseMessagePromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "MessagesPlaceholder",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "MessageStringPromptTemplateFields",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "BaseMessageStringPromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "BaseChatPromptTemplate",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "ChatMessagePromptTemplateFields",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "ChatMessagePromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "HumanMessagePromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "AIMessagePromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "SystemMessagePromptTemplate",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "ChatPromptTemplateInput",
},
{
old: "langchain/prompts/chat",
new: "@langchain/core/prompts",
namedImport: "BaseMessagePromptTemplateLike",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "ChatPromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "FewShotPromptTemplateInput",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "FewShotPromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "FewShotChatMessagePromptTemplateInput",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "FewShotChatMessagePromptTemplate",
},
{
old: "langchain/prompts/pipeline",
new: "@langchain/core/prompts",
namedImport: "PipelinePromptParams",
},
{
old: "langchain/prompts/pipeline",
new: "@langchain/core/prompts",
namedImport: "PipelinePromptTemplateInput",
},
{
old: "langchain/prompts/pipeline",
new: "@langchain/core/prompts",
namedImport: "PipelinePromptTemplate",
},
{
old: "langchain/prompts/prompt",
new: "@langchain/core/prompts",
namedImport: "PromptTemplateInput",
},
{
old: "langchain/prompts/prompt",
new: "@langchain/core/prompts",
namedImport: "ParamsFromFString",
},
{
old: "langchain/prompts/prompt",
new: "@langchain/core/prompts",
namedImport: "PromptTemplate",
},
{
old: "langchain/prompts/serde",
new: "@langchain/core/prompts",
namedImport: "SerializedPromptTemplate",
},
{
old: "langchain/prompts/serde",
new: "@langchain/core/prompts",
namedImport: "SerializedFewShotTemplate",
},
{
old: "langchain/prompts/serde",
new: "@langchain/core/prompts",
namedImport: "SerializedBasePromptTemplate",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/prompts",
namedImport: "BaseStringPromptTemplate",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "TemplateFormat",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "parseFString",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "interpolateFString",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "DEFAULT_FORMATTER_MAPPING",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "DEFAULT_PARSER_MAPPING",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "renderTemplate",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "parseTemplate",
},
{
old: "langchain/prompts/template",
new: "@langchain/core/prompts",
namedImport: "checkValidTemplate",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "FormatInstructionsOptions",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BaseLLMOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BaseOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "OutputParserException",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BytesOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "ListOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "CommaSeparatedListOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "CustomListOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "NumberedListOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "MarkdownListOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "StringOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "JsonMarkdownStructuredOutputParserInput",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "JsonMarkdownFormatInstructionsOptions",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "StructuredOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "JsonMarkdownStructuredOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "AsymmetricStructuredOutputParserFields",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "AsymmetricStructuredOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BaseTransformOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BaseCumulativeTransformOutputParserInput",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "BaseCumulativeTransformOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "JsonOutputParser",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "parsePartialJson",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "parseJsonMarkdown",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "parseXMLMarkdown",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "XML_FORMAT_INSTRUCTIONS",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "XMLOutputParserFields",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "Content",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "XMLResult",
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: "XMLOutputParser",
},
{
old: "langchain/output_parsers/openai_tools",
new: "@langchain/core/output_parsers/openai_tools",
namedImport: "ParsedToolCall",
},
{
old: "langchain/output_parsers/openai_tools",
new: "@langchain/core/output_parsers/openai_tools",
namedImport: "JsonOutputToolsParserParams",
},
{
old: "langchain/output_parsers/openai_tools",
new: "@langchain/core/output_parsers/openai_tools",
namedImport: "JsonOutputToolsParser",
},
{
old: "langchain/output_parsers/openai_tools",
new: "@langchain/core/output_parsers/openai_tools",
namedImport: "JsonOutputKeyToolsParserParams",
},
{
old: "langchain/output_parsers/openai_tools",
new: "@langchain/core/output_parsers/openai_tools",
namedImport: "JsonOutputKeyToolsParser",
},
{
old: "langchain/output_parsers/openai_functions",
new: "@langchain/core/output_parsers/openai_functions",
namedImport: "FunctionParameters",
},
{
old: "langchain/output_parsers/openai_functions",
new: "@langchain/core/output_parsers/openai_functions",
namedImport: "OutputFunctionsParser",
},
{
old: "langchain/output_parsers/openai_functions",
new: "@langchain/core/output_parsers/openai_functions",
namedImport: "JsonOutputFunctionsParser",
},
{
old: "langchain/output_parsers/openai_functions",
new: "@langchain/core/output_parsers/openai_functions",
namedImport: "JsonKeyOutputFunctionsParser",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ToolMessageFieldsWithToolCallId",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ToolMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ToolMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "AIMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "AIMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "isBaseMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "isBaseMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "StoredMessageData",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "StoredMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "StoredGeneration",
},
{
old: "langchain/types/assemblyai-types",
new: "@langchain/core/messages",
namedImport: "MessageType",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "MessageContent",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "BaseMessageFields",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "BaseMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "OpenAIToolCall",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "BaseMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "BaseMessageLike",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ChatMessageFieldsWithRole",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ChatMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "ChatMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "FunctionMessageFieldsWithName",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "FunctionMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "FunctionMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "HumanMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "HumanMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "SystemMessage",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "SystemMessageChunk",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "coerceMessageLikeToMessage",
},
{
old: "langchain/memory/*",
new: "@langchain/core/messages",
namedImport: "getBufferString",
},
{
old: "langchain/schema/*",
new: "@langchain/core/messages",
namedImport: "mapStoredMessageToChatMessage",
},
{
old: "langchain/stores/message/utils",
new: "@langchain/core/messages",
namedImport: "mapStoredMessagesToChatMessages",
},
{
old: "langchain/stores/message/utils",
new: "@langchain/core/messages",
namedImport: "mapChatMessagesToStoredMessages",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "get_lc_unique_name",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "BaseSerialized",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "SerializedConstructor",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "SerializedSecret",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "SerializedNotImplemented",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "Serialized",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "SerializableInterface",
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: "Serializable",
},
{
old: "langchain/load/map_keys",
new: "@langchain/core/load/map_keys",
namedImport: "SerializedFields",
},
{
old: "langchain/load/*",
new: "@langchain/core/load",
namedImport: "load",
},
{
old: "langchain/load/import_map",
new: "@langchain/core/load/import_map",
namedImport: "agents",
},
{
old: "langchain/load/import_map",
new: "@langchain/core/load/import_map",
namedImport: "output_parsers",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: "SerializedLLM",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: "BaseLLMParams",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: "BaseLLMCallOptions",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: "BaseLLM",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: "LLM",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "createChatMessageChunkEncoderStream",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "SerializedChatModel",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "SerializedLLM",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "BaseChatModelParams",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "BaseChatModelCallOptions",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "BaseChatModel",
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: "SimpleChatModel",
},
{
old: "langchain/base_language/count_tokens",
new: "@langchain/core/language_models/base",
namedImport: "getEmbeddingContextSize",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "getModelContextSize",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "calculateMaxTokens",
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/base",
namedImport: "SerializedLLM",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLangChainParams",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLangChain",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLanguageModelParams",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLanguageModelCallOptions",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseFunctionCallOptions",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLanguageModelInput",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLanguageModelInterface",
},
{
old: "langchain/base_language/*",
new: "@langchain/core/language_models/base",
namedImport: "BaseLanguageModel",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "BaseExampleSelector",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "isLLM",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "isChatModel",
},
{
old: "langchain/prompts/selectors/conditional",
new: "@langchain/core/example_selectors",
namedImport: "BaseGetPromptAsyncOptions",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "BasePromptSelector",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "ConditionalPromptSelector",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "LengthBasedExampleSelectorInput",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "LengthBasedExampleSelector",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "SemanticSimilarityExampleSelectorInput",
},
{
old: "langchain/prompts/*",
new: "@langchain/core/example_selectors",
namedImport: "SemanticSimilarityExampleSelector",
},
{
old: "langchain/document",
new: "@langchain/core/documents",
namedImport: "DocumentInput",
},
{
old: "langchain/document",
new: "@langchain/core/documents",
namedImport: "Document",
},
{
old: "langchain/schema/document",
new: "@langchain/core/documents",
namedImport: "BaseDocumentTransformer",
},
{
old: "langchain/schema/document",
new: "@langchain/core/documents",
namedImport: "MappingDocumentTransformer",
},
{
old: "langchain/callbacks/promises",
new: "@langchain/core/callbacks/promises",
namedImport: "consumeCallback",
},
{
old: "langchain/callbacks/promises",
new: "@langchain/core/callbacks/promises",
namedImport: "awaitAllCallbacks",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "parseCallbackConfigArg",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "ensureHandler",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "traceAsGroup",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManagerOptions",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "Callbacks",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "BaseCallbackConfig",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "BaseCallbackManager",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManagerForRetrieverRun",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManagerForLLMRun",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManagerForChainRun",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManagerForToolRun",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "CallbackManager",
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: "TraceGroup",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/callbacks/base",
namedImport: "BaseCallbackHandlerInput",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/callbacks/base",
namedImport: "NewTokenIndices",
},
{
old: "langchain/callbacks/base",
new: "@langchain/core/callbacks/base",
namedImport: "HandleLLMNewTokenCallbackFields",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/callbacks/base",
namedImport: "CallbackHandlerMethods",
},
{
old: "langchain/callbacks/*",
new: "@langchain/core/callbacks/base",
namedImport: "BaseCallbackHandler",
},
{
old: "langchain/chat_models/yandex",
new: "@langchain/yandex",
namedImport: "ChatYandexGPT",
},
{
old: "langchain/llms/yandex",
new: "@langchain/yandex",
namedImport: "YandexGPTInputs",
},
{
old: "langchain/llms/yandex",
new: "@langchain/yandex",
namedImport: "YandexGPT",
},
{
old: "langchain/vectorstores/weaviate",
new: "@langchain/weaviate",
namedImport: "flattenObjectForWeaviate",
},
{
old: "langchain/vectorstores/weaviate",
new: "@langchain/weaviate",
namedImport: "WeaviateLibArgs",
},
{
old: "langchain/vectorstores/weaviate",
new: "@langchain/weaviate",
namedImport: "WeaviateFilter",
},
{
old: "langchain/vectorstores/weaviate",
new: "@langchain/weaviate",
namedImport: "WeaviateStore",
},
{
old: "langchain/stores/message/redis",
new: "@langchain/redis",
namedImport: "RedisChatMessageHistoryInput",
},
{
old: "langchain/stores/message/redis",
new: "@langchain/redis",
namedImport: "RedisChatMessageHistory",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "CreateSchemaVectorField",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "CreateSchemaFlatVectorField",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "CreateSchemaHNSWVectorField",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisSearchLanguages",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisVectorStoreIndexOptions",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisVectorStoreConfig",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisAddOptions",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisVectorStoreFilterType",
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/redis",
namedImport: "RedisVectorStore",
},
{
old: "langchain/vectorstores/pinecone",
new: "@langchain/pinecone",
namedImport: "PineconeDeleteParams",
},
{
old: "langchain/vectorstores/pinecone",
new: "@langchain/pinecone",
namedImport: "PineconeStore",
},
{
old: "langchain/llms/openai",
new: "@langchain/openai",
namedImport: "AzureOpenAIInput",
},
{
old: "langchain/llms/openai",
new: "@langchain/openai",
namedImport: "OpenAICallOptions",
},
{
old: "langchain/chat_models/openai",
new: "@langchain/openai",
namedImport: "OpenAIChatInput",
},
{
old: "langchain/chat_models/openai",
new: "@langchain/openai",
namedImport: "ChatOpenAICallOptions",
},
{
old: "langchain/chat_models/openai",
new: "@langchain/openai",
namedImport: "ChatOpenAI",
},
{
old: "langchain/llms/openai",
new: "@langchain/openai",
namedImport: "OpenAIInput",
},
{
old: "langchain/llms/openai-chat",
new: "@langchain/openai",
namedImport: "OpenAIChatCallOptions",
},
{
old: "langchain/llms/openai",
new: "@langchain/openai",
namedImport: "OpenAIChat",
},
{
old: "langchain/llms/openai",
new: "@langchain/openai",
namedImport: "OpenAI",
},
{
old: "langchain/embeddings/openai",
new: "@langchain/openai",
namedImport: "OpenAIEmbeddingsParams",
},
{
old: "langchain/embeddings/openai",
new: "@langchain/openai",
namedImport: "OpenAIEmbeddings",
},
{
old: "langchain/tools/convert_to_openai",
new: "@langchain/openai",
namedImport: "formatToOpenAIAssistantTool",
},
{
old: "langchain/tools/*",
new: "@langchain/openai",
namedImport: "formatToOpenAIFunction",
},
{
old: "langchain/tools/*",
new: "@langchain/openai",
namedImport: "formatToOpenAITool",
},
{
old: "langchain/util/azure",
new: "@langchain/openai",
namedImport: "getEndpoint",
},
{
old: "langchain/util/azure",
new: "@langchain/openai",
namedImport: "OpenAIEndpointConfig",
},
{
old: "langchain/stores/message/mongodb",
new: "@langchain/mongodb",
namedImport: "MongoDBChatMessageHistoryInput",
},
{
old: "langchain/stores/message/mongodb",
new: "@langchain/mongodb",
namedImport: "MongoDBChatMessageHistory",
},
{
old: "langchain/vectorstores/mongodb_atlas",
new: "@langchain/mongodb",
namedImport: "MongoDBAtlasVectorSearchLibArgs",
},
{
old: "langchain/vectorstores/mongodb_atlas",
new: "@langchain/mongodb",
namedImport: "MongoDBAtlasVectorSearch",
},
{
old: "langchain/types/assemblyai-types",
new: "@langchain/mongodb/node_modules/mongodb",
namedImport: "Timestamp",
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/mongodb/node_modules/mongodb",
namedImport: "ConnectionOptions",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/mongodb/node_modules/mongodb",
namedImport: "Filter",
},
{
old: "langchain/util/convex",
new: "@langchain/mongodb/node_modules/mongodb/client-side-encryption/providers/utils",
namedImport: "get",
},
{
old: "langchain/chat_models/googlepalm",
new: "@langchain/google/genai",
namedImport: "BaseMessageExamplePair",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleAbstractedClientOpsMethod",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleAbstractedClientOpsResponseType",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleAbstractedClientOps",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleAbstractedClient",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/google/common",
namedImport: "GoogleConnection",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleConnectionParams",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common",
namedImport: "GoogleResponse",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/google/common",
namedImport: "complexValue",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/google/common",
namedImport: "simpleValue",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common/utils",
namedImport: "GoogleVertexAIBasePrediction",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/google/common/utils",
namedImport: "GoogleVertexAILLMPredictions",
},
{
old: "langchain/vectorstores/zep",
new: "@langchain/community/vectorstores/zep",
namedImport: "IZepArgs",
},
{
old: "langchain/vectorstores/zep",
new: "@langchain/community/vectorstores/zep",
namedImport: "IZepConfig",
},
{
old: "langchain/vectorstores/zep",
new: "@langchain/community/vectorstores/zep",
namedImport: "IZepDeleteParams",
},
{
old: "langchain/vectorstores/zep",
new: "@langchain/community/vectorstores/zep",
namedImport: "ZepVectorStore",
},
{
old: "langchain/vectorstores/xata",
new: "@langchain/community/vectorstores/xata",
namedImport: "XataClientArgs",
},
{
old: "langchain/vectorstores/xata",
new: "@langchain/community/vectorstores/xata",
namedImport: "XataVectorSearch",
},
{
old: "langchain/vectorstores/voy",
new: "@langchain/community/vectorstores/voy",
namedImport: "VoyClient",
},
{
old: "langchain/vectorstores/voy",
new: "@langchain/community/vectorstores/voy",
namedImport: "VoyVectorStore",
},
{
old: "langchain/vectorstores/vercel_postgres",
new: "@langchain/community/vectorstores/vercel_postgres",
namedImport: "VercelPostgresFields",
},
{
old: "langchain/vectorstores/vercel_postgres",
new: "@langchain/community/vectorstores/vercel_postgres",
namedImport: "VercelPostgres",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraLibArgs",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraFile",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraContextConfig",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "MMRConfig",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraSummary",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraFilter",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "DEFAULT_FILTER",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraRetrieverInput",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: "VectaraStore",
},
{
old: "langchain/vectorstores/usearch",
new: "@langchain/community/vectorstores/usearch",
namedImport: "USearchArgs",
},
{
old: "langchain/vectorstores/usearch",
new: "@langchain/community/vectorstores/usearch",
namedImport: "USearch",
},
{
old: "langchain/vectorstores/typesense",
new: "@langchain/community/vectorstores/typesense",
namedImport: "TypesenseConfig",
},
{
old: "langchain/vectorstores/typesense",
new: "@langchain/community/vectorstores/typesense",
namedImport: "Typesense",
},
{
old: "langchain/vectorstores/typeorm",
new: "@langchain/community/vectorstores/typeorm",
namedImport: "TypeORMVectorStoreArgs",
},
{
old: "langchain/vectorstores/typeorm",
new: "@langchain/community/vectorstores/typeorm",
namedImport: "TypeORMVectorStoreDocument",
},
{
old: "langchain/vectorstores/typeorm",
new: "@langchain/community/vectorstores/typeorm",
namedImport: "TypeORMVectorStore",
},
{
old: "langchain/vectorstores/tigris",
new: "@langchain/community/vectorstores/tigris",
namedImport: "TigrisLibArgs",
},
{
old: "langchain/vectorstores/tigris",
new: "@langchain/community/vectorstores/tigris",
namedImport: "TigrisVectorStore",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: "SupabaseMetadata",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: "SupabaseFilter",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: "SupabaseFilterRPCCall",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: "SupabaseLibArgs",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: "SupabaseVectorStore",
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/community/vectorstores/singlestore",
namedImport: "DistanceMetrics",
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/community/vectorstores/singlestore",
namedImport: "SingleStoreVectorStoreConfig",
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/community/vectorstores/singlestore",
namedImport: "SingleStoreVectorStore",
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: "RocksetStoreError",
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: "RocksetStoreDestroyedError",
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: "SimilarityMetric",
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: "RocksetLibArgs",
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: "RocksetStore",
},
{
old: "langchain/vectorstores/qdrant",
new: "@langchain/community/vectorstores/qdrant",
namedImport: "QdrantLibArgs",
},
{
old: "langchain/vectorstores/qdrant",
new: "@langchain/community/vectorstores/qdrant",
namedImport: "QdrantAddDocumentOptions",
},
{
old: "langchain/vectorstores/qdrant",
new: "@langchain/community/vectorstores/qdrant",
namedImport: "QdrantVectorStore",
},
{
old: "langchain/vectorstores/prisma",
new: "@langchain/community/vectorstores/prisma",
namedImport: "PrismaSqlFilter",
},
{
old: "langchain/vectorstores/prisma",
new: "@langchain/community/vectorstores/prisma",
namedImport: "PrismaVectorStore",
},
{
old: "langchain/vectorstores/pinecone",
new: "@langchain/community/vectorstores/pinecone",
namedImport: "PineconeLibArgs",
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/pgvector",
namedImport: "DistanceStrategy",
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/pgvector",
namedImport: "PGVectorStoreArgs",
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/pgvector",
namedImport: "PGVectorStore",
},
{
old: "langchain/vectorstores/opensearch",
new: "@langchain/community/vectorstores/opensearch",
namedImport: "OpenSearchClientArgs",
},
{
old: "langchain/vectorstores/opensearch",
new: "@langchain/community/vectorstores/opensearch",
namedImport: "OpenSearchVectorStore",
},
{
old: "langchain/vectorstores/neo4j_vector",
new: "@langchain/community/vectorstores/neo4j_vector",
namedImport: "SearchType",
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/neo4j_vector",
namedImport: "DistanceStrategy",
},
{
old: "langchain/vectorstores/neo4j_vector",
new: "@langchain/community/vectorstores/neo4j_vector",
namedImport: "Neo4jVectorStore",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: "MyScaleLibArgs",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: "ColumnMap",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: "metric",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: "MyScaleFilter",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: "MyScaleStore",
},
{
old: "langchain/vectorstores/momento_vector_index",
new: "@langchain/community/vectorstores/momento_vector_index",
namedImport: null,
},
{
old: "langchain/vectorstores/milvus",
new: "@langchain/community/vectorstores/milvus",
namedImport: "MilvusLibArgs",
},
{
old: "langchain/vectorstores/milvus",
new: "@langchain/community/vectorstores/milvus",
namedImport: "Milvus",
},
{
old: "langchain/vectorstores/lancedb",
new: "@langchain/community/vectorstores/lancedb",
namedImport: "LanceDBArgs",
},
{
old: "langchain/vectorstores/lancedb",
new: "@langchain/community/vectorstores/lancedb",
namedImport: "LanceDB",
},
{
old: "langchain/vectorstores/hnswlib",
new: "@langchain/community/vectorstores/hnswlib",
namedImport: "HNSWLibBase",
},
{
old: "langchain/vectorstores/hnswlib",
new: "@langchain/community/vectorstores/hnswlib",
namedImport: "HNSWLibArgs",
},
{
old: "langchain/vectorstores/hnswlib",
new: "@langchain/community/vectorstores/hnswlib",
namedImport: "HNSWLib",
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/hanavector",
namedImport: "DistanceStrategy",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "IdDocumentInput",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "IdDocument",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "MatchingEngineDeleteParams",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "Restriction",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "PublicAPIEndpointInfo",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "MatchingEngineArgs",
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: "MatchingEngine",
},
{
old: "langchain/vectorstores/faiss",
new: "@langchain/community/vectorstores/faiss",
namedImport: "FaissLibArgs",
},
{
old: "langchain/vectorstores/faiss",
new: "@langchain/community/vectorstores/faiss",
namedImport: "FaissStore",
},
{
old: "langchain/vectorstores/elasticsearch",
new: "@langchain/community/vectorstores/elasticsearch",
namedImport: "ElasticClientArgs",
},
{
old: "langchain/vectorstores/elasticsearch",
new: "@langchain/community/vectorstores/elasticsearch",
namedImport: "ElasticVectorSearch",
},
{
old: "langchain/vectorstores/convex",
new: "@langchain/community/vectorstores/convex",
namedImport: "ConvexVectorStoreConfig",
},
{
old: "langchain/vectorstores/convex",
new: "@langchain/community/vectorstores/convex",
namedImport: "ConvexVectorStore",
},
{
old: "langchain/vectorstores/clickhouse",
new: "@langchain/community/vectorstores/clickhouse",
namedImport: "ClickHouseLibArgs",
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/clickhouse",
namedImport: "ColumnMap",
},
{
old: "langchain/vectorstores/clickhouse",
new: "@langchain/community/vectorstores/clickhouse",
namedImport: "ClickHouseFilter",
},
{
old: "langchain/vectorstores/clickhouse",
new: "@langchain/community/vectorstores/clickhouse",
namedImport: "ClickHouseStore",
},
{
old: "langchain/vectorstores/chroma",
new: "@langchain/community/vectorstores/chroma",
namedImport: "ChromaLibArgs",
},
{
old: "langchain/vectorstores/chroma",
new: "@langchain/community/vectorstores/chroma",
namedImport: "ChromaDeleteParams",
},
{
old: "langchain/vectorstores/chroma",
new: "@langchain/community/vectorstores/chroma",
namedImport: "Chroma",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "Column",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "Index",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "WhereClause",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "SupportedVectorTypes",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "CassandraLibArgs",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: "CassandraStore",
},
{
old: "langchain/vectorstores/analyticdb",
new: "@langchain/community/vectorstores/analyticdb",
namedImport: "AnalyticDBArgs",
},
{
old: "langchain/vectorstores/analyticdb",
new: "@langchain/community/vectorstores/analyticdb",
namedImport: "AnalyticDBVectorStore",
},
{
old: "langchain/util/convex",
new: "@langchain/community/vectorstores/tests/convex/convex/langchain/db",
namedImport: "get",
},
{
old: "langchain/util/convex",
new: "@langchain/community/vectorstores/tests/convex/convex/langchain/db",
namedImport: "insert",
},
{
old: "langchain/util/convex",
new: "@langchain/community/vectorstores/tests/convex/convex/langchain/db",
namedImport: "lookup",
},
{
old: "langchain/util/convex",
new: "@langchain/community/vectorstores/tests/convex/convex/langchain/db",
namedImport: "upsert",
},
{
old: "langchain/util/convex",
new: "@langchain/community/vectorstores/tests/convex/convex/langchain/db",
namedImport: "deleteMany",
},
{
old: "langchain/vectorstores/closevector/web",
new: "@langchain/community/vectorstores/closevector/web",
namedImport: null,
},
{
old: "langchain/vectorstores/closevector/node",
new: "@langchain/community/vectorstores/closevector/node",
namedImport: "CloseVectorNodeArgs",
},
{
old: "langchain/vectorstores/closevector/node",
new: "@langchain/community/vectorstores/closevector/node",
namedImport: "CloseVectorNode",
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/utils/ollama",
namedImport: "OllamaInput",
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/utils/ollama",
namedImport: "OllamaCallOptions",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/community/utils/googlevertexai-connection",
namedImport: "GoogleVertexAIConnection",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/community/utils/googlevertexai-connection",
namedImport: "GoogleVertexAILLMConnection",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/community/utils/googlevertexai-connection",
namedImport: "GoogleVertexAILLMResponse",
},
{
old: "langchain/util/googlevertexai-connection",
new: "@langchain/community/utils/googlevertexai-connection",
namedImport: "GoogleVertexAIStream",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "getBytes",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "getLines",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "getMessages",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "convertEventStreamToIterableReadableDataStream",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "EventStreamContentType",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: "EventSourceMessage",
},
{
old: "langchain/util/convex",
new: "@langchain/community/utils/convex",
namedImport: "get",
},
{
old: "langchain/util/convex",
new: "@langchain/community/utils/convex",
namedImport: "insert",
},
{
old: "langchain/util/convex",
new: "@langchain/community/utils/convex",
namedImport: "lookup",
},
{
old: "langchain/util/convex",
new: "@langchain/community/utils/convex",
namedImport: "upsert",
},
{
old: "langchain/util/convex",
new: "@langchain/community/utils/convex",
namedImport: "deleteMany",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/utils/cassandra",
namedImport: "Column",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/utils/cassandra",
namedImport: "Index",
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/utils/cassandra",
namedImport: "WhereClause",
},
{
old: "langchain/types/type-utils",
new: "@langchain/community/types/type-utils",
namedImport: "Optional",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/community/types/googlevertexai-types",
namedImport: "GoogleVertexAIConnectionParams",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/community/types/googlevertexai-types",
namedImport: "GoogleVertexAIModelParams",
},
{
old: "langchain/types/googlevertexai-types",
new: "@langchain/community/types/googlevertexai-types",
namedImport: "GoogleVertexAIBaseLLMInput",
},
{
old: "langchain/tools/wolframalpha",
new: "@langchain/community/tools/wolframalpha",
namedImport: "WolframAlphaTool",
},
{
old: "langchain/tools/wikipedia_query_run",
new: "@langchain/community/tools/wikipedia_query_run",
namedImport: "WikipediaQueryRunParams",
},
{
old: "langchain/tools/wikipedia_query_run",
new: "@langchain/community/tools/wikipedia_query_run",
namedImport: "WikipediaQueryRun",
},
{
old: "langchain/retrievers/tavily_search_api",
new: "@langchain/community/tools/tavily_search",
namedImport: "TavilySearchAPIRetrieverFields",
},
{
old: "langchain/tools/serper",
new: "@langchain/community/tools/serper",
namedImport: "SerperParameters",
},
{
old: "langchain/tools/serper",
new: "@langchain/community/tools/serper",
namedImport: "Serper",
},
{
old: "langchain/tools/serpapi",
new: "@langchain/community/tools/serpapi",
namedImport: "SerpAPIParameters",
},
{
old: "langchain/tools/serpapi",
new: "@langchain/community/tools/serpapi",
namedImport: "SerpAPI",
},
{
old: "langchain/tools/searxng_search",
new: "@langchain/community/tools/searxng_search",
namedImport: "SearxngSearch",
},
{
old: "langchain/tools/searchapi",
new: "@langchain/community/tools/searchapi",
namedImport: "SearchApiParameters",
},
{
old: "langchain/tools/searchapi",
new: "@langchain/community/tools/searchapi",
namedImport: "SearchApi",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/ifttt",
namedImport: "IFTTTWebhook",
},
{
old: "langchain/tools/google_places",
new: "@langchain/community/tools/google_places",
namedImport: "GooglePlacesAPIParams",
},
{
old: "langchain/tools/google_places",
new: "@langchain/community/tools/google_places",
namedImport: "GooglePlacesAPI",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/google_custom_search",
namedImport: "GoogleCustomSearchParams",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/google_custom_search",
namedImport: "GoogleCustomSearch",
},
{
old: "langchain/tools/dynamic",
new: "@langchain/community/tools/dynamic",
namedImport: "BaseDynamicToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dynamic",
namedImport: "DynamicToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dynamic",
namedImport: "DynamicStructuredToolInput",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dynamic",
namedImport: "DynamicTool",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dynamic",
namedImport: "DynamicStructuredTool",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dataforseo_api_search",
namedImport: "DataForSeoApiConfig",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dataforseo_api_search",
namedImport: "DataForSeoAPISearch",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/dadjokeapi",
namedImport: "DadJokeAPI",
},
{
old: "langchain/tools/connery",
new: "@langchain/community/tools/connery",
namedImport: "ConneryServiceParams",
},
{
old: "langchain/tools/connery",
new: "@langchain/community/tools/connery",
namedImport: "ConneryAction",
},
{
old: "langchain/tools/connery",
new: "@langchain/community/tools/connery",
namedImport: "ConneryService",
},
{
old: "langchain/tools/calculator",
new: "@langchain/community/tools/calculator",
namedImport: "Calculator",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/brave_search",
namedImport: "BraveSearchParams",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/brave_search",
namedImport: "BraveSearch",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/bingserpapi",
namedImport: "BingSerpAPI",
},
{
old: "langchain/tools/aws_sfn",
new: "@langchain/community/tools/aws_sfn",
namedImport: "SfnConfig",
},
{
old: "langchain/tools/aws_sfn",
new: "@langchain/community/tools/aws_sfn",
namedImport: "StartExecutionAWSSfnTool",
},
{
old: "langchain/tools/aws_sfn",
new: "@langchain/community/tools/aws_sfn",
namedImport: "DescribeExecutionAWSSfnTool",
},
{
old: "langchain/tools/aws_sfn",
new: "@langchain/community/tools/aws_sfn",
namedImport: "SendTaskSuccessAWSSfnTool",
},
{
old: "langchain/tools/aws_lambda",
new: "@langchain/community/tools/aws_lambda",
namedImport: "AWSLambda",
},
{
old: "langchain/tools/aiplugin",
new: "@langchain/community/tools/aiplugin",
namedImport: "AIPluginToolParams",
},
{
old: "langchain/tools/*",
new: "@langchain/community/tools/aiplugin",
namedImport: "AIPluginTool",
},
{
old: "langchain/tools/google_calendar/*",
new: "@langchain/community/tools/google_calendar",
namedImport: "GoogleCalendarCreateTool",
},
{
old: "langchain/tools/google_calendar/*",
new: "@langchain/community/tools/google_calendar",
namedImport: "GoogleCalendarViewTool",
},
{
old: "langchain/tools/google_calendar/*",
new: "@langchain/community/tools/google_calendar",
namedImport: "GoogleCalendarAgentParams",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailCreateDraft",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailGetMessage",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailGetThread",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailSearch",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailSendMessage",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GmailBaseToolParams",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "CreateDraftSchema",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GetMessageSchema",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "GetThreadSchema",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "SearchSchema",
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: "SendMessageSchema",
},
{
old: "langchain/stores/message/xata",
new: "@langchain/community/stores/message/xata",
namedImport: "XataChatMessageHistoryInput",
},
{
old: "langchain/stores/message/xata",
new: "@langchain/community/stores/message/xata",
namedImport: "XataChatMessageHistory",
},
{
old: "langchain/stores/message/upstash_redis",
new: "@langchain/community/stores/message/upstash_redis",
namedImport: "UpstashRedisChatMessageHistoryInput",
},
{
old: "langchain/stores/message/upstash_redis",
new: "@langchain/community/stores/message/upstash_redis",
namedImport: "UpstashRedisChatMessageHistory",
},
{
old: "langchain/stores/message/planetscale",
new: "@langchain/community/stores/message/planetscale",
namedImport: "PlanetScaleChatMessageHistoryInput",
},
{
old: "langchain/stores/message/planetscale",
new: "@langchain/community/stores/message/planetscale",
namedImport: "PlanetScaleChatMessageHistory",
},
{
old: "langchain/stores/message/momento",
new: "@langchain/community/stores/message/momento",
namedImport: "MomentoChatMessageHistoryProps",
},
{
old: "langchain/stores/message/momento",
new: "@langchain/community/stores/message/momento",
namedImport: "MomentoChatMessageHistory",
},
{
old: "langchain/stores/message/in_memory",
new: "@langchain/community/stores/message/in_memory",
namedImport: "ChatMessageHistory",
},
{
old: "langchain/stores/message/firestore",
new: "@langchain/community/stores/message/firestore",
namedImport: "FirestoreDBChatMessageHistory",
},
{
old: "langchain/stores/message/firestore",
new: "@langchain/community/stores/message/firestore",
namedImport: "FirestoreChatMessageHistory",
},
{
old: "langchain/stores/message/dynamodb",
new: "@langchain/community/stores/message/dynamodb",
namedImport: "DynamoDBChatMessageHistoryFields",
},
{
old: "langchain/stores/message/dynamodb",
new: "@langchain/community/stores/message/dynamodb",
namedImport: "DynamoDBChatMessageHistory",
},
{
old: "langchain/stores/message/convex",
new: "@langchain/community/stores/message/convex",
namedImport: "ConvexChatMessageHistoryInput",
},
{
old: "langchain/stores/message/convex",
new: "@langchain/community/stores/message/convex",
namedImport: "ConvexChatMessageHistory",
},
{
old: "langchain/stores/message/cassandra",
new: "@langchain/community/stores/message/cassandra",
namedImport: "CassandraChatMessageHistoryOptions",
},
{
old: "langchain/stores/message/cassandra",
new: "@langchain/community/stores/message/cassandra",
namedImport: "CassandraChatMessageHistory",
},
{
old: "langchain/stores/doc/in_memory",
new: "@langchain/community/stores/doc/in_memory",
namedImport: "InMemoryDocstore",
},
{
old: "langchain/stores/doc/in_memory",
new: "@langchain/community/stores/doc/in_memory",
namedImport: "SynchronousInMemoryDocstore",
},
{
old: "langchain/schema/*",
new: "@langchain/community/stores/doc/base",
namedImport: "Docstore",
},
{
old: "langchain/storage/vercel_kv",
new: "@langchain/community/storage/vercel_kv",
namedImport: "VercelKVStore",
},
{
old: "langchain/storage/upstash_redis",
new: "@langchain/community/storage/upstash_redis",
namedImport: "UpstashRedisStoreInput",
},
{
old: "langchain/storage/upstash_redis",
new: "@langchain/community/storage/upstash_redis",
namedImport: "UpstashRedisStore",
},
{
old: "langchain/storage/ioredis",
new: "@langchain/community/storage/ioredis",
namedImport: "RedisByteStore",
},
{
old: "langchain/storage/convex",
new: "@langchain/community/storage/convex",
namedImport: "ConvexKVStoreConfig",
},
{
old: "langchain/storage/convex",
new: "@langchain/community/storage/convex",
namedImport: "ConvexKVStore",
},
{
old: "langchain/retrievers/zep",
new: "@langchain/community/retrievers/zep",
namedImport: "ZepRetrieverConfig",
},
{
old: "langchain/retrievers/zep",
new: "@langchain/community/retrievers/zep",
namedImport: "ZepRetriever",
},
{
old: "langchain/retrievers/vespa",
new: "@langchain/community/retrievers/vespa",
namedImport: "VespaRetrieverParams",
},
{
old: "langchain/retrievers/vespa",
new: "@langchain/community/retrievers/vespa",
namedImport: "VespaRetriever",
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/retrievers/vectara_summary",
namedImport: "VectaraRetrieverInput",
},
{
old: "langchain/retrievers/vectara_summary",
new: "@langchain/community/retrievers/vectara_summary",
namedImport: "VectaraSummaryRetriever",
},
{
old: "langchain/retrievers/tavily_search_api",
new: "@langchain/community/retrievers/tavily_search_api",
namedImport: "TavilySearchAPIRetrieverFields",
},
{
old: "langchain/retrievers/tavily_search_api",
new: "@langchain/community/retrievers/tavily_search_api",
namedImport: "TavilySearchAPIRetriever",
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/retrievers/supabase",
namedImport: "SupabaseLibArgs",
},
{
old: "langchain/retrievers/supabase",
new: "@langchain/community/retrievers/supabase",
namedImport: "SupabaseHybridSearchParams",
},
{
old: "langchain/retrievers/supabase",
new: "@langchain/community/retrievers/supabase",
namedImport: "SupabaseHybridSearch",
},
{
old: "langchain/retrievers/metal",
new: "@langchain/community/retrievers/metal",
namedImport: "MetalRetrieverFields",
},
{
old: "langchain/retrievers/metal",
new: "@langchain/community/retrievers/metal",
namedImport: "MetalRetriever",
},
{
old: "langchain/retrievers/databerry",
new: "@langchain/community/retrievers/databerry",
namedImport: "DataberryRetrieverArgs",
},
{
old: "langchain/retrievers/databerry",
new: "@langchain/community/retrievers/databerry",
namedImport: "DataberryRetriever",
},
{
old: "langchain/retrievers/chaindesk",
new: "@langchain/community/retrievers/chaindesk",
namedImport: "ChaindeskRetrieverArgs",
},
{
old: "langchain/retrievers/chaindesk",
new: "@langchain/community/retrievers/chaindesk",
namedImport: "ChaindeskRetriever",
},
{
old: "langchain/retrievers/amazon_kendra",
new: "@langchain/community/retrievers/amazon_kendra",
namedImport: "AmazonKendraRetrieverArgs",
},
{
old: "langchain/retrievers/amazon_kendra",
new: "@langchain/community/retrievers/amazon_kendra",
namedImport: "AmazonKendraRetriever",
},
{
old: "langchain/retrievers/remote/*",
new: "@langchain/community/retrievers/remote",
namedImport: "RemoteRetriever",
},
{
old: "langchain/retrievers/remote/*",
new: "@langchain/community/retrievers/remote",
namedImport: "RemoteRetrieverParams",
},
{
old: "langchain/retrievers/remote/*",
new: "@langchain/community/retrievers/remote",
namedImport: "RemoteRetrieverAuth",
},
{
old: "langchain/retrievers/remote/*",
new: "@langchain/community/retrievers/remote",
namedImport: "RemoteRetrieverValues",
},
{
old: "langchain/memory/zep",
new: "@langchain/community/memory/zep",
namedImport: "ZepMemoryInput",
},
{
old: "langchain/memory/zep",
new: "@langchain/community/memory/zep",
namedImport: "ZepMemory",
},
{
old: "langchain/memory/motorhead_memory",
new: "@langchain/community/memory/motorhead_memory",
namedImport: "MotorheadMemoryMessage",
},
{
old: "langchain/memory/motorhead_memory",
new: "@langchain/community/memory/motorhead_memory",
namedImport: "MotorheadMemoryInput",
},
{
old: "langchain/memory/motorhead_memory",
new: "@langchain/community/memory/motorhead_memory",
namedImport: "MotorheadMemory",
},
{
old: "langchain/memory/*",
new: "@langchain/community/memory/chat_memory",
namedImport: "BaseChatMemoryInput",
},
{
old: "langchain/memory/*",
new: "@langchain/community/memory/chat_memory",
namedImport: "BaseChatMemory",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "get_lc_unique_name",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "BaseSerialized",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "SerializedConstructor",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "SerializedSecret",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "SerializedNotImplemented",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "Serialized",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "SerializableInterface",
},
{
old: "langchain/load/serializable",
new: "@langchain/community/load/serializable",
namedImport: "Serializable",
},
{
old: "langchain/load/map_keys",
new: "@langchain/community/load/map_keys",
namedImport: "SerializedFields",
},
{
old: "langchain/load/import_type",
new: "@langchain/community/load",
namedImport: "OptionalImportMap",
},
{
old: "langchain/load/import_type",
new: "@langchain/community/load",
namedImport: "SecretMap",
},
{
old: "langchain/load/import_constants",
new: "@langchain/community/load",
namedImport: "optionalImportEntrypoints",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "llms__fireworks",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "chat_models__fireworks",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "retrievers__remote",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "retrievers__vespa",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "stores__doc__in_memory",
},
{
old: "langchain/load/import_map",
new: "@langchain/community/load/import_map",
namedImport: "stores__message__in_memory",
},
{
old: "langchain/llms/writer",
new: "@langchain/community/llms/writer",
namedImport: "WriterInput",
},
{
old: "langchain/llms/writer",
new: "@langchain/community/llms/writer",
namedImport: "Writer",
},
{
old: "langchain/llms/watsonx_ai",
new: "@langchain/community/llms/watsonx_ai",
namedImport: "WatsonxAIParams",
},
{
old: "langchain/llms/watsonx_ai",
new: "@langchain/community/llms/watsonx_ai",
namedImport: "WatsonxAI",
},
{
old: "langchain/llms/sagemaker_endpoint",
new: "@langchain/community/llms/sagemaker_endpoint",
namedImport: "BaseSageMakerContentHandler",
},
{
old: "langchain/llms/sagemaker_endpoint",
new: "@langchain/community/llms/sagemaker_endpoint",
namedImport: "SageMakerLLMContentHandler",
},
{
old: "langchain/llms/sagemaker_endpoint",
new: "@langchain/community/llms/sagemaker_endpoint",
namedImport: "SageMakerEndpointInput",
},
{
old: "langchain/llms/sagemaker_endpoint",
new: "@langchain/community/llms/sagemaker_endpoint",
namedImport: "SageMakerEndpoint",
},
{
old: "langchain/llms/replicate",
new: "@langchain/community/llms/replicate",
namedImport: "ReplicateInput",
},
{
old: "langchain/llms/replicate",
new: "@langchain/community/llms/replicate",
namedImport: "Replicate",
},
{
old: "langchain/llms/raycast",
new: "@langchain/community/llms/raycast",
namedImport: "RaycastAIInput",
},
{
old: "langchain/llms/raycast",
new: "@langchain/community/llms/raycast",
namedImport: "RaycastAI",
},
{
old: "langchain/llms/portkey",
new: "@langchain/community/llms/portkey",
namedImport: "getPortkeySession",
},
{
old: "langchain/llms/portkey",
new: "@langchain/community/llms/portkey",
namedImport: "PortkeySession",
},
{
old: "langchain/llms/portkey",
new: "@langchain/community/llms/portkey",
namedImport: "Portkey",
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/llms/ollama",
namedImport: "OllamaInput",
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/llms/ollama",
namedImport: "OllamaCallOptions",
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/llms/ollama",
namedImport: "Ollama",
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/llms/llama_cpp",
namedImport: "LlamaCppInputs",
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/llms/llama_cpp",
namedImport: "LlamaCppCallOptions",
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/llms/llama_cpp",
namedImport: "LlamaCpp",
},
{
old: "langchain/llms/hf",
new: "@langchain/community/llms/hf",
namedImport: "HFInput",
},
{
old: "langchain/llms/hf",
new: "@langchain/community/llms/hf",
namedImport: "HuggingFaceInference",
},
{
old: "langchain/llms/gradient_ai",
new: "@langchain/community/llms/gradient_ai",
namedImport: "GradientLLMParams",
},
{
old: "langchain/llms/gradient_ai",
new: "@langchain/community/llms/gradient_ai",
namedImport: "GradientLLM",
},
{
old: "langchain/llms/googlepalm",
new: "@langchain/community/llms/googlepalm",
namedImport: "GooglePaLMTextInput",
},
{
old: "langchain/llms/googlepalm",
new: "@langchain/community/llms/googlepalm",
namedImport: "GooglePaLM",
},
{
old: "langchain/llms/fireworks",
new: "@langchain/community/llms/fireworks",
namedImport: "FireworksCallOptions",
},
{
old: "langchain/llms/fireworks",
new: "@langchain/community/llms/fireworks",
namedImport: "Fireworks",
},
{
old: "langchain/llms/aleph_alpha",
new: "@langchain/community/llms/aleph_alpha",
namedImport: "AlephAlphaInput",
},
{
old: "langchain/llms/aleph_alpha",
new: "@langchain/community/llms/aleph_alpha",
namedImport: "AlephAlpha",
},
{
old: "langchain/llms/ai21",
new: "@langchain/community/llms/ai21",
namedImport: "AI21PenaltyData",
},
{
old: "langchain/llms/ai21",
new: "@langchain/community/llms/ai21",
namedImport: "AI21Input",
},
{
old: "langchain/llms/ai21",
new: "@langchain/community/llms/ai21",
namedImport: "AI21",
},
{
old: "langchain/llms/googlevertexai/web",
new: "@langchain/community/llms/googlevertexai/web",
namedImport: null,
},
{
old: "langchain/llms/bedrock",
new: "@langchain/community/llms/bedrock",
namedImport: null,
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "AddGraphDocumentsConfig",
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "NodeType",
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "RelType",
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "PathType",
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "BASE_ENTITY_LABEL",
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: "Neo4jGraph",
},
{
old: "langchain/embeddings/voyage",
new: "@langchain/community/embeddings/voyage",
namedImport: "VoyageEmbeddingsParams",
},
{
old: "langchain/embeddings/voyage",
new: "@langchain/community/embeddings/voyage",
namedImport: "CreateVoyageEmbeddingRequest",
},
{
old: "langchain/embeddings/voyage",
new: "@langchain/community/embeddings/voyage",
namedImport: "VoyageEmbeddings",
},
{
old: "langchain/embeddings/tensorflow",
new: "@langchain/community/embeddings/tensorflow",
namedImport: "TensorFlowEmbeddingsParams",
},
{
old: "langchain/embeddings/tensorflow",
new: "@langchain/community/embeddings/tensorflow",
namedImport: "TensorFlowEmbeddings",
},
{
old: "langchain/embeddings/ollama",
new: "@langchain/community/embeddings/ollama",
namedImport: "OllamaEmbeddings",
},
{
old: "langchain/embeddings/minimax",
new: "@langchain/community/embeddings/minimax",
namedImport: "MinimaxEmbeddingsParams",
},
{
old: "langchain/embeddings/minimax",
new: "@langchain/community/embeddings/minimax",
namedImport: "CreateMinimaxEmbeddingRequest",
},
{
old: "langchain/embeddings/minimax",
new: "@langchain/community/embeddings/minimax",
namedImport: "MinimaxEmbeddings",
},
{
old: "langchain/embeddings/llama_cpp",
new: "@langchain/community/embeddings/llama_cpp",
namedImport: "LlamaCppEmbeddingsParams",
},
{
old: "langchain/embeddings/llama_cpp",
new: "@langchain/community/embeddings/llama_cpp",
namedImport: "LlamaCppEmbeddings",
},
{
old: "langchain/embeddings/hf_transformers",
new: "@langchain/community/embeddings/hf_transformers",
namedImport: "HuggingFaceTransformersEmbeddingsParams",
},
{
old: "langchain/embeddings/hf_transformers",
new: "@langchain/community/embeddings/hf_transformers",
namedImport: "HuggingFaceTransformersEmbeddings",
},
{
old: "langchain/embeddings/hf",
new: "@langchain/community/embeddings/hf",
namedImport: "HuggingFaceInferenceEmbeddingsParams",
},
{
old: "langchain/embeddings/hf",
new: "@langchain/community/embeddings/hf",
namedImport: "HuggingFaceInferenceEmbeddings",
},
{
old: "langchain/embeddings/gradient_ai",
new: "@langchain/community/embeddings/gradient_ai",
namedImport: "GradientEmbeddingsParams",
},
{
old: "langchain/embeddings/gradient_ai",
new: "@langchain/community/embeddings/gradient_ai",
namedImport: "GradientEmbeddings",
},
{
old: "langchain/embeddings/googlevertexai",
new: "@langchain/community/embeddings/googlevertexai",
namedImport: "GoogleVertexAIEmbeddingsParams",
},
{
old: "langchain/embeddings/googlevertexai",
new: "@langchain/community/embeddings/googlevertexai",
namedImport: "GoogleVertexAIEmbeddings",
},
{
old: "langchain/embeddings/googlepalm",
new: "@langchain/community/embeddings/googlepalm",
namedImport: "GooglePaLMEmbeddingsParams",
},
{
old: "langchain/embeddings/googlepalm",
new: "@langchain/community/embeddings/googlepalm",
namedImport: "GooglePaLMEmbeddings",
},
{
old: "langchain/embeddings/bedrock",
new: "@langchain/community/embeddings/bedrock",
namedImport: "BedrockEmbeddingsParams",
},
{
old: "langchain/embeddings/bedrock",
new: "@langchain/community/embeddings/bedrock",
namedImport: "BedrockEmbeddings",
},
{
old: "langchain/document_transformers/mozilla_readability",
new: "@langchain/community/document_transformers/mozilla_readability",
namedImport: "MozillaReadabilityTransformer",
},
{
old: "langchain/document_transformers/html_to_text",
new: "@langchain/community/document_transformers/html_to_text",
namedImport: "HtmlToTextTransformer",
},
{
old: "langchain/chat_models/portkey",
new: "@langchain/community/chat_models/portkey",
namedImport: "PortkeyChat",
},
{
old: "langchain/chat_models/ollama",
new: "@langchain/community/chat_models/ollama",
namedImport: "ChatOllamaInput",
},
{
old: "langchain/chat_models/ollama",
new: "@langchain/community/chat_models/ollama",
namedImport: "ChatOllamaCallOptions",
},
{
old: "langchain/chat_models/ollama",
new: "@langchain/community/chat_models/ollama",
namedImport: "ChatOllama",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: "MinimaxMessageRole",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: "ChatMinimaxCallOptions",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: "ChatMinimax",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: "ChatCompletionResponseMessageFunctionCall",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: "ChatCompletionResponseChoicesPro",
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/chat_models/llama_cpp",
namedImport: "LlamaCppInputs",
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/chat_models/llama_cpp",
namedImport: "LlamaCppCallOptions",
},
{
old: "langchain/chat_models/llama_cpp",
new: "@langchain/community/chat_models/llama_cpp",
namedImport: "ChatLlamaCpp",
},
{
old: "langchain/chat_models/googlepalm",
new: "@langchain/community/chat_models/googlepalm",
namedImport: "GooglePaLMChatInput",
},
{
old: "langchain/chat_models/googlepalm",
new: "@langchain/community/chat_models/googlepalm",
namedImport: "ChatGooglePaLM",
},
{
old: "langchain/chat_models/fireworks",
new: "@langchain/community/chat_models/fireworks",
namedImport: "ChatFireworksCallOptions",
},
{
old: "langchain/chat_models/fireworks",
new: "@langchain/community/chat_models/fireworks",
namedImport: "ChatFireworks",
},
{
old: "langchain/chat_models/baiduwenxin",
new: "@langchain/community/chat_models/baiduwenxin",
namedImport: "WenxinMessageRole",
},
{
old: "langchain/chat_models/baiduwenxin",
new: "@langchain/community/chat_models/baiduwenxin",
namedImport: "ChatBaiduWenxin",
},
{
old: "langchain/chat_models/iflytek_xinghuo/web",
new: "@langchain/community/chat_models/iflytek_xinghuo/web",
namedImport: null,
},
{
old: "langchain/chat_models/googlevertexai/web",
new: "@langchain/community/chat_models/googlevertexai/web",
namedImport: null,
},
{
old: "langchain/chat_models/bedrock/web",
new: "@langchain/community/chat_models/bedrock/web",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/llmonitor",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: "convertToLLMonitorMessages",
},
{
old: "langchain/callbacks/*",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: "Run",
},
{
old: "langchain/callbacks/handlers/tracer_langchain",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: "RunUpdate",
},
{
old: "langchain/callbacks/handlers/llmonitor",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: "LLMonitorHandlerFields",
},
{
old: "langchain/callbacks/handlers/llmonitor",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: "LLMonitorHandler",
},
{
old: "langchain/cache/upstash_redis",
new: "@langchain/community/caches/upstash_redis",
namedImport: "UpstashRedisCacheProps",
},
{
old: "langchain/cache/upstash_redis",
new: "@langchain/community/caches/upstash_redis",
namedImport: "UpstashRedisCache",
},
{
old: "langchain/cache/momento",
new: "@langchain/community/caches/momento",
namedImport: "MomentoCacheProps",
},
{
old: "langchain/cache/momento",
new: "@langchain/community/caches/momento",
namedImport: "MomentoCache",
},
{
old: "langchain/agents/*",
new: "@langchain/community/agents/toolkits/base",
namedImport: "Toolkit",
},
{
old: "langchain/agents/toolkits/aws_sfn",
new: "@langchain/community/agents/toolkits/aws_sfn",
namedImport: "AWSSfnToolkitArgs",
},
{
old: "langchain/agents/toolkits/aws_sfn",
new: "@langchain/community/agents/toolkits/aws_sfn",
namedImport: "AWSSfnToolkit",
},
{
old: "langchain/agents/toolkits/aws_sfn",
new: "@langchain/community/agents/toolkits/aws_sfn",
namedImport: "createAWSSfnAgent",
},
{
old: "langchain/stores/doc/gcs",
new: "@langchain/community/stores/doc/gcs",
namedImport: null,
},
{
old: "langchain/agents/toolkits/connery/*",
new: "@langchain/community/agents/toolkits/connery",
namedImport: "ConneryToolkit",
},
{
old: "langchain/embeddings/base",
new: "@langchain/community/",
namedImport: "Embeddings",
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/community/",
namedImport: "Metadata",
},
{
old: "langchain/document_loaders/web/notionapi",
new: "@langchain/community/",
namedImport: "GetResponse",
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/generated",
namedImport: "ConfigurationParameters",
},
{
old: "langchain/llms/cohere",
new: "@langchain/cohere",
namedImport: "CohereInput",
},
{
old: "langchain/llms/cohere",
new: "@langchain/cohere",
namedImport: "Cohere",
},
{
old: "langchain/embeddings/cohere",
new: "@langchain/cohere",
namedImport: "CohereEmbeddingsParams",
},
{
old: "langchain/embeddings/cohere",
new: "@langchain/cohere",
namedImport: "CohereEmbeddings",
},
{
old: "langchain/chat_models/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "ChatCloudflareWorkersAICallOptions",
},
{
old: "langchain/chat_models/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "ChatCloudflareWorkersAI",
},
{
old: "langchain/llms/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "CloudflareWorkersAIInput",
},
{
old: "langchain/llms/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "CloudflareWorkersAI",
},
{
old: "langchain/embeddings/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "CloudflareWorkersAIEmbeddingsParams",
},
{
old: "langchain/embeddings/cloudflare_workersai",
new: "@langchain/cloudflare",
namedImport: "CloudflareWorkersAIEmbeddings",
},
{
old: "langchain/vectorstores/cloudflare_vectorize",
new: "@langchain/cloudflare",
namedImport: "VectorizeLibArgs",
},
{
old: "langchain/vectorstores/cloudflare_vectorize",
new: "@langchain/cloudflare",
namedImport: "VectorizeDeleteParams",
},
{
old: "langchain/vectorstores/cloudflare_vectorize",
new: "@langchain/cloudflare",
namedImport: "CloudflareVectorizeStore",
},
{
old: "langchain/cache/cloudflare_kv",
new: "@langchain/cloudflare",
namedImport: "CloudflareKVCache",
},
{
old: "langchain/stores/message/cloudflare_d1",
new: "@langchain/cloudflare",
namedImport: "CloudflareD1MessageHistoryInput",
},
{
old: "langchain/stores/message/cloudflare_d1",
new: "@langchain/cloudflare",
namedImport: "CloudflareD1MessageHistory",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "getBytes",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "getLines",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "getMessages",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "convertEventStreamToIterableReadableDataStream",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "EventStreamContentType",
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/cloudflare/utils/event_source_parse",
namedImport: "EventSourceMessage",
},
{
old: "langchain/chat_models/anthropic",
new: "@langchain/anthropic",
namedImport: "AnthropicInput",
},
{
old: "langchain/chat_models/anthropic",
new: "@langchain/anthropic",
namedImport: "ChatAnthropic",
},
{
old: "langchain/text_splitter",
new: "@langchain/textsplitters",
namedImport: null,
},
{
old: "langchain/vectorstores/zep",
new: "@langchain/community/vectorstores/zep",
namedImport: null,
},
{
old: "langchain/vectorstores/xata",
new: "@langchain/community/vectorstores/xata",
namedImport: null,
},
{
old: "langchain/vectorstores/weaviate",
new: "@langchain/community/vectorstores/weaviate",
namedImport: null,
},
{
old: "langchain/vectorstores/voy",
new: "@langchain/community/vectorstores/voy",
namedImport: null,
},
{
old: "langchain/vectorstores/vercel_postgres",
new: "@langchain/community/vectorstores/vercel_postgres",
namedImport: null,
},
{
old: "langchain/vectorstores/vectara",
new: "@langchain/community/vectorstores/vectara",
namedImport: null,
},
{
old: "langchain/vectorstores/usearch",
new: "@langchain/community/vectorstores/usearch",
namedImport: null,
},
{
old: "langchain/vectorstores/typesense",
new: "@langchain/community/vectorstores/typesense",
namedImport: null,
},
{
old: "langchain/vectorstores/typeorm",
new: "@langchain/community/vectorstores/typeorm",
namedImport: null,
},
{
old: "langchain/vectorstores/tigris",
new: "@langchain/community/vectorstores/tigris",
namedImport: null,
},
{
old: "langchain/vectorstores/supabase",
new: "@langchain/community/vectorstores/supabase",
namedImport: null,
},
{
old: "langchain/vectorstores/singlestore",
new: "@langchain/community/vectorstores/singlestore",
namedImport: null,
},
{
old: "langchain/vectorstores/rockset",
new: "@langchain/community/vectorstores/rockset",
namedImport: null,
},
{
old: "langchain/vectorstores/redis",
new: "@langchain/community/vectorstores/redis",
namedImport: null,
},
{
old: "langchain/vectorstores/qdrant",
new: "@langchain/community/vectorstores/qdrant",
namedImport: null,
},
{
old: "langchain/vectorstores/prisma",
new: "@langchain/community/vectorstores/prisma",
namedImport: null,
},
{
old: "langchain/vectorstores/pinecone",
new: "@langchain/community/vectorstores/pinecone",
namedImport: null,
},
{
old: "langchain/vectorstores/pgvector",
new: "@langchain/community/vectorstores/pgvector",
namedImport: null,
},
{
old: "langchain/vectorstores/opensearch",
new: "@langchain/community/vectorstores/opensearch",
namedImport: null,
},
{
old: "langchain/vectorstores/neo4j_vector",
new: "@langchain/community/vectorstores/neo4j_vector",
namedImport: null,
},
{
old: "langchain/vectorstores/myscale",
new: "@langchain/community/vectorstores/myscale",
namedImport: null,
},
{
old: "langchain/vectorstores/mongodb_atlas",
new: "@langchain/community/vectorstores/mongodb_atlas",
namedImport: null,
},
{
old: "langchain/vectorstores/momento_vector_/*",
new: "@langchain/community/vectorstores/momento_vector_index",
namedImport: null,
},
{
old: "langchain/vectorstores/milvus",
new: "@langchain/community/vectorstores/milvus",
namedImport: null,
},
{
old: "langchain/vectorstores/lancedb",
new: "@langchain/community/vectorstores/lancedb",
namedImport: null,
},
{
old: "langchain/vectorstores/hnswlib",
new: "@langchain/community/vectorstores/hnswlib",
namedImport: null,
},
{
old: "langchain/vectorstores/googlevertexai",
new: "@langchain/community/vectorstores/googlevertexai",
namedImport: null,
},
{
old: "langchain/vectorstores/faiss",
new: "@langchain/community/vectorstores/faiss",
namedImport: null,
},
{
old: "langchain/vectorstores/elasticsearch",
new: "@langchain/community/vectorstores/elasticsearch",
namedImport: null,
},
{
old: "langchain/vectorstores/convex",
new: "@langchain/community/vectorstores/convex",
namedImport: null,
},
{
old: "langchain/vectorstores/cloudflare_vectorize",
new: "@langchain/community/vectorstores/cloudflare_vectorize",
namedImport: null,
},
{
old: "langchain/vectorstores/clickhouse",
new: "@langchain/community/vectorstores/clickhouse",
namedImport: null,
},
{
old: "langchain/vectorstores/chroma",
new: "@langchain/community/vectorstores/chroma",
namedImport: null,
},
{
old: "langchain/vectorstores/cassandra",
new: "@langchain/community/vectorstores/cassandra",
namedImport: null,
},
{
old: "langchain/vectorstores/base",
new: "@langchain/core/vectorstores",
namedImport: null,
},
{
old: "langchain/vectorstores/analyticdb",
new: "@langchain/community/vectorstores/analyticdb",
namedImport: null,
},
{
old: "langchain/vectorstores/closevector/node",
new: "@langchain/community/vectorstores/closevector/node",
namedImport: null,
},
{
old: "langchain/util/tiktoken",
new: "@langchain/core/utils/tiktoken",
namedImport: null,
},
{
old: "langchain/util/stream",
new: "@langchain/core/utils/stream",
namedImport: null,
},
{
old: "langchain/util/math",
new: "@langchain/core/utils/math",
namedImport: null,
},
{
old: "langchain/util/event-source-parse",
new: "@langchain/community/utils/event_source_parse",
namedImport: null,
},
{
old: "langchain/util/convex",
new: "@langchain/community/util/convex",
namedImport: null,
},
{
old: "langchain/util/async_caller",
new: "@langchain/core/utils/async_caller",
namedImport: null,
},
{
old: "langchain/tools/wolframalpha",
new: "@langchain/community/tools/wolframalpha",
namedImport: null,
},
{
old: "langchain/tools/wikipedia_query_run",
new: "@langchain/community/tools/wikipedia_query_run",
namedImport: null,
},
{
old: "langchain/tools/serper",
new: "@langchain/community/tools/serper",
namedImport: null,
},
{
old: "langchain/tools/serpapi",
new: "@langchain/community/tools/serpapi",
namedImport: null,
},
{
old: "langchain/tools/searxng_search",
new: "@langchain/community/tools/searxng_search",
namedImport: null,
},
{
old: "langchain/tools/searchapi",
new: "@langchain/community/tools/searchapi",
namedImport: null,
},
{
old: "langchain/tools/google_places",
new: "@langchain/community/tools/google_places",
namedImport: null,
},
{
old: "langchain/tools/google_custom_search",
new: "@langchain/community/tools/google_custom_search",
namedImport: null,
},
{
old: "langchain/tools/dynamic",
new: "@langchain/community/tools/dynamic",
namedImport: null,
},
{
old: "langchain/tools/dataforseo_api_search",
new: "@langchain/community/tools/dataforseo_api_search",
namedImport: null,
},
{
old: "langchain/tools/dadjokeapi",
new: "@langchain/community/tools/dadjokeapi",
namedImport: null,
},
{
old: "langchain/tools/connery",
new: "@langchain/community/tools/connery",
namedImport: null,
},
{
old: "langchain/tools/calculator",
new: "@langchain/community/tools/calculator",
namedImport: null,
},
{
old: "langchain/tools/brave_search",
new: "@langchain/community/tools/brave_search",
namedImport: null,
},
{
old: "langchain/tools/bingserpapi",
new: "@langchain/community/tools/bingserpapi",
namedImport: null,
},
{
old: "langchain/tools/aws_sfn",
new: "@langchain/community/tools/aws_sfn",
namedImport: null,
},
{
old: "langchain/tools/aws_lambda",
new: "@langchain/community/tools/aws_lambda",
namedImport: null,
},
{
old: "langchain/tools/aiplugin",
new: "@langchain/community/tools/aiplugin",
namedImport: null,
},
{
old: "langchain/tools/IFTTTWebhook",
new: "@langchain/community/tools/ifttt",
namedImport: null,
},
{
old: "langchain/tools/gmail/*",
new: "@langchain/community/tools/gmail",
namedImport: null,
},
{
old: "langchain/stores/message/xata",
new: "@langchain/community/stores/message/xata",
namedImport: null,
},
{
old: "langchain/stores/message/upstash_redis",
new: "@langchain/community/stores/message/upstash_redis",
namedImport: null,
},
{
old: "langchain/stores/message/redis",
new: "@langchain/community/stores/message/redis",
namedImport: null,
},
{
old: "langchain/stores/message/planetscale",
new: "@langchain/community/stores/message/planetscale",
namedImport: null,
},
{
old: "langchain/stores/message/mongodb",
new: "@langchain/community/stores/message/mongodb",
namedImport: null,
},
{
old: "langchain/stores/message/momento",
new: "@langchain/community/stores/message/momento",
namedImport: null,
},
{
old: "langchain/stores/message/ioredis",
new: "@langchain/community/stores/message/ioredis",
namedImport: null,
},
{
old: "langchain/stores/message/in_memory",
new: "@langchain/community/stores/message/in_memory",
namedImport: null,
},
{
old: "langchain/stores/message/firestore",
new: "@langchain/community/stores/message/firestore",
namedImport: null,
},
{
old: "langchain/stores/message/dynamodb",
new: "@langchain/community/stores/message/dynamodb",
namedImport: null,
},
{
old: "langchain/stores/message/convex",
new: "@langchain/community/stores/message/convex",
namedImport: null,
},
{
old: "langchain/stores/message/cloudflare_d1",
new: "@langchain/community/stores/message/cloudflare_d1",
namedImport: null,
},
{
old: "langchain/stores/message/cassandra",
new: "@langchain/community/stores/message/cassandra",
namedImport: null,
},
{
old: "langchain/stores/doc/in_memory",
new: "@langchain/community/stores/doc/in_memory",
namedImport: null,
},
{
old: "langchain/storage/vercel_kv",
new: "@langchain/community/storage/vercel_kv",
namedImport: null,
},
{
old: "langchain/storage/upstash_redis",
new: "@langchain/community/storage/upstash_redis",
namedImport: null,
},
{
old: "langchain/storage/ioredis",
new: "@langchain/community/storage/ioredis",
namedImport: null,
},
{
old: "langchain/storage/convex",
new: "@langchain/community/storage/convex",
namedImport: null,
},
{
old: "langchain/schema/storage",
new: "@langchain/core/stores",
namedImport: null,
},
{
old: "langchain/schema/retriever",
new: "@langchain/core/retrievers",
namedImport: null,
},
{
old: "langchain/schema/output_parser",
new: "@langchain/core/output_parsers",
namedImport: null,
},
{
old: "langchain/schema/tests/lib",
new: "@langchain/core/utils/testing",
namedImport: null,
},
{
old: "langchain/schema/runnable/*",
new: "@langchain/core/runnables",
namedImport: null,
},
{
old: "langchain/runnables/remote",
new: "@langchain/core/runnables/remote",
namedImport: null,
},
{
old: "langchain/runnables/*",
new: "@langchain/core/runnables",
namedImport: null,
},
{
old: "langchain/retrievers/zep",
new: "@langchain/community/retrievers/zep",
namedImport: null,
},
{
old: "langchain/retrievers/vespa",
new: "@langchain/community/retrievers/vespa",
namedImport: null,
},
{
old: "langchain/retrievers/vectara_summary",
new: "@langchain/community/retrievers/vectara_summary",
namedImport: null,
},
{
old: "langchain/retrievers/tavily_search_api",
new: "@langchain/community/retrievers/tavily_search_api",
namedImport: null,
},
{
old: "langchain/retrievers/supabase",
new: "@langchain/community/retrievers/supabase",
namedImport: null,
},
{
old: "langchain/retrievers/metal",
new: "@langchain/community/retrievers/metal",
namedImport: null,
},
{
old: "langchain/retrievers/databerry",
new: "@langchain/community/retrievers/databerry",
namedImport: null,
},
{
old: "langchain/retrievers/chaindesk",
new: "@langchain/community/retrievers/chaindesk",
namedImport: null,
},
{
old: "langchain/retrievers/amazon_kendra",
new: "@langchain/community/retrievers/amazon_kendra",
namedImport: null,
},
{
old: "langchain/prompts/selectors/SemanticSimilarityExampleSelector",
new: "@langchain/core/example_selectors",
namedImport: null,
},
{
old: "langchain/memory/zep",
new: "@langchain/community/memory/zep",
namedImport: null,
},
{
old: "langchain/memory/motorhead_memory",
new: "@langchain/community/memory/motorhead_memory",
namedImport: null,
},
{
old: "langchain/memory/chat_memory",
new: "@langchain/community/memory/chat_memory",
namedImport: null,
},
{
old: "langchain/memory/base",
new: "@langchain/core/memory",
namedImport: null,
},
{
old: "langchain/load/serializable",
new: "@langchain/core/load/serializable",
namedImport: null,
},
{
old: "langchain/llms/yandex",
new: "@langchain/community/llms/yandex",
namedImport: null,
},
{
old: "langchain/llms/writer",
new: "@langchain/community/llms/writer",
namedImport: null,
},
{
old: "langchain/llms/watsonx_ai",
new: "@langchain/community/llms/watsonx_ai",
namedImport: null,
},
{
old: "langchain/llms/sagemaker_endpoint",
new: "@langchain/community/llms/sagemaker_endpoint",
namedImport: null,
},
{
old: "langchain/llms/replicate",
new: "@langchain/community/llms/replicate",
namedImport: null,
},
{
old: "langchain/llms/raycast",
new: "@langchain/community/llms/raycast",
namedImport: null,
},
{
old: "langchain/llms/portkey",
new: "@langchain/community/llms/portkey",
namedImport: null,
},
{
old: "langchain/llms/ollama",
new: "@langchain/community/llms/ollama",
namedImport: null,
},
{
old: "langchain/llms/llama_cpp",
new: "@langchain/community/llms/llama_cpp",
namedImport: null,
},
{
old: "langchain/llms/hf",
new: "@langchain/community/llms/hf",
namedImport: null,
},
{
old: "langchain/llms/gradient_ai",
new: "@langchain/community/llms/gradient_ai",
namedImport: null,
},
{
old: "langchain/llms/googlepalm",
new: "@langchain/community/llms/googlepalm",
namedImport: null,
},
{
old: "langchain/llms/fireworks",
new: "@langchain/community/llms/fireworks",
namedImport: null,
},
{
old: "langchain/llms/cohere",
new: "@langchain/community/llms/cohere",
namedImport: null,
},
{
old: "langchain/llms/cloudflare_workersai",
new: "@langchain/community/llms/cloudflare_workersai",
namedImport: null,
},
{
old: "langchain/llms/base",
new: "@langchain/core/language_models/llms",
namedImport: null,
},
{
old: "langchain/llms/aleph_alpha",
new: "@langchain/community/llms/aleph_alpha",
namedImport: null,
},
{
old: "langchain/llms/ai21",
new: "@langchain/community/llms/ai21",
namedImport: null,
},
{
old: "langchain/llms/googlevertexai",
new: "@langchain/community/llms/googlevertexai",
namedImport: null,
},
{
old: "langchain/llms/bedrock/web",
new: "@langchain/community/llms/bedrock/web",
namedImport: null,
},
{
old: "langchain/graphs/neo4j_graph",
new: "@langchain/community/graphs/neo4j_graph",
namedImport: null,
},
{
old: "langchain/embeddings/voyage",
new: "@langchain/community/embeddings/voyage",
namedImport: null,
},
{
old: "langchain/embeddings/tensorflow",
new: "@langchain/community/embeddings/tensorflow",
namedImport: null,
},
{
old: "langchain/embeddings/ollama",
new: "@langchain/community/embeddings/ollama",
namedImport: null,
},
{
old: "langchain/embeddings/minimax",
new: "@langchain/community/embeddings/minimax",
namedImport: null,
},
{
old: "langchain/embeddings/llama_cpp",
new: "@langchain/community/embeddings/llama_cpp",
namedImport: null,
},
{
old: "langchain/embeddings/hf_transformers",
new: "@langchain/community/embeddings/hf_transformers",
namedImport: null,
},
{
old: "langchain/embeddings/hf",
new: "@langchain/community/embeddings/hf",
namedImport: null,
},
{
old: "langchain/embeddings/gradient_ai",
new: "@langchain/community/embeddings/gradient_ai",
namedImport: null,
},
{
old: "langchain/embeddings/googlevertexai",
new: "@langchain/community/embeddings/googlevertexai",
namedImport: null,
},
{
old: "langchain/embeddings/googlepalm",
new: "@langchain/community/embeddings/googlepalm",
namedImport: null,
},
{
old: "langchain/embeddings/fake",
new: "@langchain/core/utils/testing",
namedImport: null,
},
{
old: "langchain/embeddings/cohere",
new: "@langchain/community/embeddings/cohere",
namedImport: null,
},
{
old: "langchain/embeddings/cloudflare_workersai",
new: "@langchain/community/embeddings/cloudflare_workersai",
namedImport: null,
},
{
old: "langchain/embeddings/bedrock",
new: "@langchain/community/embeddings/bedrock",
namedImport: null,
},
{
old: "langchain/embeddings/base",
new: "@langchain/core/embeddings",
namedImport: null,
},
{
old: "langchain/document_transformers/mozilla_readability",
new: "@langchain/community/document_transformers/mozilla_readability",
namedImport: null,
},
{
old: "langchain/document_transformers/html_to_text",
new: "@langchain/community/document_transformers/html_to_text",
namedImport: null,
},
{
old: "langchain/chat_models/yandex",
new: "@langchain/community/chat_models/yandex",
namedImport: null,
},
{
old: "langchain/chat_models/portkey",
new: "@langchain/community/chat_models/portkey",
namedImport: null,
},
{
old: "langchain/chat_models/ollama",
new: "@langchain/community/chat_models/ollama",
namedImport: null,
},
{
old: "langchain/chat_models/minimax",
new: "@langchain/community/chat_models/minimax",
namedImport: null,
},
{
old: "langchain/chat_models/llama_cpp",
new: "@langchain/community/chat_models/llama_cpp",
namedImport: null,
},
{
old: "langchain/chat_models/googlepalm",
new: "@langchain/community/chat_models/googlepalm",
namedImport: null,
},
{
old: "langchain/chat_models/fireworks",
new: "@langchain/community/chat_models/fireworks",
namedImport: null,
},
{
old: "langchain/chat_models/cloudflare_workersai",
new: "@langchain/community/chat_models/cloudflare_workersai",
namedImport: null,
},
{
old: "langchain/chat_models/base",
new: "@langchain/core/language_models/chat_models",
namedImport: null,
},
{
old: "langchain/chat_models/baiduwenxin",
new: "@langchain/community/chat_models/baiduwenxin",
namedImport: null,
},
{
old: "langchain/chat_models/iflytek_xinghuo",
new: "@langchain/community/chat_models/iflytek_xinghuo",
namedImport: null,
},
// NOTE(review): this mapping appeared twice back-to-back in the import map;
// the redundant copy was removed — one entry is sufficient for the codemod
// lookup and the mapping is identical either way.
{
  old: "langchain/chat_models/googlevertexai",
  new: "@langchain/community/chat_models/googlevertexai",
  namedImport: null,
},
{
old: "langchain/chat_models/bedrock",
new: "@langchain/community/chat_models/bedrock",
namedImport: null,
},
{
old: "langchain/chains/query_constructor/ir",
new: "@langchain/core/structured_query",
namedImport: null,
},
{
old: "langchain/callbacks/promises",
new: "@langchain/core/callbacks/promises",
namedImport: null,
},
{
old: "langchain/callbacks/manager",
new: "@langchain/core/callbacks/manager",
namedImport: null,
},
{
old: "langchain/callbacks/base",
new: "@langchain/core/callbacks/base",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/tracer_langchain",
new: "@langchain/core/tracers/tracer_langchain",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/tracer",
new: "@langchain/core/tracers/base",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/run_collector",
new: "@langchain/core/tracers/run_collector",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/log_stream",
new: "@langchain/core/tracers/log_stream",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/llmonitor",
new: "@langchain/community/callbacks/handlers/llmonitor",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/initialize",
new: "@langchain/core/tracers/initialize",
namedImport: null,
},
{
old: "langchain/callbacks/handlers/console",
new: "@langchain/core/tracers/console",
namedImport: null,
},
{
old: "langchain/cache/upstash_redis",
new: "@langchain/community/caches/upstash_redis",
namedImport: null,
},
{
old: "langchain/cache/momento",
new: "@langchain/community/caches/momento",
namedImport: null,
},
{
old: "langchain/cache/ioredis",
new: "@langchain/community/caches/ioredis",
namedImport: null,
},
{
old: "langchain/cache/cloudflare_kv",
new: "@langchain/community/caches/cloudflare_kv",
namedImport: null,
},
{
old: "langchain/cache/base",
new: "@langchain/core/caches",
namedImport: null,
},
{
old: "langchain/agents/toolkits/base",
new: "@langchain/community/agents/toolkits/base",
namedImport: null,
},
{
old: "langchain/agents/toolkits/connery/*",
new: "@langchain/community/agents/toolkits/connery",
namedImport: null,
},
{
old: "langchain/experimental/chat_models/ollama_functions",
new: "@langchain/community/experimental/chat_models/ollama_functions",
namedImport: null,
},
{
old: "langchain/experimental/multimodal_embeddings/googlevertexai",
new: "@langchain/community/experimental/multimodal_embeddings/googlevertexai",
namedImport: null,
},
];
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/check_broken_links.int.test.ts | import { test, expect } from "@jest/globals";
import { checkBrokenLinks } from "../check_broken_links.js";
test("Can load mdx file and find broken links", async () => {
  // Directory of MDX fixtures that includes a deliberately broken link.
  const fixtureDir = "./src/tests/__mdx__/";
  // The checker is expected to reject once it detects the dead link.
  await expect(
    checkBrokenLinks(fixtureDir, { logErrors: true })
  ).rejects.toThrow();
});
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/config_file.test.ts | import { test, expect } from "@jest/globals";
import { _verifyObjectIsLangChainConfig } from "../build/utils.js";
test("_verifyObjectIsLangChainConfig successfully fails an invalid object", async () => {
  // Dynamically load the fixture whose fields all carry the wrong types.
  const { config } = await import("./langchain.invalid.config.js");
  expect(_verifyObjectIsLangChainConfig(config)).toBe(false);
});
test("_verifyObjectIsLangChainConfig successfully passes a valid object", async () => {
  // Dynamically load the fixture whose fields all carry the expected types.
  const { config } = await import("./langchain.valid.config.js");
  expect(_verifyObjectIsLangChainConfig(config)).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/check_broken_links.test.ts | import { test, expect } from "@jest/globals";
import { extractLinks } from "../check_broken_links.js";
test("Regex can find links in md files", () => {
  // Three absolute URLs that the extractor should pull out, in document order.
  const link1 =
    "https://console.anthropic.com/workbench/2812bee0-2333-42cb-876e-6a5a5aab035a";
  const link2 =
    "https://js.langchain.com/docs/get_started/installation#installing-integration-packages";
  const link3 = "https://www.doordash.com/cart/";
  // Markdown document embedding the links as inline [text](url) references.
  const markdownDoc = `---
title: Function calling
---
# Function calling
A growing number of chat models, like
[OpenAI](${link1}),
[Mistral](${link2}),
etc., have a function-calling API that lets you describe functions and
their arguments, and have the model return a JSON object with a function
to invoke and the inputs to that function. Function-calling is extremely
useful for building [tool-using chains and
agents](${link3}), and for getting
structured outputs from models more generally.`;
  expect(extractLinks(markdownDoc)).toEqual([link1, link2, link3]);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/langchain.valid.config.js | export const config = {
// Fixture for config_file.test.ts: every field carries a value of the type
// _verifyObjectIsLangChainConfig expects, so validation should return true.
// Entrypoint name -> source file map (object of strings).
entrypoints: {
agents: "src/agents/index.ts",
utils: "src/utils/index.ts",
},
tsConfigPath: "tsconfig.json",
cjsSource: "build/cjs",
cjsDestination: "dist/cjs",
// Must be a function (see the invalid fixture's counterpart field).
abs: (relativePath) => `/absolute/path/${relativePath}`,
// The following array fields must contain strings only.
requiresOptionalDependency: ["some-optional-package"],
deprecatedNodeOnly: ["old-node-only-feature"],
deprecatedOmitFromImportMap: ["redundant-feature"],
packageSuffix: "community",
shouldTestExports: true,
// Entries are objects with `modules`, `alias`, and `path`.
extraImportMapEntries: [
{
modules: ["extra-module"],
alias: ["extra-alias"],
path: "extra/path",
},
],
gitignorePaths: ["node_modules", "dist", ".yarn"],
// Must be an array (RegExps here).
internals: [/^internal-regex/],
};
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/langchain.invalid.config.js | export const config = {
// Fixture for config_file.test.ts: every field deliberately carries a value of
// the wrong type, so _verifyObjectIsLangChainConfig(config) must return false.
entrypoints: "should be an object, not a string",
tsConfigPath: 123, // should be a string
cjsSource: null, // should be a string
cjsDestination: true, // should be a string
abs: "should be a function, not a string",
requiresOptionalDependency: "should be an array, not a string",
deprecatedNodeOnly: [123], // array elements should be strings
deprecatedOmitFromImportMap: [null], // array elements should be strings
packageSuffix: 456, // should be a string
shouldTestExports: "should be a boolean, not a string",
extraImportMapEntries: "should be an array, not a string",
gitignorePaths: [789], // array elements should be strings
internals: "should be an array, not a string",
};
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/__mdx__ | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/__mdx__/modules/index.mdx | ---
sidebar_position: 1
---
# Installation
## Supported Environments
LangChain is written in TypeScript and can be used in:
- Node.js (ESM and CommonJS) - 18.x, 19.x, 20.x
- Cloudflare Workers
- Vercel / Next.js (Browser, Serverless and Edge functions)
- Supabase Edge Functions
- Browser
- Deno
- Bun
However, note that individual integrations may not be supported in all environments.
## Installation
To get started, install LangChain with the following command:
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import CodeBlock from "@theme/CodeBlock";
<Tabs>
<TabItem value="npm" label="npm" default>
<CodeBlock language="bash">npm install -S langchain</CodeBlock>
</TabItem>
<TabItem value="yarn" label="Yarn">
<CodeBlock language="bash">yarn add langchain</CodeBlock>
</TabItem>
<TabItem value="pnpm" label="pnpm">
<CodeBlock language="bash">pnpm add langchain</CodeBlock>
</TabItem>
</Tabs>
### TypeScript
LangChain is written in TypeScript and provides type definitions for all of its public APIs.
## Installing integration packages
LangChain supports packages that contain specific module integrations with third-party providers.
They can be as specific as [`@langchain/google-genai`](/docs/integrations/platforms/google#chatgooglegenerativeai), which contains integrations just for Google AI Studio models,
or as broad as [`@langchain/community`](https://brokenLinkYo.ai), which contains broader variety of community contributed integrations.
These packages, as well as the main LangChain package, all depend on [`@langchain/core`](https://www.npmjs.com/package/@langchain/core), which contains the base abstractions
that these integration packages extend.
To ensure that all integrations and their types interact with each other properly, it is important that they all use the same version of `@langchain/core`.
The best way to guarantee this is to add a `"resolutions"` or `"overrides"` field like the following in your project's `package.json`. The name will depend on your package manager:
If you are using `yarn`:
```json title="yarn package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"@langchain/core": "^0.3.0"
},
"resolutions": {
"@langchain/core": "0.3.0"
}
}
```
Or for `npm`:
```json title="npm package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"@langchain/core": "^0.3.0"
},
"overrides": {
"@langchain/core": "0.3.0"
}
}
```
Or for `pnpm`:
```json title="pnpm package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"@langchain/core": "^0.3.0"
},
"pnpm": {
"overrides": {
"@langchain/core": "0.3.0"
}
}
}
```
### @langchain/community
The [@langchain/community](https://www.npmjs.com/package/@langchain/community) package contains third-party integrations.
It is automatically installed along with `langchain`, but can also be used separately with just `@langchain/core`. Install with:
```bash npm2yarn
npm install @langchain/community @langchain/core
```
### @langchain/core
The [@langchain/core](https://www.npmjs.com/package/@langchain/core) package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language.
It is automatically installed along with `langchain`, but can also be used separately. Install with:
```bash npm2yarn
npm install @langchain/core
```
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
## Loading the library
### ESM
LangChain provides an ESM build targeting Node.js environments. You can import it using the following syntax:
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
If you are using TypeScript in an ESM project we suggest updating your `tsconfig.json` to include the following:
```json title="tsconfig.json"
{
"compilerOptions": {
...
"target": "ES2020", // or higher
"module": "nodenext",
}
}
```
### CommonJS
LangChain provides a CommonJS build targeting Node.js environments. You can import it using the following syntax:
```typescript
const { ChatOpenAI } = require("@langchain/openai");
```
### Cloudflare Workers
LangChain can be used in Cloudflare Workers. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
### Vercel / Next.js
LangChain can be used in Vercel / Next.js. We support using LangChain in frontend components, in Serverless functions and in Edge functions. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
### Deno / Supabase Edge Functions
LangChain can be used in Deno / Supabase Edge Functions. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "https://esm.sh/@langchain/openai";
```
or
```typescript
import { ChatOpenAI } from "npm:@langchain/openai";
```
We recommend looking at our [Supabase Template](https://github.com/langchain-ai/langchain-template-supabase) for an example of how to use LangChain in Supabase Edge Functions.
### Browser
LangChain can be used in the browser. In our CI we test bundling LangChain with Webpack and Vite, but other bundlers should work too. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
## Unsupported: Node.js 16
We do not support Node.js 16, but if you still want to run LangChain on Node.js 16, you will need to follow the instructions in this section. We do not guarantee that these instructions will continue to work in the future.
You will have to make `fetch` available globally, either:
- run your application with `NODE_OPTIONS='--experimental-fetch' node ...`, or
- install `node-fetch` and follow the instructions [here](https://github.com/node-fetch/node-fetch#providing-global-access)
You'll also need to [polyfill `ReadableStream`](https://www.npmjs.com/package/web-streams-polyfill) by installing:
```bash npm2yarn
npm i web-streams-polyfill@4
```
And then adding it to the global namespace in your main entrypoint:
```typescript
import "web-streams-polyfill/polyfill";
```
Additionally you'll have to polyfill `structuredClone`, eg. by installing `core-js` and following the instructions [here](https://github.com/zloirock/core-js).
If you are running Node.js 18+, you do not need to do anything.
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/__mdx__ | lc_public_repos/langchainjs/libs/langchain-scripts/src/tests/__mdx__/modules/two.mdx | ---
sidebar_position: 1
---
# Installation
## Supported Environments
LangChain is written in TypeScript and can be used in:
- Node.js (ESM and CommonJS) - 18.x, 19.x, 20.x
- Cloudflare Workers
- Vercel / Next.js (Browser, Serverless and Edge functions)
- Supabase Edge Functions
- Browser
- Deno
- Bun
However, note that individual integrations may not be supported in all environments.
## Installation
To get started, install LangChain with the following command:
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import CodeBlock from "@theme/CodeBlock";
<Tabs>
<TabItem value="npm" label="npm" default>
<CodeBlock language="bash">npm install -S langchain</CodeBlock>
</TabItem>
<TabItem value="yarn" label="Yarn">
<CodeBlock language="bash">yarn add langchain</CodeBlock>
</TabItem>
<TabItem value="pnpm" label="pnpm">
<CodeBlock language="bash">pnpm add langchain</CodeBlock>
</TabItem>
</Tabs>
### TypeScript
LangChain is written in TypeScript and provides type definitions for all of its public APIs.
## Installing integration packages
LangChain supports packages that contain specific module integrations with third-party providers.
They can be as specific as [`@langchain/google-genai`](/docs/integrations/platforms/google#chatgooglegenerativeai), which contains integrations just for Google AI Studio models,
These packages, as well as the main LangChain package, all depend on [`@langchain/core`](https://www.npmjs.com/package/@langchain/core), which contains the base abstractions
that these integration packages extend.
To ensure that all integrations and their types interact with each other properly, it is important that they all use the same version of `@langchain/core`.
The best way to guarantee this is to add a `"resolutions"` or `"overrides"` field like the following in your project's `package.json`. The name will depend on your package manager:
If you are using `yarn`:
```json title="yarn package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"@langchain/core": "^0.3.0"
},
"resolutions": {
"@langchain/core": "0.3.0"
}
}
```
Or for `npm`:
```json title="npm package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"@langchain/core": "^0.3.0"
},
"overrides": {
"@langchain/core": "0.3.0"
}
}
```
Or for `pnpm`:
```json title="pnpm package.json"
{
"name": "your-project",
"version": "0.0.0",
"private": true,
"engines": {
"node": ">=18"
},
"dependencies": {
"@langchain/google-genai": "^0.0.2",
"langchain": "0.0.207",
"@langchain/core": "^0.3.0"
},
"pnpm": {
"overrides": {
"@langchain/core": "0.3.0"
}
}
}
```
### @langchain/community
The [@langchain/community](https://www.npmjs.com/package/@langchain/community) package contains third-party integrations.
It is automatically installed along with `langchain`, but can also be used separately with just `@langchain/core`. Install with:
```bash npm2yarn
npm install @langchain/community @langchain/core
```
### @langchain/core
The [@langchain/core](https://www.npmjs.com/package/@langchain/core) package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language.
It is automatically installed along with `langchain`, but can also be used separately. Install with:
```bash npm2yarn
npm install @langchain/core
```
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
## Loading the library
### ESM
LangChain provides an ESM build targeting Node.js environments. You can import it using the following syntax:
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
If you are using TypeScript in an ESM project we suggest updating your `tsconfig.json` to include the following:
```json title="tsconfig.json"
{
"compilerOptions": {
...
"target": "ES2020", // or higher
"module": "nodenext",
}
}
```
### CommonJS
LangChain provides a CommonJS build targeting Node.js environments. You can import it using the following syntax:
```typescript
const { ChatOpenAI } = require("@langchain/openai");
```
### Cloudflare Workers
LangChain can be used in Cloudflare Workers. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
### Vercel / Next.js
LangChain can be used in Vercel / Next.js. We support using LangChain in frontend components, in Serverless functions and in Edge functions. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
### Deno / Supabase Edge Functions
LangChain can be used in Deno / Supabase Edge Functions. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "https://esm.sh/@langchain/openai";
```
or
```typescript
import { ChatOpenAI } from "npm:@langchain/openai";
```
We recommend looking at our [Supabase Template](https://github.com/langchain-ai/langchain-template-supabase) for an example of how to use LangChain in Supabase Edge Functions.
### Browser
LangChain can be used in the browser. In our CI we test bundling LangChain with Webpack and Vite, but other bundlers should work too. You can import it using the following syntax:
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
## Unsupported: Node.js 16
We do not support Node.js 16, but if you still want to run LangChain on Node.js 16, you will need to follow the instructions in this section. We do not guarantee that these instructions will continue to work in the future.
You will have to make `fetch` available globally, either:
- run your application with `NODE_OPTIONS='--experimental-fetch' node ...`, or
- install `node-fetch` and follow the instructions [here](https://github.com/node-fetch/node-fetch#providing-global-access)
You'll also need to [polyfill `ReadableStream`](https://www.npmjs.com/package/web-streams-polyfill) by installing:
```bash npm2yarn
npm i web-streams-polyfill@4
```
And then adding it to the global namespace in your main entrypoint:
```typescript
import "web-streams-polyfill/polyfill";
```
Additionally you'll have to polyfill `structuredClone`, eg. by installing `core-js` and following the instructions [here](https://github.com/zloirock/core-js).
If you are running Node.js 18+, you do not need to do anything.
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/README.md | # Integration doc CLI
Generate integration documentation using the CLI.
## Supported integration types
- Chat models
- LLMs
- Text embeddings
- Retrievers
- Document loaders
## Usage
1. Build the CLI:
```bash
yarn build --filter=@langchain/scripts
```
2. Run the CLI:
```bash
yarn create:integration:doc --classname <Class Name> --type <Type>
```
The `--classname` field should be passed the full class name of the integration, e.g `ChatOpenAI` or `RecursiveUrlLoader`.
The `--type` field should be passed the type of the integration. It must be one of the following:
- `chat`
- `llm`
- `embeddings`
- `retriever`
- `doc_loader`
After invoking the script, you'll be prompted to fill out more integration-specific information.
Finally, the script will log the path of the newly created integration documentation. You should open this notebook, run all the cells, handle and remove any TODOs, and verify all links work as expected.
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/constants.ts | export const SIDEBAR_LABEL_PLACEHOLDER = "__sidebar_label__";
// Placeholder tokens substituted into the notebook doc templates by the
// integration-doc CLI (see the per-type generators under src/cli/docs/).
// Each token maps to one slot in the .ipynb templates.
export const MODULE_NAME_PLACEHOLDER = "__module_name__";
export const PACKAGE_NAME_PLACEHOLDER = "__package_name__";
export const FULL_IMPORT_PATH_PLACEHOLDER = "__full_import_path__";
export const ENV_VAR_NAME_PLACEHOLDER = "__env_var_name__";
export const API_REF_MODULE_PLACEHOLDER = "__api_ref_module__";
export const API_REF_PACKAGE_PLACEHOLDER = "__api_ref_package__";
export const PYTHON_DOC_URL_PLACEHOLDER = "__python_doc_url__";
// Feature-table cells (serializable / runs-locally / Python-support columns).
export const SERIALIZABLE_PLACEHOLDER = "__serializable__";
export const LOCAL_PLACEHOLDER = "__local__";
export const PY_SUPPORT_PLACEHOLDER = "__py_support__";
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/tools.ts | import * as path from "node:path";
import * as fs from "node:fs";
import {
boldText,
getUserInput,
greenText,
redBackground,
} from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
SIDEBAR_LABEL_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
PACKAGE_NAME_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
API_REF_MODULE_PLACEHOLDER,
API_REF_PACKAGE_PLACEHOLDER,
SERIALIZABLE_PLACEHOLDER,
PY_SUPPORT_PLACEHOLDER,
} from "../constants.js";
// Notebook template used to scaffold a new tool integration doc.
// NOTE(review): resolved relative to the process CWD — assumes the CLI is run
// from the package root.
const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/tools.ipynb");
// Destination directory for the generated tool docs.
const INTEGRATIONS_DOCS_PATH = path.resolve(
"../../docs/core_docs/docs/integrations/tools"
);
// Answers gathered interactively before rendering the template.
type ExtraFields = {
// Whether the integration supports serializable output.
serializable: boolean;
// Whether an equivalent Python integration exists.
pySupport: boolean;
// Full import path, e.g. "@langchain/community/tools/foo".
fullImportPath: string;
// npm package name the import path belongs to.
packageName: string;
};
/**
 * Interactively collects the extra fields needed to render the tool doc
 * template: serializability, Python support, import path and package name.
 */
async function promptExtraFields(): Promise<ExtraFields> {
  // Small local helpers so each question reads as a single statement.
  const ask = (question: string) => getUserInput(question, undefined, true);
  const isYes = (answer: string) => answer.toLowerCase() === "y";

  const serializableAnswer = await ask(
    "Does this integration support serializable output? (y/n) "
  );
  const pySupportAnswer = await ask(
    "Does this integration have Python support? (y/n) "
  );
  const fullImportPath = await ask(
    "What is the full import path of the integration? (e.g @langchain/community/llms/togetherai) "
  );

  // Derive the package name from the import path ("langchain/…" is the bare
  // package; otherwise take the scope + package segments).
  let packageName = fullImportPath.startsWith("langchain/")
    ? "langchain"
    : fullImportPath.split("/").slice(0, 2).join("/");

  const confirmation = await ask(
    `Is ${packageName} the correct package name? (y/n) `
  );
  if (confirmation.toLowerCase() === "n") {
    packageName = await ask(
      "Please enter the full package name (e.g @langchain/community) "
    );
  }

  return {
    serializable: isYes(serializableAnswer),
    pySupport: isYes(pySupportAnswer),
    fullImportPath,
    packageName,
  };
}
export async function fillToolIntegrationDocTemplate(fields: {
className: string;
}) {
const sidebarLabel = fields.className.replace("Tool", "");
const pyDocUrl = `https://python.langchain.com/docs/integrations/tools/${sidebarLabel.toLowerCase()}/`;
const extraFields = await promptExtraFields();
const importPathEnding = extraFields.fullImportPath.split("/").pop() ?? "";
const apiRefModuleUrl = `https://api.js.langchain.com/classes/${extraFields.fullImportPath
.replace("@", "")
.replaceAll("/", "_")
.replaceAll("-", "_")}.${fields.className}.html`;
const apiRefPackageUrl = apiRefModuleUrl
.replace("/classes/", "/modules/")
.replace(`.${fields.className}.html`, ".html");
const apiRefUrlSuccesses = await Promise.all([
fetchURLStatus(apiRefModuleUrl),
fetchURLStatus(apiRefPackageUrl),
]);
if (apiRefUrlSuccesses.find((s) => !s)) {
console.warn(
"API ref URLs invalid. Please manually ensure they are correct."
);
}
const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
.replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
.replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
.replaceAll(PACKAGE_NAME_PLACEHOLDER, extraFields.packageName)
.replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, extraFields.fullImportPath)
.replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl)
.replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
.replaceAll(API_REF_PACKAGE_PLACEHOLDER, apiRefPackageUrl)
.replaceAll(
SERIALIZABLE_PLACEHOLDER,
extraFields?.serializable ? "beta" : "β"
)
.replaceAll(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "β
" : "β");
const docPath = path.join(
INTEGRATIONS_DOCS_PATH,
`${importPathEnding}.ipynb`
);
await fs.promises.writeFile(docPath, docTemplate);
const prettyDocPath = docPath.split("docs/core_docs/")[1];
const updatePythonDocUrlText = ` ${redBackground(
"- Update the Python documentation URL with the proper URL."
)}`;
const successText = `\nSuccessfully created new document loader integration doc at ${prettyDocPath}.`;
console.log(
`${greenText(successText)}\n
${boldText("Next steps:")}
${extraFields?.pySupport ? updatePythonDocUrlText : ""}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
);
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/vectorstores.ts | import * as path from "node:path";
import * as fs from "node:fs";
import { boldText, getUserInput, greenText } from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
SIDEBAR_LABEL_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
PACKAGE_NAME_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
ENV_VAR_NAME_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
API_REF_MODULE_PLACEHOLDER,
API_REF_PACKAGE_PLACEHOLDER,
} from "../constants.js";
// Notebook template used to scaffold a new vector store integration doc.
// NOTE(review): resolved relative to the process CWD — assumes the CLI is run
// from the package root.
const TEMPLATE_PATH = path.resolve(
"./src/cli/docs/templates/vectorstores.ipynb"
);
// Destination directory for the generated vector store docs.
const INTEGRATIONS_DOCS_PATH = path.resolve(
"../../docs/core_docs/docs/integrations/vectorstores"
);
// Answers gathered interactively before rendering the template.
type ExtraFields = {
// Name of the environment variable holding the provider API key.
envVarName: string;
// Full import path, e.g. "@langchain/community/vectorstores/foo".
fullImportPath: string;
// npm package name the import path belongs to.
packageName: string;
};
/**
 * Interactively collects the extra fields needed to render the vector store
 * doc template: import path, package name, and the API-key env variable.
 */
async function promptExtraFields(fields: {
  envVarGuess: string;
}): Promise<ExtraFields> {
  // Small local helper so each question reads as a single statement.
  const ask = (question: string) => getUserInput(question, undefined, true);

  const fullImportPath = await ask(
    "What is the full import path of the integration? (e.g @langchain/community/llms/togetherai) "
  );

  // Derive the package name from the import path ("langchain/…" is the bare
  // package; otherwise take the scope + package segments).
  let packageName = fullImportPath.startsWith("langchain/")
    ? "langchain"
    : fullImportPath.split("/").slice(0, 2).join("/");

  const packageConfirmed = await ask(
    `Is ${packageName} the correct package name? (y/n) `
  );
  if (packageConfirmed.toLowerCase() === "n") {
    packageName = await ask(
      "Please enter the full package name (e.g @langchain/community) "
    );
  }

  // Confirm (or correct) the guessed API-key environment variable name.
  let envVarName = fields.envVarGuess;
  const envVarConfirmed = await ask(
    `Is the environment variable for the API key named ${fields.envVarGuess}? (y/n) `
  );
  if (envVarConfirmed.toLowerCase() === "n") {
    envVarName = await ask(
      "Please enter the correct environment variable name "
    );
  }

  return {
    envVarName,
    fullImportPath,
    packageName,
  };
}
/**
 * Generates a new vector store integration doc from the notebook template.
 *
 * Prompts the user for integration metadata, substitutes the template
 * placeholders, verifies the derived API-reference URLs, and writes the
 * resulting notebook into the vector store integration docs directory.
 */
export async function fillVectorStoreIntegrationDocTemplate(fields: {
  className: string;
}) {
  const sidebarLabel = fields.className.replace("VectorStore", "");
  const pyDocUrl = `https://python.langchain.com/docs/integrations/vectorstores/${sidebarLabel.toLowerCase()}/`;
  let envVarName = `${sidebarLabel.toUpperCase()}_API_KEY`;
  const extraFields = await promptExtraFields({
    envVarGuess: envVarName,
  });
  envVarName = extraFields.envVarName;
  const importPathEnding = extraFields.fullImportPath.split("/").pop() ?? "";
  // API ref URLs are derived from the import path, e.g.
  // "@langchain/community/vectorstores/foo" -> "langchain_community_vectorstores_foo".
  const apiRefModuleUrl = `https://api.js.langchain.com/classes/${extraFields.fullImportPath
    .replace("@", "")
    .replaceAll("/", "_")
    .replaceAll("-", "_")}.${fields.className}.html`;
  const apiRefPackageUrl = apiRefModuleUrl
    .replace("/classes/", "/modules/")
    .replace(`.${fields.className}.html`, ".html");
  const apiRefUrlSuccesses = await Promise.all([
    fetchURLStatus(apiRefModuleUrl),
    fetchURLStatus(apiRefPackageUrl),
  ]);
  // BUGFIX: `.find((s) => !s)` returns the failing element itself (`false`),
  // which is falsy, so the warning never fired. `.some` yields a boolean.
  if (apiRefUrlSuccesses.some((s) => !s)) {
    console.warn(
      "API ref URLs invalid. Please manually ensure they are correct."
    );
  }
  const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
    .replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
    .replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
    .replaceAll(PACKAGE_NAME_PLACEHOLDER, extraFields.packageName)
    .replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, extraFields.fullImportPath)
    .replaceAll(ENV_VAR_NAME_PLACEHOLDER, envVarName)
    .replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl)
    .replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
    .replaceAll(API_REF_PACKAGE_PLACEHOLDER, apiRefPackageUrl);
  const docPath = path.join(
    INTEGRATIONS_DOCS_PATH,
    `${importPathEnding}.ipynb`
  );
  await fs.promises.writeFile(docPath, docTemplate);
  const prettyDocPath = docPath.split("docs/core_docs/")[1];
  console.log(
    // BUGFIX: message previously said "document loader integration doc"
    // (copy/paste); this generator creates vector store docs.
    `${greenText(
      `\nSuccessfully created new vector store integration doc at ${prettyDocPath}.`
    )}\n
${boldText("Next steps:")}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
  );
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/chat.ts | import * as path from "node:path";
import * as fs from "node:fs";
import {
boldText,
getUserInput,
greenText,
redBackground,
} from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
SIDEBAR_LABEL_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
PACKAGE_NAME_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
ENV_VAR_NAME_PLACEHOLDER,
API_REF_MODULE_PLACEHOLDER,
API_REF_PACKAGE_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
LOCAL_PLACEHOLDER,
SERIALIZABLE_PLACEHOLDER,
PY_SUPPORT_PLACEHOLDER,
} from "../constants.js";
// Feature-table placeholder tokens specific to the chat model doc template.
const TOOL_CALLING_PLACEHOLDER = "__tool_calling__";
const JSON_MODE_PLACEHOLDER = "__json_mode__";
const IMAGE_INPUT_PLACEHOLDER = "__image_input__";
const AUDIO_INPUT_PLACEHOLDER = "__audio_input__";
const VIDEO_INPUT_PLACEHOLDER = "__video_input__";
const TOKEN_LEVEL_STREAMING_PLACEHOLDER = "__token_level_streaming__";
const TOKEN_USAGE_PLACEHOLDER = "__token_usage__";
const LOGPROBS_PLACEHOLDER = "__logprobs__";
// Notebook template used to scaffold a new chat model integration doc.
// NOTE(review): resolved relative to the process CWD — assumes the CLI is run
// from the package root.
const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/chat.ipynb");
// Destination directory for the generated chat model docs.
const INTEGRATIONS_DOCS_PATH = path.resolve(
"../../docs/core_docs/docs/integrations/chat"
);
// Answers gathered interactively before rendering the template. Each boolean
// fills one cell of the feature table in the generated notebook.
type ExtraFields = {
/**
* If tool calling is true, structured output will also be true.
*/
toolCalling: boolean;
jsonMode: boolean;
imageInput: boolean;
audioInput: boolean;
videoInput: boolean;
tokenLevelStreaming: boolean;
tokenUsage: boolean;
logprobs: boolean;
// Whether the model can run locally.
local: boolean;
serializable: boolean;
// Whether an equivalent Python integration exists.
pySupport: boolean;
// Name of the environment variable holding the provider API key.
envVarName: string;
// Full import path, e.g. "@langchain/community/chat_models/foo".
fullImportPath: string;
// npm package name the import path belongs to.
packageName: string;
};
/**
 * Interactively gathers every feature flag and metadata field required by the
 * chat model doc template. All questions are y/n prompts except the import
 * path and any corrections.
 */
async function promptExtraFields(fields: {
  envVarGuess: string;
}): Promise<ExtraFields> {
  // Small local helpers so each question reads as a single statement.
  const ask = (question: string) => getUserInput(question, undefined, true);
  const isYes = (answer: string) => answer.toLowerCase() === "y";

  const toolCalling = isYes(
    await ask("Does this integration support tool calling? (y/n) ")
  );
  const jsonMode = isYes(
    await ask("Does this integration support JSON mode? (y/n) ")
  );
  const imageInput = isYes(
    await ask("Does this integration support image input? (y/n) ")
  );
  const audioInput = isYes(
    await ask("Does this integration support audio input? (y/n) ")
  );
  const videoInput = isYes(
    await ask("Does this integration support video input? (y/n) ")
  );
  const tokenLevelStreaming = isYes(
    await ask("Does this integration support token level streaming? (y/n) ")
  );
  const tokenUsage = isYes(
    await ask("Does this integration support token usage? (y/n) ")
  );
  const logprobs = isYes(
    await ask("Does this integration support logprobs? (y/n) ")
  );
  const local = isYes(
    await ask("Does this integration support local usage? (y/n) ")
  );
  const serializable = isYes(
    await ask("Does this integration support serializable output? (y/n) ")
  );
  const pySupport = isYes(
    await ask("Does this integration have Python support? (y/n) ")
  );

  const fullImportPath = await ask(
    "What is the full import path of the integration? (e.g @langchain/community/chat_models/togetherai) "
  );

  // Derive the package name from the import path ("langchain/…" is the bare
  // package; otherwise take the scope + package segments).
  let packageName = fullImportPath.startsWith("langchain/")
    ? "langchain"
    : fullImportPath.split("/").slice(0, 2).join("/");

  const packageConfirmed = await ask(
    `Is ${packageName} the correct package name? (y/n) `
  );
  if (packageConfirmed.toLowerCase() === "n") {
    packageName = await ask(
      "Please enter the full package name (e.g @langchain/community) "
    );
  }

  // Confirm (or correct) the guessed API-key environment variable name.
  let envVarName = fields.envVarGuess;
  const envVarConfirmed = await ask(
    `Is the environment variable for the API key named ${fields.envVarGuess}? (y/n) `
  );
  if (envVarConfirmed.toLowerCase() === "n") {
    envVarName = await ask(
      "Please enter the correct environment variable name "
    );
  }

  return {
    toolCalling,
    jsonMode,
    imageInput,
    audioInput,
    videoInput,
    tokenLevelStreaming,
    tokenUsage,
    logprobs,
    local,
    serializable,
    pySupport,
    envVarName,
    fullImportPath,
    packageName,
  };
}
export async function fillChatIntegrationDocTemplate(fields: {
className: string;
}) {
const sidebarLabel = fields.className.replace("Chat", "");
const pyDocUrl = `https://python.langchain.com/docs/integrations/chat/${sidebarLabel.toLowerCase()}/`;
let envVarName = `${sidebarLabel.toUpperCase()}_API_KEY`;
const extraFields = await promptExtraFields({
envVarGuess: envVarName,
});
envVarName = extraFields.envVarName;
const apiRefModuleUrl = `https://api.js.langchain.com/classes/${extraFields.fullImportPath
.replace("@", "")
.replaceAll("/", "_")
.replaceAll("-", "_")}.${fields.className}.html`;
const apiRefPackageUrl = apiRefModuleUrl
.replace("/classes/", "/modules/")
.replace(`.${fields.className}.html`, ".html");
const apiRefUrlSuccesses = await Promise.all([
fetchURLStatus(apiRefModuleUrl),
fetchURLStatus(apiRefPackageUrl),
]);
if (apiRefUrlSuccesses.find((s) => !s)) {
console.warn(
"API ref URLs invalid. Please manually ensure they are correct."
);
}
const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
.replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
.replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
.replaceAll(PACKAGE_NAME_PLACEHOLDER, extraFields.packageName)
.replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, extraFields.fullImportPath)
.replaceAll(ENV_VAR_NAME_PLACEHOLDER, extraFields.envVarName)
.replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
.replaceAll(API_REF_PACKAGE_PLACEHOLDER, apiRefPackageUrl)
.replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl)
.replaceAll(
TOOL_CALLING_PLACEHOLDER,
extraFields?.toolCalling ? "β
" : "β"
)
.replace(JSON_MODE_PLACEHOLDER, extraFields?.jsonMode ? "β
" : "β")
.replace(IMAGE_INPUT_PLACEHOLDER, extraFields?.imageInput ? "β
" : "β")
.replace(AUDIO_INPUT_PLACEHOLDER, extraFields?.audioInput ? "β
" : "β")
.replace(VIDEO_INPUT_PLACEHOLDER, extraFields?.videoInput ? "β
" : "β")
.replace(
TOKEN_LEVEL_STREAMING_PLACEHOLDER,
extraFields?.tokenLevelStreaming ? "β
" : "β"
)
.replace(TOKEN_USAGE_PLACEHOLDER, extraFields?.tokenUsage ? "β
" : "β")
.replace(LOGPROBS_PLACEHOLDER, extraFields?.logprobs ? "β
" : "β")
.replace(LOCAL_PLACEHOLDER, extraFields?.local ? "β
" : "β")
.replace(
SERIALIZABLE_PLACEHOLDER,
extraFields?.serializable ? "β
" : "beta"
)
.replace(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "β
" : "β");
const docFileName = extraFields.fullImportPath.split("/").pop();
const docPath = path.join(INTEGRATIONS_DOCS_PATH, `${docFileName}.ipynb`);
await fs.promises.writeFile(docPath, docTemplate);
const prettyDocPath = docPath.split("docs/core_docs/")[1];
const updatePythonDocUrlText = ` ${redBackground(
"- Update the Python documentation URL with the proper URL."
)}`;
const successText = `\nSuccessfully created new chat model integration doc at ${prettyDocPath}.`;
console.log(
`${greenText(successText)}\n
${boldText("Next steps:")}
${extraFields?.pySupport ? updatePythonDocUrlText : ""}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
);
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/llms.ts | import * as path from "node:path";
import * as fs from "node:fs";
import {
boldText,
getUserInput,
greenText,
redBackground,
} from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
API_REF_MODULE_PLACEHOLDER,
API_REF_PACKAGE_PLACEHOLDER,
ENV_VAR_NAME_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
LOCAL_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
PACKAGE_NAME_PLACEHOLDER,
PY_SUPPORT_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
SERIALIZABLE_PLACEHOLDER,
SIDEBAR_LABEL_PLACEHOLDER,
} from "../constants.js";
// Notebook template used to scaffold a new LLM integration doc.
// NOTE(review): resolved relative to the process CWD — assumes the CLI is run
// from the package root.
const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/llms.ipynb");
// Destination directory for the generated LLM docs.
const INTEGRATIONS_DOCS_PATH = path.resolve(
"../../docs/core_docs/docs/integrations/llms"
);
// Answers gathered interactively before rendering the template.
type ExtraFields = {
// Whether the model can run locally.
local: boolean;
// Whether the integration supports serializable output.
serializable: boolean;
// Whether an equivalent Python integration exists.
pySupport: boolean;
// npm package name the import path belongs to.
packageName: string;
// Full import path, e.g. "@langchain/community/llms/foo".
fullImportPath: string;
// Name of the environment variable holding the provider API key.
envVarName: string;
};
/**
 * Interactively collects the metadata needed to render the LLM doc template:
 * local support, serializability, Python support, import path, package name
 * and the API-key environment variable.
 */
async function promptExtraFields(fields: {
  envVarGuess: string;
}): Promise<ExtraFields> {
  // Small local helpers so each question reads as a single statement.
  const ask = (question: string) => getUserInput(question, undefined, true);
  const isYes = (answer: string) => answer.toLowerCase() === "y";

  const local = isYes(
    await ask("Does this integration support local usage? (y/n) ")
  );
  const serializable = isYes(
    await ask("Does this integration support serializable output? (y/n) ")
  );
  const pySupport = isYes(
    await ask("Does this integration have Python support? (y/n) ")
  );

  const fullImportPath = await ask(
    "What is the full import path of the integration? (e.g @langchain/community/llms/togetherai) "
  );

  // Derive the package name from the import path ("langchain/…" is the bare
  // package; otherwise take the scope + package segments).
  let packageName = fullImportPath.startsWith("langchain/")
    ? "langchain"
    : fullImportPath.split("/").slice(0, 2).join("/");

  const packageConfirmed = await ask(
    `Is ${packageName} the correct package name? (y/n) `
  );
  if (packageConfirmed.toLowerCase() === "n") {
    packageName = await ask(
      "Please enter the full package name (e.g @langchain/community) "
    );
  }

  // Confirm (or correct) the guessed API-key environment variable name.
  let envVarName = fields.envVarGuess;
  const envVarConfirmed = await ask(
    `Is the environment variable for the API key named ${fields.envVarGuess}? (y/n) `
  );
  if (envVarConfirmed.toLowerCase() === "n") {
    envVarName = await ask(
      "Please enter the correct environment variable name "
    );
  }

  return {
    local,
    serializable,
    pySupport,
    packageName,
    fullImportPath,
    envVarName,
  };
}
export async function fillLLMIntegrationDocTemplate(fields: {
className: string;
}) {
const sidebarLabel = fields.className.replace("LLM", "").replace("Llm", "");
const pyDocUrl = `https://python.langchain.com/docs/integrations/llms/${sidebarLabel.toLowerCase()}/`;
let envVarName = `${sidebarLabel.toUpperCase()}_API_KEY`;
const extraFields = await promptExtraFields({
envVarGuess: envVarName,
});
envVarName = extraFields.envVarName;
const apiRefModuleUrl = `https://api.js.langchain.com/classes/${extraFields.fullImportPath
.replace("@", "")
.replaceAll("/", "_")
.replaceAll("-", "_")}.${fields.className}.html`;
const apiRefPackageUrl = apiRefModuleUrl
.replace("/classes/", "/modules/")
.replace(`.${fields.className}.html`, ".html");
const apiRefUrlSuccesses = await Promise.all([
fetchURLStatus(apiRefModuleUrl),
fetchURLStatus(apiRefPackageUrl),
]);
if (apiRefUrlSuccesses.find((s) => !s)) {
console.warn(
"API ref URLs invalid. Please manually ensure they are correct."
);
}
const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
.replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
.replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
.replaceAll(PACKAGE_NAME_PLACEHOLDER, extraFields.packageName)
.replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, extraFields.fullImportPath)
.replaceAll(ENV_VAR_NAME_PLACEHOLDER, extraFields.envVarName)
.replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl)
.replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
.replaceAll(API_REF_PACKAGE_PLACEHOLDER, apiRefPackageUrl)
.replace(LOCAL_PLACEHOLDER, extraFields?.local ? "β
" : "β")
.replace(
SERIALIZABLE_PLACEHOLDER,
extraFields?.serializable ? "β
" : "beta"
)
.replace(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "β
" : "β");
const packageNameShortSnakeCase = fields.className
.replace(/-/g, "_")
.toLowerCase();
const docPath = path.join(
INTEGRATIONS_DOCS_PATH,
`${packageNameShortSnakeCase}.ipynb`
);
await fs.promises.writeFile(docPath, docTemplate);
const prettyDocPath = docPath.split("docs/core_docs/")[1];
const updatePythonDocUrlText = ` ${redBackground(
"- Update the Python documentation URL with the proper URL."
)}`;
const successText = `\nSuccessfully created new chat model integration doc at ${prettyDocPath}.`;
console.log(
`${greenText(successText)}\n
${boldText("Next steps:")}
${extraFields?.pySupport ? updatePythonDocUrlText : ""}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
);
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/retrievers.ts | import * as path from "node:path";
import * as fs from "node:fs";
import {
boldText,
getUserInput,
greenText,
redBackground,
} from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
PACKAGE_NAME_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
SIDEBAR_LABEL_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
PY_SUPPORT_PLACEHOLDER,
API_REF_MODULE_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
} from "../constants.js";
// Placeholders specific to the retriever template (not shared in constants.ts).
const HAS_CLOUD_OFFERING_PLACEHOLDER = "__has_cloud_offering__";
const CAN_SELF_HOST_PLACEHOLDER = "__can_self_host__";
// Notebook template used to scaffold new retriever integration docs.
const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/retrievers.ipynb");
// Destination directory for generated docs, resolved relative to this
// package's working directory (assumes the CLI is run from langchain-scripts).
const INTEGRATIONS_DOCS_PATH = path.resolve(
  "../../docs/core_docs/docs/integrations/retrievers"
);
// Answers gathered interactively from the user by promptExtraFields.
type ExtraFields = {
  packageName: string; // e.g. "@langchain/community"
  fullImportPath?: string; // e.g. "@langchain/community/retrievers/my_retriever"
  hasCloudOffering: boolean; // offered as a hosted/cloud service
  canSelfHost: boolean; // can be self-hosted
  pySupport: boolean; // has a Python counterpart
};
/**
 * Interactively collects retriever-integration metadata from the user.
 *
 * @returns The confirmed (or corrected) answers.
 */
async function promptExtraFields(): Promise<ExtraFields> {
  // Fixed: the two questions below were swapped — the "self hosting" prompt
  // was stored in `hasCloudOffering` and the "cloud offering" prompt in
  // `canSelfHost`, inverting both badges in the generated doc.
  const hasCloudOffering = await getUserInput(
    "Does this retriever have a cloud offering? (y/n) ",
    undefined,
    true
  );
  const canSelfHost = await getUserInput(
    "Does this retriever support self hosting? (y/n) ",
    undefined,
    true
  );
  const hasPySupport = await getUserInput(
    "Does this integration have Python support? (y/n) ",
    undefined,
    true
  );
  const importPath = await getUserInput(
    "What is the full import path of the integration? (e.g @langchain/community/retrievers/my_retriever) ",
    undefined,
    true
  );
  // Derive the package name: bare "langchain" has no scope; otherwise take
  // the first two path segments (e.g. "@langchain/community").
  let packageName = "";
  if (importPath.startsWith("langchain/")) {
    packageName = "langchain";
  } else {
    packageName = importPath.split("/").slice(0, 2).join("/");
  }
  const verifyPackageName = await getUserInput(
    `Is ${packageName} the correct package name? (y/n) `,
    undefined,
    true
  );
  if (verifyPackageName.toLowerCase() === "n") {
    packageName = await getUserInput(
      "Please enter the full package name (e.g @langchain/community) ",
      undefined,
      true
    );
  }
  return {
    packageName,
    fullImportPath: importPath,
    canSelfHost: canSelfHost.toLowerCase() === "y",
    hasCloudOffering: hasCloudOffering.toLowerCase() === "y",
    pySupport: hasPySupport.toLowerCase() === "y",
  };
}
export async function fillRetrieverIntegrationDocTemplate(fields: {
className: string;
}) {
const sidebarLabel = fields.className.replace("Retriever", "");
const pyDocUrl = `https://python.langchain.com/docs/integrations/retrievers/${sidebarLabel.toLowerCase()}/`;
const extraFields = await promptExtraFields();
const { pySupport } = extraFields;
const { canSelfHost } = extraFields;
const { hasCloudOffering } = extraFields;
const { packageName } = extraFields;
const fullImportPath = extraFields.fullImportPath ?? extraFields.packageName;
const apiRefModuleUrl = `https://api.js.langchain.com/classes/${fullImportPath
.replace("@", "")
.replaceAll("/", "_")
.replaceAll("-", "_")}.${fields.className}.html`;
const apiRefPackageUrl = apiRefModuleUrl
.replace("/classes/", "/modules/")
.replace(`.${fields.className}.html`, ".html");
const apiRefUrlSuccesses = await Promise.all([
fetchURLStatus(apiRefModuleUrl),
fetchURLStatus(apiRefPackageUrl),
]);
if (apiRefUrlSuccesses.find((s) => !s)) {
console.warn(
"API ref URLs invalid. Please manually ensure they are correct."
);
}
const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
.replaceAll(PACKAGE_NAME_PLACEHOLDER, packageName)
.replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
.replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
.replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, fullImportPath)
.replace(HAS_CLOUD_OFFERING_PLACEHOLDER, hasCloudOffering ? "β
" : "β")
.replace(CAN_SELF_HOST_PLACEHOLDER, canSelfHost ? "β
" : "β")
.replace(PY_SUPPORT_PLACEHOLDER, pySupport ? "β
" : "β")
.replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
.replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl);
const packageNameShortSnakeCase = fields.className
.replace(/-/g, "_")
.toLowerCase();
const docPath = path.join(
INTEGRATIONS_DOCS_PATH,
`${packageNameShortSnakeCase}.ipynb`
);
await fs.promises.writeFile(docPath, docTemplate);
const prettyDocPath = docPath.split("docs/core_docs/")[1];
const updatePythonDocUrlText = ` ${redBackground(
"- Update the Python documentation URL with the proper URL."
)}`;
const successText = `\nSuccessfully created new chat model integration doc at ${prettyDocPath}.`;
console.log(
`${greenText(successText)}\n
${boldText("Next steps:")}
${extraFields?.pySupport ? updatePythonDocUrlText : ""}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
);
}
|
0 | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli | lc_public_repos/langchainjs/libs/langchain-scripts/src/cli/docs/kv_store.ts | import * as path from "node:path";
import * as fs from "node:fs";
import {
boldText,
getUserInput,
greenText,
redBackground,
} from "../utils/get-input.js";
import { fetchURLStatus } from "../utils/fetch-url-status.js";
import {
SIDEBAR_LABEL_PLACEHOLDER,
MODULE_NAME_PLACEHOLDER,
PACKAGE_NAME_PLACEHOLDER,
FULL_IMPORT_PATH_PLACEHOLDER,
ENV_VAR_NAME_PLACEHOLDER,
PYTHON_DOC_URL_PLACEHOLDER,
API_REF_MODULE_PLACEHOLDER,
API_REF_PACKAGE_PLACEHOLDER,
LOCAL_PLACEHOLDER,
PY_SUPPORT_PLACEHOLDER,
} from "../constants.js";
// Notebook template used to scaffold new key-value store integration docs.
const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/kv_store.ipynb");
// Destination directory for generated docs, resolved relative to this
// package's working directory (assumes the CLI is run from langchain-scripts).
const INTEGRATIONS_DOCS_PATH = path.resolve(
  "../../docs/core_docs/docs/integrations/stores"
);
// Answers gathered interactively from the user by promptExtraFields.
type ExtraFields = {
  pySupport: boolean; // has a Python counterpart
  local: boolean; // supports running locally
  envVarName: string; // API key environment variable name
  fullImportPath: string; // e.g. "@langchain/community/storage/upstash_redis"
  packageName: string; // e.g. "@langchain/community"
};
/**
 * Interactively collects key-value store integration metadata from the user.
 *
 * @param fields.envVarGuess - Best-guess API key env var name, offered to the
 *   user for confirmation.
 * @returns The confirmed (or corrected) answers.
 */
async function promptExtraFields(fields: {
  envVarGuess: string;
}): Promise<ExtraFields> {
  // Normalizes a y/n answer into a boolean.
  const isYes = (answer: string) => answer.toLowerCase() === "y";

  const hasPySupport = await getUserInput(
    "Does this integration have Python support? (y/n) ",
    undefined,
    true
  );
  const hasLocalSupport = await getUserInput(
    "Does this integration support running locally? (y/n) ",
    undefined,
    true
  );
  const importPath = await getUserInput(
    "What is the full import path of the integration? (e.g @langchain/community/llms/togetherai) ",
    undefined,
    true
  );

  // Derive the package name: the bare "langchain" package has no scope;
  // otherwise take the first two path segments (e.g. "@langchain/community").
  let packageName = importPath.startsWith("langchain/")
    ? "langchain"
    : importPath.split("/").slice(0, 2).join("/");
  const verifyPackageName = await getUserInput(
    `Is ${packageName} the correct package name? (y/n) `,
    undefined,
    true
  );
  if (verifyPackageName.toLowerCase() === "n") {
    packageName = await getUserInput(
      "Please enter the full package name (e.g @langchain/community) ",
      undefined,
      true
    );
  }

  // Confirm (or correct) the guessed environment variable name.
  let envVarName = fields.envVarGuess;
  const isEnvGuessCorrect = await getUserInput(
    `Is the environment variable for the API key named ${fields.envVarGuess}? (y/n) `,
    undefined,
    true
  );
  if (isEnvGuessCorrect.toLowerCase() === "n") {
    envVarName = await getUserInput(
      "Please enter the correct environment variable name ",
      undefined,
      true
    );
  }

  return {
    pySupport: isYes(hasPySupport),
    local: isYes(hasLocalSupport),
    envVarName,
    fullImportPath: importPath,
    packageName,
  };
}
export async function fillKVStoreIntegrationDocTemplate(fields: {
className: string;
}) {
// Sidebar labels should match this format "XYZ Store"
let sidebarLabel = "";
if (fields.className.endsWith("KVStore")) {
sidebarLabel = fields.className.replace("KVStore", " Store");
} else if (fields.className.endsWith("ByteStore")) {
sidebarLabel = fields.className.replace("ByteStore", " Store");
} else {
sidebarLabel = fields.className.replace("Store", " Store");
}
const pyDocUrl = `https://python.langchain.com/docs/integrations/stores/${sidebarLabel.toLowerCase()}/`;
let envVarName = `${sidebarLabel.toUpperCase()}_API_KEY`;
const extraFields = await promptExtraFields({
envVarGuess: envVarName,
});
envVarName = extraFields.envVarName;
const importPathEnding = extraFields.fullImportPath.split("/").pop() ?? "";
const apiRefModuleUrl = `https://api.js.langchain.com/classes/${extraFields.fullImportPath
.replace("@", "")
.replaceAll("/", "_")
.replaceAll("-", "_")}.${fields.className}.html`;
const apiRefPackageUrl = apiRefModuleUrl
.replace("/classes/", "/modules/")
.replace(`.${fields.className}.html`, ".html");
const apiRefUrlSuccesses = await Promise.all([
fetchURLStatus(apiRefModuleUrl),
fetchURLStatus(apiRefPackageUrl),
]);
if (apiRefUrlSuccesses.find((s) => !s)) {
console.warn(
"API ref URLs invalid. Please manually ensure they are correct."
);
}
const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
.replaceAll(SIDEBAR_LABEL_PLACEHOLDER, sidebarLabel)
.replaceAll(MODULE_NAME_PLACEHOLDER, fields.className)
.replaceAll(PACKAGE_NAME_PLACEHOLDER, extraFields.packageName)
.replaceAll(FULL_IMPORT_PATH_PLACEHOLDER, extraFields.fullImportPath)
.replaceAll(ENV_VAR_NAME_PLACEHOLDER, envVarName)
.replaceAll(PYTHON_DOC_URL_PLACEHOLDER, pyDocUrl)
.replaceAll(API_REF_MODULE_PLACEHOLDER, apiRefModuleUrl)
.replaceAll(API_REF_PACKAGE_PLACEHOLDER, apiRefPackageUrl)
.replaceAll(LOCAL_PLACEHOLDER, extraFields?.local ? "β
" : "β")
.replaceAll(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "β
" : "β");
const docPath = path.join(
INTEGRATIONS_DOCS_PATH,
`${importPathEnding}.ipynb`
);
await fs.promises.writeFile(docPath, docTemplate);
const prettyDocPath = docPath.split("docs/core_docs/")[1];
const updatePythonDocUrlText = ` ${redBackground(
"- Update the Python documentation URL with the proper URL."
)}`;
const successText = `\nSuccessfully created new document loader integration doc at ${prettyDocPath}.`;
console.log(
`${greenText(successText)}\n
${boldText("Next steps:")}
${extraFields?.pySupport ? updatePythonDocUrlText : ""}
- Run all code cells in the generated doc to record the outputs.
- Add extra sections on integration specific features.\n`
);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.