index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/llms/chrome_ai.ts
/* eslint-disable no-restricted-globals */
import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import { IterableReadableStream } from "@langchain/core/utils/stream";
import { BaseLLMParams, LLM } from "@langchain/core/language_models/llms";

// ---------------------------------------------------------------------------
// Typings for Chrome's experimental built-in AI ("window.ai" / Prompt API).
// NOTE(review): this API is an in-flux browser proposal — these declarations
// mirror one snapshot of it; confirm member names against the current Chrome
// docs before relying on them.
// ---------------------------------------------------------------------------

// Factory exposed by the browser for creating sessions and querying support.
export interface AILanguageModelFactory {
  create(options?: AILanguageModelCreateOptions): Promise<AILanguageModel>;
  capabilities(): Promise<AILanguageModelCapabilities>;
}

// A single prompting session with the on-device model.
export interface AILanguageModel extends EventTarget {
  // One-shot prompt; resolves with the full completion.
  prompt(
    input: AILanguageModelPromptInput,
    options?: AILanguageModelPromptOptions
  ): Promise<string>;
  // Streaming variant; see _streamResponseChunks below for how the stream's
  // chunk semantics are consumed.
  promptStreaming(
    input: AILanguageModelPromptInput,
    options?: AILanguageModelPromptOptions
  ): ReadableStream;
  countPromptTokens(
    input: AILanguageModelPromptInput,
    options?: AILanguageModelPromptOptions
  ): Promise<number>;
  get maxTokens(): number;
  get tokensSoFar(): number;
  get tokensLeft(): number;
  get topK(): number;
  get temperature(): number;
  oncontextoverflow: (event: Event) => void;
  clone(options?: AILanguageModelCloneOptions): Promise<AILanguageModel>;
  // Frees the session's resources; called in the finally of streaming below.
  destroy(): void;
}

interface AILanguageModelCapabilities {
  readonly available: AICapabilityAvailability;
  languageAvailable(languageTag: string): AICapabilityAvailability;
  get defaultTopK(): number | undefined;
  get maxTopK(): number | undefined;
  get defaultTemperature(): number | undefined;
  get maxTemperature(): number | undefined;
}

interface AILanguageModelCreateOptions {
  signal?: AbortSignal;
  monitor?: AICreateMonitorCallback;
  systemPrompt?: string;
  initialPrompts?: AILanguageModelInitialPrompt[];
  topK: number;
  temperature: number;
}

export interface AILanguageModelInitialPrompt {
  role: AILanguageModelInitialPromptRole;
  content: string;
}

export interface AILanguageModelPrompt {
  role: AILanguageModelPromptRole;
  content: string;
}

export interface AILanguageModelPromptOptions {
  signal?: AbortSignal;
}

export interface AILanguageModelCloneOptions {
  signal?: AbortSignal;
}

export type AILanguageModelPromptInput =
  | string
  | AILanguageModelPrompt
  | AILanguageModelPrompt[];

enum AILanguageModelInitialPromptRole {
  "system",
  "user",
  "assistant",
}

enum AILanguageModelPromptRole {
  "user",
  "assistant",
}

// NOTE(review): createSession() below also compares against "after-download",
// which is not a member of this union — one of the two is out of date with
// the browser spec; verify and reconcile.
export type AICapabilityAvailability = "yes" | "no";

export type AICreateMonitorCallback = () => void;

// Constructor inputs: sampling knobs plus an optional system prompt, on top
// of the base LLM params.
export interface ChromeAIInputs extends BaseLLMParams {
  topK?: number;
  temperature?: number;
  systemPrompt?: string;
}

export interface ChromeAICallOptions extends BaseLanguageModelCallOptions {}

/**
 * To use this model you need to have the `Built-in AI Early Preview Program`
 * for Chrome. You can find more information about the program here:
 * @link https://developer.chrome.com/docs/ai/built-in
 *
 * @example
 * ```typescript
 * // Initialize the ChromeAI model.
 * const model = new ChromeAI({
 *   temperature: 0.5, // Optional. Default is 0.5.
 *   topK: 40, // Optional. Default is 40.
 * });
 *
 * // Call the model with a message and await the response.
 * const response = await model.invoke([
 *   new HumanMessage({ content: "My name is John." }),
 * ]);
 * ```
 */
export class ChromeAI extends LLM<ChromeAICallOptions> {
  temperature?: number;

  topK?: number;

  systemPrompt?: string;

  static lc_name() {
    return "ChromeAI";
  }

  constructor(inputs?: ChromeAIInputs) {
    super({
      ...inputs,
    });
    this.temperature = inputs?.temperature ?? this.temperature;
    this.topK = inputs?.topK ?? this.topK;
    this.systemPrompt = inputs?.systemPrompt;
  }

  _llmType() {
    return "chrome_ai";
  }

  /**
   * Initialize the model. This method may be called before invoking the model
   * to set up a chat session in advance.
   *
   * Reads the experimental `ai` global, verifies the on-device model is
   * available and downloaded, then creates a session configured with this
   * instance's systemPrompt/topK/temperature.
   *
   * @throws If the `ai` global is missing, or the model is unavailable or
   *   not yet downloaded.
   * @returns A new language-model session; caller is responsible for calling
   *   `destroy()` on it when done.
   */
  protected async createSession() {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let aiInstance: any;
    try {
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore Experimental browser-only global
      aiInstance = ai;
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      // A bare reference to an undeclared global throws ReferenceError,
      // which we translate into an actionable setup message.
      throw new Error(
        `Could not initialize ChromeAI instance. Make sure you are running a version of Chrome with the proper experimental flags enabled.\n\nError message: ${e.message}`
      );
    }
    const { available } = await aiInstance.languageModel.capabilities();
    if (available === "no") {
      throw new Error("The AI model is not available.");
    } else if (available === "after-download") {
      throw new Error("The AI model is not yet downloaded.");
    }
    const session = await aiInstance.languageModel.create({
      systemPrompt: this.systemPrompt,
      topK: this.topK,
      temperature: this.temperature,
    });
    return session;
  }

  /**
   * Streams the completion as incremental GenerationChunks.
   *
   * The code treats each chunk from `promptStreaming` as the *cumulative*
   * text generated so far, so only the new suffix (past what was already
   * emitted) is yielded — TODO confirm this chunk semantic against the
   * current Prompt API behavior.
   */
  async *_streamResponseChunks(
    prompt: string,
    _options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    let session;
    try {
      session = await this.createSession();

      const stream = session.promptStreaming(prompt);
      const iterableStream =
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        IterableReadableStream.fromReadableStream<any>(stream);

      let previousContent = "";
      for await (const chunk of iterableStream) {
        // Slice off the prefix that was already yielded.
        const newContent = chunk.slice(previousContent.length);
        previousContent += newContent;
        yield new GenerationChunk({
          text: newContent,
        });
        await runManager?.handleLLMNewToken(newContent);
      }
    } finally {
      // Always release the session, even on error/early termination.
      session?.destroy();
    }
  }

  // Non-streaming call: drain the stream and join the pieces.
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    const chunks = [];
    for await (const chunk of this._streamResponseChunks(
      prompt,
      options,
      runManager
    )) {
      chunks.push(chunk.text);
    }
    return chunks.join("");
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/tools/pyinterpreter.ts
// eslint-disable-next-line import/no-extraneous-dependencies
import { loadPyodide, type PyodideInterface } from "pyodide";
import { Tool, ToolParams } from "@langchain/core/tools";

// Everything `loadPyodide` accepts (e.g. `indexURL`) plus the usual Tool
// params, plus an already-initialized Pyodide instance.
export type PythonInterpreterToolParams = Parameters<typeof loadPyodide>[0] &
  ToolParams & {
    instance: PyodideInterface;
  };

/**
 * Tool that evaluates Python source inside a Pyodide (WebAssembly) sandbox
 * and returns the captured stdout/stderr as a JSON string.
 *
 * NOTE(review): despite the description's "environment resets on every
 * execution", only the stdout/stderr buffers are reset in `_call` — the
 * Pyodide interpreter instance itself is reused; confirm whether Python
 * globals actually persist between calls.
 */
export class PythonInterpreterTool extends Tool {
  static lc_name() {
    return "PythonInterpreterTool";
  }

  name = "python_interpreter";

  // Shown to the model. Field initializers run in declaration order, but
  // `availableDefaultPackages` is a prototype getter, so referencing it here
  // is safe.
  description = `Evaluates python code in a sandbox environment. The environment resets on every execution. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. Packages available: ${this.availableDefaultPackages}`;

  pyodideInstance: PyodideInterface;

  // Accumulated output of the most recent run; cleared at the top of _call.
  stdout = "";

  stderr = "";

  constructor(options: PythonInterpreterToolParams) {
    super(options);
    this.pyodideInstance = options.instance;
    // Route the interpreter's output streams into the accumulators above.
    this.pyodideInstance.setStderr({
      batched: (text: string) => {
        this.stderr += text;
      },
    });
    this.pyodideInstance.setStdout({
      batched: (text: string) => {
        this.stdout += text;
      },
    });
  }

  /**
   * Loads an extra package into the sandbox and appends it to the tool
   * description so the model knows it may import it.
   */
  async addPackage(packageName: string) {
    await this.pyodideInstance.loadPackage(packageName);
    this.description += `, ${packageName}`;
  }

  // Packages bundled with the Pyodide distribution, importable by default.
  get availableDefaultPackages(): string {
    return [
      "asciitree",
      "astropy",
      "atomicwrites",
      "attrs",
      "autograd",
      "awkward-cpp",
      "bcrypt",
      "beautifulsoup4",
      "biopython",
      "bitarray",
      "bitstring",
      "bleach",
      "bokeh",
      "boost-histogram",
      "brotli",
      "cachetools",
      "Cartopy",
      "cbor-diag",
      "certifi",
      "cffi",
      "cffi_example",
      "cftime",
      "click",
      "cligj",
      "cloudpickle",
      "cmyt",
      "colorspacious",
      "contourpy",
      "coolprop",
      "coverage",
      "cramjam",
      "cryptography",
      "cssselect",
      "cycler",
      "cytoolz",
      "decorator",
      "demes",
      "deprecation",
      "distlib",
      "docutils",
      "exceptiongroup",
      "fastparquet",
      "fiona",
      "fonttools",
      "freesasa",
      "fsspec",
      "future",
      "galpy",
      "gensim",
      "geopandas",
      "gmpy2",
      "gsw",
      "h5py",
      "html5lib",
      "idna",
      "igraph",
      "imageio",
      "iniconfig",
      "jedi",
      "Jinja2",
      "joblib",
      "jsonschema",
      "kiwisolver",
      "lazy-object-proxy",
      "lazy_loader",
      "lightgbm",
      "logbook",
      "lxml",
      "MarkupSafe",
      "matplotlib",
      "matplotlib-pyodide",
      "micropip",
      "mne",
      "more-itertools",
      "mpmath",
      "msgpack",
      "msprime",
      "multidict",
      "munch",
      "mypy",
      "netcdf4",
      "networkx",
      "newick",
      "nlopt",
      "nltk",
      "nose",
      "numcodecs",
      "numpy",
      "opencv-python",
      "optlang",
      "orjson",
      "packaging",
      "pandas",
      "parso",
      "patsy",
      "peewee",
      "Pillow",
      "pillow_heif",
      "pkgconfig",
      "pluggy",
      "protobuf",
      "py",
      "pyb2d",
      "pyclipper",
      "pycparser",
      "pycryptodome",
      "pydantic",
      "pyerfa",
      "Pygments",
      "pyheif",
      "pyinstrument",
      "pynacl",
      "pyodide-http",
      "pyodide-tblib",
      "pyparsing",
      "pyproj",
      "pyrsistent",
      "pyshp",
      "pytest",
      "pytest-benchmark",
      "python-dateutil",
      "python-magic",
      "python-sat",
      "python_solvespace",
      "pytz",
      "pywavelets",
      "pyxel",
      "pyyaml",
      "rebound",
      "reboundx",
      "regex",
      "retrying",
      "RobotRaconteur",
      "ruamel.yaml",
      "rust-panic-test",
      "scikit-image",
      "scikit-learn",
      "scipy",
      "screed",
      "setuptools",
      "shapely",
      "simplejson",
      "six",
      "smart_open",
      "soupsieve",
      "sourmash",
      "sparseqr",
      "sqlalchemy",
      "statsmodels",
      "svgwrite",
      "swiglpk",
      "sympy",
      "termcolor",
      "texttable",
      "threadpoolctl",
      "tomli",
      "tomli-w",
      "toolz",
      "tqdm",
      "traits",
      "tskit",
      "typing-extensions",
      "uncertainties",
      "unyt",
      "webencodings",
      "wordcloud",
      "wrapt",
      "xarray",
      "xgboost",
      "xlrd",
      "xyzservices",
      "yarl",
      "yt",
      "zarr",
    ].join(", ");
  }

  /**
   * Convenience factory: boots a fresh Pyodide runtime from the given
   * loader options and wraps it in a tool instance.
   */
  static async initialize(
    options: Omit<PythonInterpreterToolParams, "instance">
  ) {
    const instance = await loadPyodide(options);
    return new this({ ...options, instance });
  }

  /**
   * Runs the script and returns `{"stdout": ..., "stderr": ...}` as JSON.
   * A Python exception rejects `runPythonAsync` and propagates to the caller.
   */
  async _call(script: string) {
    this.stdout = "";
    this.stderr = "";
    await this.pyodideInstance.runPythonAsync(script);
    return JSON.stringify({ stdout: this.stdout, stderr: this.stderr });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/tools
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/tools/tests/pyinterpreter.int.test.ts
import { test, expect } from "@jest/globals"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { PythonInterpreterTool } from "../pyinterpreter.js"; describe("Python Interpreter testsuite", () => { test("hello langchain", async () => { const prompt = PromptTemplate.fromTemplate( `Can you generate python code that: {input}? Do not generate anything else.` ); const model = new OpenAI({}); const interpreter = await PythonInterpreterTool.initialize({ indexURL: "../node_modules/pyodide", }); const chain = prompt .pipe(model) .pipe(new StringOutputParser()) .pipe(interpreter); const result = await chain.invoke({ input: `prints "Hello LangChain"`, }); expect(JSON.parse(result).stdout).toBe("Hello LangChain"); }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/graph_transformers/llm.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { BaseLanguageModel } from "@langchain/core/language_models/base"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { Document } from "@langchain/core/documents"; import { Node, Relationship, GraphDocument, } from "../../graphs/graph_document.js"; export const SYSTEM_PROMPT = ` # Knowledge Graph Instructions for GPT-4\n ## 1. Overview\n You are a top-tier algorithm designed for extracting information in structured formats to build a knowledge graph.\n Try to capture as much information from the text as possible without sacrifing accuracy. Do not add any information that is not explicitly mentioned in the text\n" - **Nodes** represent entities and concepts.\n" - The aim is to achieve simplicity and clarity in the knowledge graph, making it\n accessible for a vast audience.\n ## 2. Labeling Nodes\n - **Consistency**: Ensure you use available types for node labels.\n Ensure you use basic or elementary types for node labels.\n - For example, when you identify an entity representing a person, always label it as **'person'**. Avoid using more specific terms like 'mathematician' or 'scientist' - **Node IDs**: Never utilize integers as node IDs. Node IDs should be names or human-readable identifiers found in the text.\n - **Relationships** represent connections between entities or concepts.\n Ensure consistency and generality in relationship types when constructing knowledge graphs. Instead of using specific and momentary types such as 'BECAME_PROFESSOR', use more general and timeless relationship types like 'PROFESSOR'. Make sure to use general and timeless relationship types!\n ## 3. 
Coreference Resolution\n - **Maintain Entity Consistency**: When extracting entities, it's vital to ensure consistency.\n If an entity, such as "John Doe", is mentioned multiple times in the text but is referred to by different names or pronouns (e.g., "Joe", "he"), always use the most complete identifier for that entity throughout the knowledge graph. In this example, use "John Doe" as the entity ID.\n Remember, the knowledge graph should be coherent and easily understandable, so maintaining consistency in entity references is crucial.\n ## 4. Strict Compliance\n Adhere to the rules strictly. Non-compliance will result in termination. `; const DEFAULT_PROMPT = /* #__PURE__ */ ChatPromptTemplate.fromMessages([ ["system", SYSTEM_PROMPT], [ "human", "Tip: Make sure to answer in the correct format and do not include any explanations. Use the given format to extract information from the following input: {input}", ], ]); interface OptionalEnumFieldProps { enumValues?: string[]; description: string; isRel?: boolean; fieldKwargs?: object; } interface SchemaProperty { key: string; value: string; } function toTitleCase(str: string): string { return str .split(" ") .map((w) => w[0].toUpperCase() + w.substring(1).toLowerCase()) .join(""); } function createOptionalEnumType({ enumValues = undefined, description = "", isRel = false, }: OptionalEnumFieldProps): z.ZodTypeAny { let schema; if (enumValues && enumValues.length) { schema = z .enum(enumValues as [string, ...string[]]) .describe( `${description} Available options are: ${enumValues.join(", ")}.` ); } else { const nodeInfo = "Ensure you use basic or elementary types for node labels.\n" + "For example, when you identify an entity representing a person, " + "always label it as **'Person'**. 
Avoid using more specific terms " + "like 'Mathematician' or 'Scientist'"; const relInfo = "Instead of using specific and momentary types such as " + "'BECAME_PROFESSOR', use more general and timeless relationship types like " + "'PROFESSOR'. However, do not sacrifice any accuracy for generality"; const additionalInfo = isRel ? relInfo : nodeInfo; schema = z.string().describe(description + additionalInfo); } return schema; } function createNodeSchema(allowedNodes: string[], nodeProperties: string[]) { const nodeSchema = z.object({ id: z.string(), type: createOptionalEnumType({ enumValues: allowedNodes, description: "The type or label of the node.", }), }); return nodeProperties.length > 0 ? nodeSchema.extend({ properties: z .array( z.object({ key: createOptionalEnumType({ enumValues: nodeProperties, description: "Property key.", }), value: z.string().describe("Extracted value."), }) ) .describe(`List of node properties`), }) : nodeSchema; } function createRelationshipSchema( allowedNodes: string[], allowedRelationships: string[], relationshipProperties: string[] ) { const relationshipSchema = z.object({ sourceNodeId: z.string(), sourceNodeType: createOptionalEnumType({ enumValues: allowedNodes, description: "The source node of the relationship.", }), relationshipType: createOptionalEnumType({ enumValues: allowedRelationships, description: "The type of the relationship.", isRel: true, }), targetNodeId: z.string(), targetNodeType: createOptionalEnumType({ enumValues: allowedNodes, description: "The target node of the relationship.", }), }); return relationshipProperties.length > 0 ? 
relationshipSchema.extend({ properties: z .array( z.object({ key: createOptionalEnumType({ enumValues: relationshipProperties, description: "Property key.", }), value: z.string().describe("Extracted value."), }) ) .describe(`List of relationship properties`), }) : relationshipSchema; } function createSchema( allowedNodes: string[], allowedRelationships: string[], nodeProperties: string[], relationshipProperties: string[] ) { const nodeSchema = createNodeSchema(allowedNodes, nodeProperties); const relationshipSchema = createRelationshipSchema( allowedNodes, allowedRelationships, relationshipProperties ); const dynamicGraphSchema = z.object({ nodes: z.array(nodeSchema).describe("List of nodes"), relationships: z .array(relationshipSchema) .describe("List of relationships."), }); return dynamicGraphSchema; } function convertPropertiesToRecord( properties: SchemaProperty[] ): Record<string, string> { return properties.reduce((accumulator: Record<string, string>, prop) => { accumulator[prop.key] = prop.value; return accumulator; }, {}); } // eslint-disable-next-line @typescript-eslint/no-explicit-any function mapToBaseNode(node: any): Node { return new Node({ id: node.id, type: node.type ? toTitleCase(node.type) : "", properties: node.properties ? convertPropertiesToRecord(node.properties) : {}, }); } // eslint-disable-next-line @typescript-eslint/no-explicit-any function mapToBaseRelationship(relationship: any): Relationship { return new Relationship({ source: new Node({ id: relationship.sourceNodeId, type: relationship.sourceNodeType ? toTitleCase(relationship.sourceNodeType) : "", }), target: new Node({ id: relationship.targetNodeId, type: relationship.targetNodeType ? toTitleCase(relationship.targetNodeType) : "", }), type: relationship.relationshipType.replace(" ", "_").toUpperCase(), properties: relationship.properties ? 
convertPropertiesToRecord(relationship.properties) : {}, }); } export interface LLMGraphTransformerProps { llm: BaseLanguageModel; allowedNodes?: string[]; allowedRelationships?: string[]; prompt?: ChatPromptTemplate; strictMode?: boolean; nodeProperties?: string[]; relationshipProperties?: string[]; } export class LLMGraphTransformer { // eslint-disable-next-line @typescript-eslint/no-explicit-any chain: any; allowedNodes: string[] = []; allowedRelationships: string[] = []; strictMode: boolean; nodeProperties: string[]; relationshipProperties: string[]; constructor({ llm, allowedNodes = [], allowedRelationships = [], prompt = DEFAULT_PROMPT, strictMode = true, nodeProperties = [], relationshipProperties = [], }: LLMGraphTransformerProps) { if (typeof llm.withStructuredOutput !== "function") { throw new Error( "The specified LLM does not support the 'withStructuredOutput'. Please ensure you are using an LLM that supports this feature." ); } this.allowedNodes = allowedNodes; this.allowedRelationships = allowedRelationships; this.strictMode = strictMode; this.nodeProperties = nodeProperties; this.relationshipProperties = relationshipProperties; // Define chain const schema = createSchema( allowedNodes, allowedRelationships, nodeProperties, relationshipProperties ); const structuredLLM = llm.withStructuredOutput(zodToJsonSchema(schema)); this.chain = prompt.pipe(structuredLLM); } /** * Method that processes a single document, transforming it into a graph * document using an LLM based on the model's schema and constraints. * @param document The document to process. * @returns A promise that resolves to a graph document. 
*/ async processResponse(document: Document) { const text = document.pageContent; const rawSchema = await this.chain.invoke({ input: text }); let nodes: Node[] = []; if (rawSchema?.nodes) { nodes = rawSchema.nodes.map(mapToBaseNode); } let relationships: Relationship[] = []; if (rawSchema?.relationships) { relationships = rawSchema.relationships.map(mapToBaseRelationship); } if ( this.strictMode && (this.allowedNodes.length > 0 || this.allowedRelationships.length > 0) ) { if (this.allowedNodes.length > 0) { const allowedNodesLowerCase = this.allowedNodes.map((node) => node.toLowerCase() ); // For nodes, compare lowercased types nodes = nodes.filter((node) => allowedNodesLowerCase.includes(node.type.toLowerCase()) ); // For relationships, compare lowercased types for both source and target nodes relationships = relationships.filter( (rel) => allowedNodesLowerCase.includes(rel.source.type.toLowerCase()) && allowedNodesLowerCase.includes(rel.target.type.toLowerCase()) ); } if (this.allowedRelationships.length > 0) { // For relationships, compare lowercased types relationships = relationships.filter((rel) => this.allowedRelationships .map((rel) => rel.toLowerCase()) .includes(rel.type.toLowerCase()) ); } } return new GraphDocument({ nodes, relationships, source: document, }); } /** * Method that converts an array of documents into an array of graph * documents using the `processResponse` method. * @param documents The array of documents to convert. * @returns A promise that resolves to an array of graph documents. */ async convertToGraphDocuments( documents: Document[] ): Promise<GraphDocument[]> { const results: GraphDocument[] = []; for (const document of documents) { const graphDocument = await this.processResponse(document); results.push(graphDocument); } return results; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { LLMGraphTransformer } from "./llm.js";
import {
  GraphDocument,
  Node,
  Relationship,
} from "../../graphs/graph_document.js";

// Integration tests: these hit the live OpenAI API (temperature 0 for
// reproducibility) and assert the exact extracted graph.

// Skipped: no allow-lists configured, so the extracted graph is
// model-dependent and cannot be asserted exactly.
test.skip("convertToGraphDocuments", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4o-mini",
  });

  const llmGraphTransformer = new LLMGraphTransformer({
    llm: model,
  });

  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await llmGraphTransformer.convertToGraphDocuments([
    new Document({ pageContent: "Elon Musk is suing OpenAI" }),
  ]);
});

// Allow-lists in UPPERCASE; extracted node types come back title-cased
// ("Person"/"Organization"), exercising the case-insensitive strict filter.
test("convertToGraphDocuments with allowed", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4o-mini",
  });

  const llmGraphTransformer = new LLMGraphTransformer({
    llm: model,
    allowedNodes: ["PERSON", "ORGANIZATION"],
    allowedRelationships: ["SUES"],
  });

  const result = await llmGraphTransformer.convertToGraphDocuments([
    new Document({ pageContent: "Elon Musk is suing OpenAI" }),
  ]);

  expect(result).toEqual([
    new GraphDocument({
      nodes: [
        new Node({ id: "Elon Musk", type: "Person" }),
        new Node({ id: "OpenAI", type: "Organization" }),
      ],
      relationships: [
        new Relationship({
          source: new Node({ id: "Elon Musk", type: "Person" }),
          target: new Node({ id: "OpenAI", type: "Organization" }),
          type: "SUES",
        }),
      ],
      source: new Document({
        pageContent: "Elon Musk is suing OpenAI",
        metadata: {},
      }),
    }),
  ]);
});

// Same scenario with title-cased allow-lists — result must be identical.
test("convertToGraphDocuments with allowed lowercased", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4o-mini",
  });

  const llmGraphTransformer = new LLMGraphTransformer({
    llm: model,
    allowedNodes: ["Person", "Organization"],
    allowedRelationships: ["SUES"],
  });

  const result = await llmGraphTransformer.convertToGraphDocuments([
    new Document({ pageContent: "Elon Musk is suing OpenAI" }),
  ]);

  expect(result).toEqual([
    new GraphDocument({
      nodes: [
        new Node({ id: "Elon Musk", type: "Person" }),
        new Node({ id: "OpenAI", type: "Organization" }),
      ],
      relationships: [
        new Relationship({
          source: new Node({ id: "Elon Musk", type: "Person" }),
          target: new Node({ id: "OpenAI", type: "Organization" }),
          type: "SUES",
        }),
      ],
      source: new Document({
        pageContent: "Elon Musk is suing OpenAI",
        metadata: {},
      }),
    }),
  ]);
});

// Requested nodeProperties should surface as a properties record on the node.
test("convertToGraphDocuments with node properties", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4o-mini",
  });

  const llmGraphTransformer = new LLMGraphTransformer({
    llm: model,
    allowedNodes: ["Person"],
    allowedRelationships: ["KNOWS"],
    nodeProperties: ["age", "country"],
  });

  const result = await llmGraphTransformer.convertToGraphDocuments([
    new Document({ pageContent: "John is 30 years old and lives in Spain" }),
  ]);

  expect(result).toEqual([
    new GraphDocument({
      nodes: [
        new Node({
          id: "John",
          type: "Person",
          properties: {
            age: "30",
            country: "Spain",
          },
        }),
      ],
      relationships: [],
      source: new Document({
        pageContent: "John is 30 years old and lives in Spain",
        metadata: {},
      }),
    }),
  ]);
});

// Requested relationshipProperties should surface on the relationship.
test("convertToGraphDocuments with relationship properties", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4o-mini",
  });

  const llmGraphTransformer = new LLMGraphTransformer({
    llm: model,
    allowedNodes: ["Person"],
    allowedRelationships: ["KNOWS"],
    relationshipProperties: ["since"],
  });

  const result = await llmGraphTransformer.convertToGraphDocuments([
    new Document({ pageContent: "John has known Mary since 2020" }),
  ]);

  expect(result).toEqual([
    new GraphDocument({
      nodes: [
        new Node({ id: "John", type: "Person" }),
        new Node({ id: "Mary", type: "Person" }),
      ],
      relationships: [
        new Relationship({
          source: new Node({ id: "John", type: "Person" }),
          target: new Node({ id: "Mary", type: "Person" }),
          type: "KNOWS",
          properties: {
            since: "2020",
          },
        }),
      ],
      source: new Document({
        pageContent: "John has known Mary since 2020",
        metadata: {},
      }),
    }),
  ]);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/multimodal_embeddings/googlevertexai.ts
import { GoogleAuth, GoogleAuthOptions } from "google-auth-library";
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
import {
  GoogleVertexAIBaseLLMInput,
  GoogleVertexAIBasePrediction,
  GoogleVertexAILLMPredictions,
} from "../../types/googlevertexai-types.js";
import {
  GoogleVertexAILLMConnection,
  GoogleVertexAILLMResponse,
} from "../../utils/googlevertexai-connection.js";

/**
 * Parameters for the GoogleVertexAIMultimodalEmbeddings class, extending
 * both EmbeddingsParams and GoogleVertexAIConnectionParams.
 */
export interface GoogleVertexAIMultimodalEmbeddingsParams
  extends EmbeddingsParams,
    GoogleVertexAIBaseLLMInput<GoogleAuthOptions> {}

/**
 * Options for the GoogleVertexAIMultimodalEmbeddings class, extending
 * AsyncCallerCallOptions.
 */
interface GoogleVertexAIMultimodalEmbeddingsOptions
  extends AsyncCallerCallOptions {}

/**
 * An instance of media (text or image) that can be used for generating
 * embeddings.
 */
interface GoogleVertexAIMultimodalEmbeddingsInstance {
  text?: string;
  image?: {
    bytesBase64Encoded: string;
  };
}

/**
 * The results of generating embeddings, extending
 * GoogleVertexAIBasePrediction. It includes text and image embeddings.
 */
interface GoogleVertexAIMultimodalEmbeddingsResults
  extends GoogleVertexAIBasePrediction {
  textEmbedding?: number[];
  imageEmbedding?: number[];
}

/**
 * The media should have a text property, an image property, or both.
 */
export type GoogleVertexAIMedia =
  | {
      text: string;
      image?: Buffer;
    }
  | {
      text?: string;
      image: Buffer;
    };

// One embedding per modality that was present in the request.
export type MediaEmbeddings = {
  text?: number[];
  image?: number[];
};

/**
 * Class for generating embeddings for text and images using Google's
 * Vertex AI. It extends the Embeddings base class and implements the
 * GoogleVertexAIMultimodalEmbeddingsParams interface.
 */
export class GoogleVertexAIMultimodalEmbeddings
  extends Embeddings
  implements GoogleVertexAIMultimodalEmbeddingsParams
{
  model = "multimodalembedding@001";

  private connection: GoogleVertexAILLMConnection<
    GoogleVertexAIMultimodalEmbeddingsOptions,
    GoogleVertexAIMultimodalEmbeddingsInstance,
    GoogleVertexAIMultimodalEmbeddingsResults,
    GoogleAuthOptions
  >;

  constructor(fields?: GoogleVertexAIMultimodalEmbeddingsParams) {
    super(fields ?? {});

    this.model = fields?.model ?? this.model;

    // Spread `this` so defaults (e.g. `model`) reach the connection config;
    // user-supplied auth options are merged into the cloud-platform scope.
    this.connection = new GoogleVertexAILLMConnection(
      { ...fields, ...this },
      this.caller,
      new GoogleAuth({
        scopes: "https://www.googleapis.com/auth/cloud-platform",
        ...fields?.authOptions,
      })
    );
  }

  /**
   * Converts media (text or image) to an instance that can be used for
   * generating embeddings.
   * @param media The media (text or image) to be converted.
   * @returns An instance of media that can be used for generating embeddings.
   */
  mediaToInstance(
    media: GoogleVertexAIMedia
  ): GoogleVertexAIMultimodalEmbeddingsInstance {
    const ret: GoogleVertexAIMultimodalEmbeddingsInstance = {};

    if (media?.text) {
      ret.text = media.text;
    }

    // Image bytes are sent to the API base64-encoded.
    if (media.image) {
      ret.image = {
        bytesBase64Encoded: media.image.toString("base64"),
      };
    }

    return ret;
  }

  /**
   * Converts the response from Google Vertex AI to embeddings.
   * @param response The response from Google Vertex AI.
   * @returns An array of media embeddings.
   */
  responseToEmbeddings(
    response: GoogleVertexAILLMResponse<GoogleVertexAIMultimodalEmbeddingsResults>
  ): MediaEmbeddings[] {
    return (
      response?.data as GoogleVertexAILLMPredictions<GoogleVertexAIMultimodalEmbeddingsResults>
    ).predictions.map((r) => ({
      text: r.textEmbedding,
      image: r.imageEmbedding,
    }));
  }

  /**
   * Generates embeddings for multiple media instances.
   * @param media An array of media instances.
   * @returns A promise that resolves to an array of media embeddings.
   */
  async embedMedia(media: GoogleVertexAIMedia[]): Promise<MediaEmbeddings[]> {
    // Only one media embedding request is allowed
    return Promise.all(media.map((m) => this.embedMediaQuery(m)));
  }

  /**
   * Generates embeddings for a single media instance.
   * @param media A single media instance.
   * @returns A promise that resolves to a media embedding.
   */
  async embedMediaQuery(media: GoogleVertexAIMedia): Promise<MediaEmbeddings> {
    const instance: GoogleVertexAIMultimodalEmbeddingsInstance =
      this.mediaToInstance(media);
    const instances = [instance];

    const parameters = {};
    const options = {};
    const responses = await this.connection.request(
      instances,
      parameters,
      options
    );

    // One instance was sent, so the first prediction is the answer.
    const result = this.responseToEmbeddings(responses);
    return result[0];
  }

  /**
   * Generates embeddings for multiple images.
   * @param images An array of images.
   * @returns A promise that resolves to an array of image embeddings.
   */
  async embedImage(images: Buffer[]): Promise<number[][]> {
    return this.embedMedia(images.map((image) => ({ image }))).then(
      (embeddings) => embeddings.map((e) => e.image ?? [])
    );
  }

  /**
   * Generates embeddings for a single image.
   * @param image A single image.
   * @returns A promise that resolves to an image embedding.
   */
  async embedImageQuery(image: Buffer): Promise<number[]> {
    return this.embedMediaQuery({
      image,
    }).then((embeddings) => embeddings.image ?? []);
  }

  /**
   * Generates embeddings for multiple text documents.
   * @param documents An array of text documents.
   * @returns A promise that resolves to an array of text document embeddings.
   */
  async embedDocuments(documents: string[]): Promise<number[][]> {
    return this.embedMedia(documents.map((text) => ({ text }))).then(
      (embeddings) => embeddings.map((e) => e.text ?? [])
    );
  }

  /**
   * Generates embeddings for a single text document.
   * @param document A single text document.
   * @returns A promise that resolves to a text document embedding.
   */
  async embedQuery(document: string): Promise<number[]> {
    return this.embedMediaQuery({
      text: document,
    }).then((embeddings) => embeddings.text ?? []);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/multimodal_embeddings
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.test.ts
import { test, expect } from "@jest/globals"; import { GoogleVertexAIMultimodalEmbeddings, GoogleVertexAIMedia, } from "../googlevertexai.js"; test("mediaToInstance text", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const media: GoogleVertexAIMedia = { text: "just text", }; const instance = e.mediaToInstance(media); expect(instance.text).toEqual("just text"); expect(instance.image).toBeUndefined(); }); test("mediaToInstance image", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const media: GoogleVertexAIMedia = { image: Buffer.from("abcd"), }; const instance = e.mediaToInstance(media); expect(instance.image?.bytesBase64Encoded).toEqual("YWJjZA=="); expect(instance.text).toBeUndefined(); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/multimodal_embeddings
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.int.test.ts
import fs from "fs"; import * as path from "node:path"; import { fileURLToPath } from "node:url"; import { test, expect } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { FaissStore } from "../../../vectorstores/faiss.js"; import { GoogleVertexAIMultimodalEmbeddings } from "../googlevertexai.js"; test.skip("embedding text", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const vector: number[] = await e.embedQuery("test 1"); expect(vector).toHaveLength(1408); // console.log(vector); }); test.skip("embedding multiple texts", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const docs = ["test 1", "test 2"]; const vector: number[][] = await e.embedDocuments(docs); expect(vector).toHaveLength(2); expect(vector[0]).toHaveLength(1408); expect(vector[1]).toHaveLength(1408); // console.log(vector); }); test.skip("embedding image", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const pathname = path.join( path.dirname(fileURLToPath(import.meta.url)), "files", "parrot.jpeg" ); const img = fs.readFileSync(pathname); const vector: number[] = await e.embedImageQuery(img); expect(vector).toHaveLength(1408); // console.log(vector); }); test.skip("embedding image with text in a vector store", async () => { const e = new GoogleVertexAIMultimodalEmbeddings(); const vectorStore = await FaissStore.fromTexts( ["dog", "cat", "horse", "seagull"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }], e ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const resultOne = await vectorStore.similaritySearch("bird", 2); // console.log(resultOne); const pathname = path.join( path.dirname(fileURLToPath(import.meta.url)), "files", "parrot.jpeg" ); const img = fs.readFileSync(pathname); const vector: number[] = await e.embedImageQuery(img); const document = new Document({ pageContent: img.toString("base64"), metadata: { id: 5, mediaType: "image", }, }); await 
vectorStore.addVectors([vector], [document]); const pathname2 = path.join( path.dirname(fileURLToPath(import.meta.url)), "files", "parrot-icon.png" ); const img2 = fs.readFileSync(pathname2); const vector2: number[] = await e.embedImageQuery(img2); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const resultTwo = await vectorStore.similaritySearchVectorWithScore( vector2, 2 ); // console.log(resultTwo); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/callbacks
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/callbacks/handlers/datadog.ts
import { BaseCallbackHandlerInput } from "@langchain/core/callbacks/base"; import { BaseTracer, Run } from "@langchain/core/tracers/base"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { Document } from "@langchain/core/documents"; import { BaseMessage, isAIMessage } from "@langchain/core/messages"; import { ChatGeneration } from "@langchain/core/outputs"; import { KVMap } from "langsmith/schemas"; export type DatadogLLMObsSpanKind = | "llm" | "workflow" | "agent" | "tool" | "task" | "embedding" | "retrieval"; export type DatadogLLMObsIO = | { value: string } | { documents: { text?: string; id?: string; name?: string; score: string | number; }[]; } | { messages: { content: string; role?: string }[] }; export interface DatadogLLMObsSpan { span_id: string; trace_id: string; parent_id: string; session_id?: string; name: string; start_ns: number; duration: number; error: number; status: string; tags?: string[]; meta: { kind: DatadogLLMObsSpanKind; model_name?: string; model_provider?: string; temperature?: string; input: DatadogLLMObsIO; output: DatadogLLMObsIO | undefined; }; metrics: { [key: string]: number }; } export interface DatadogLLMObsRequestBody { data: { type: "span"; attributes: { ml_app: string; tags: string[]; spans: DatadogLLMObsSpan[]; session_id?: string; }; }; } export type FormatDocument< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record<string, any> = Record<string, any> > = (document: Document<Metadata>) => { text: string; id: string; name: string; score: number; }; export interface DatadogLLMObsTracerFields extends BaseCallbackHandlerInput { mlApp: string; userId?: string; userHandle?: string; sessionId?: string; env?: string; service?: string; tags?: Record<string, string | undefined>; ddApiKey?: string; ddLLMObsEndpoint?: string; formatDocument?: FormatDocument; } export class DatadogLLMObsTracer extends BaseTracer implements DatadogLLMObsTracerFields { name = "datadog_tracer"; 
ddLLMObsEndpoint?: string;

  // Default spans-intake URL. The DD_LLMOBS_ENDPOINT env var overrides it,
  // and the `ddLLMObsEndpoint` constructor field takes precedence over both
  // (checked first in persistRun).
  protected endpoint =
    getEnvironmentVariable("DD_LLMOBS_ENDPOINT") ||
    "https://api.datadoghq.com/api/unstable/llm-obs/v1/trace/spans";

  protected headers: Record<string, string> = {
    "Content-Type": "application/json",
  };

  mlApp: string;

  sessionId?: string;

  // Key/value tags serialized as "key:value" strings in the request body;
  // entries with falsy values are dropped there.
  tags: Record<string, string | undefined> = {};

  // Optional caller-supplied converter for retrieval documents.
  formatDocument?: FormatDocument;

  constructor(fields: DatadogLLMObsTracerFields) {
    super(fields);
    const {
      mlApp,
      userHandle,
      userId,
      sessionId,
      service,
      env,
      tags,
      ddLLMObsEndpoint,
      ddApiKey,
      formatDocument,
    } = fields;

    // API key may come from the constructor or the DD_API_KEY env var;
    // without one the header is simply omitted.
    const apiKey = ddApiKey || getEnvironmentVariable("DD_API_KEY");

    if (apiKey) {
      this.headers["DD-API-KEY"] = apiKey;
    }

    this.mlApp = mlApp;
    this.sessionId = sessionId;
    this.ddLLMObsEndpoint = ddLLMObsEndpoint;
    this.formatDocument = formatDocument;
    this.tags = {
      ...tags,
      env: env || "not-set",
      service: service || "not-set",
      user_handle: userHandle,
      user_id: userId,
    };
  }

  /**
   * Flushes a finished run (and, transitively, its children) to Datadog.
   * Errors are logged and swallowed so that tracing failures can never
   * break the traced chain.
   */
  protected async persistRun(_run: Run): Promise<void> {
    try {
      const spans = this.convertRunToDDSpans(_run);

      const response = await fetch(this.ddLLMObsEndpoint || this.endpoint, {
        method: "POST",
        headers: this.headers,
        body: JSON.stringify(this.formatRequestBody(spans)),
      });
      if (!response.ok) {
        const error = await response.text();
        throw new Error(error);
      }
    } catch (error) {
      console.error(`Error writing spans to Datadog: ${error}`);
    }
  }

  /**
   * Depth-first flattening of a run tree into Datadog spans. Runs that
   * cannot be represented (unknown run type, missing end time/trace id,
   * non-positive duration) convert to null and are filtered out.
   */
  protected convertRunToDDSpans(run: Run): DatadogLLMObsSpan[] {
    const spans = [this.langchainRunToDatadogLLMObsSpan(run)];

    if (run.child_runs) {
      run.child_runs.forEach((childRun) => {
        spans.push(...this.convertRunToDDSpans(childRun));
      });
    }

    // flatMap drops the nulls while keeping the traversal order.
    return spans.flatMap((span) => (span ? [span] : []));
  }

  /** Wraps the spans in the Datadog LLM Obs request envelope. */
  protected formatRequestBody(
    spans: DatadogLLMObsSpan[]
  ): DatadogLLMObsRequestBody {
    return {
      data: {
        type: "span",
        attributes: {
          ml_app: this.mlApp,
          tags: Object.entries(this.tags)
            .filter(([, value]) => value)
            .map(([key, value]) => `${key}:${value}`),
          spans,
          session_id: this.sessionId,
        },
      },
    };
  }

  /**
   * Datadog span/trace ids are unsigned 64-bit decimal strings; take the
   * first 64 bits of the UUID's hex form and render them in base 10.
   */
  protected uuidToBigInt(uuid: string): string {
    const hexString = uuid.replace(/-/g, "");
    const first64Bits = hexString.slice(0, 16);
    const bigIntValue = BigInt("0x" + first64Bits).toString();
    return bigIntValue;
  }

  // Run timestamps are epoch milliseconds; Datadog expects nanoseconds.
  protected milisecondsToNanoseconds(ms: number): number {
    return ms * 1e6;
  }

  /**
   * Maps a LangChain run type to the Datadog span kind, or null for run
   * types that are not reported.
   */
  protected toDatadogSpanKind(kind: string): DatadogLLMObsSpanKind | null {
    switch (kind) {
      case "llm":
        return "llm";
      case "tool":
        return "tool";
      case "chain":
        return "workflow";
      case "retriever":
        return "retrieval";
      default:
        return null;
    }
  }

  /**
   * Converts run inputs to the Datadog IO shape. LLM runs become message
   * lists or joined prompt text; everything else is JSON-serialized.
   */
  protected transformInput(
    inputs: KVMap,
    spanKind: DatadogLLMObsSpanKind
  ): DatadogLLMObsIO {
    if (spanKind === "llm") {
      if (inputs?.messages) {
        return {
          messages: inputs?.messages?.flatMap((messages: BaseMessage[]) =>
            messages.map((message) => ({
              content: message.content,
              role: message?._getType?.() ?? undefined,
            }))
          ),
        };
      }
      if (inputs?.prompts) {
        return { value: inputs.prompts.join("\n") };
      }
    }
    return { value: JSON.stringify(inputs) };
  }

  /**
   * Converts run outputs to the Datadog IO shape and, for LLM runs,
   * collects token-usage metrics as a side product.
   */
  protected transformOutput(
    outputs: KVMap | undefined,
    spanKind: DatadogLLMObsSpanKind
  ): {
    output: DatadogLLMObsIO | undefined;
    tokensMetadata: Record<string, number>;
  } {
    const tokensMetadata: Record<string, number> = {};
    if (!outputs) {
      return { output: undefined, tokensMetadata };
    }
    if (spanKind === "llm") {
      return {
        output: {
          messages: outputs?.generations?.flatMap(
            (generations: ChatGeneration[]) =>
              generations.map(({ message, text }) => {
                // Harvest token counts while mapping messages.
                if (isAIMessage(message) && message?.usage_metadata) {
                  tokensMetadata.prompt_tokens =
                    message.usage_metadata.input_tokens;
                  tokensMetadata.completion_tokens =
                    message.usage_metadata.output_tokens;
                  tokensMetadata.total_tokens =
                    message.usage_metadata.total_tokens;
                }
                return {
                  content: message?.content ?? text,
                  role: message?._getType?.(),
                };
              })
          ),
        },
        tokensMetadata,
      };
    }
    if (spanKind === "retrieval") {
      return {
        output: {
          documents: outputs?.documents.map((document: Document) => {
            // Caller-supplied formatter wins over the default mapping.
            if (typeof this.formatDocument === "function") {
              return this.formatDocument(document);
            }
            return {
              text: document.pageContent,
              id: document.metadata?.id,
              name: document.metadata?.name,
              score: document.metadata?.score,
            };
          }),
        },
        tokensMetadata,
      };
    }
    if (outputs?.output) {
      return {
        output: { value: JSON.stringify(outputs.output) },
        tokensMetadata,
      };
    }
    return { output: { value: JSON.stringify(outputs) }, tokensMetadata };
  }

  /**
   * Converts a single run to a Datadog span, or null when the run cannot
   * be represented (no end time/trace id, unknown kind, or a
   * non-positive duration).
   */
  protected langchainRunToDatadogLLMObsSpan(
    run: Run
  ): DatadogLLMObsSpan | null {
    if (!run.end_time || !run.trace_id) {
      return null;
    }

    const spanId = this.uuidToBigInt(run.id);
    const traceId = this.uuidToBigInt(run.trace_id);
    // NOTE: the literal string "undefined" for root spans is deliberate —
    // the unit tests assert it; parent_id is a string field.
    const parentId = run.parent_run_id
      ? this.uuidToBigInt(run.parent_run_id)
      : "undefined";

    const spanKind = this.toDatadogSpanKind(run.run_type);
    if (spanKind === null) {
      return null;
    }

    const input = this.transformInput(run.inputs, spanKind);
    const { output, tokensMetadata } = this.transformOutput(
      run.outputs,
      spanKind
    );

    const startTimeNs = Number(this.milisecondsToNanoseconds(run.start_time));
    const endTimeNs = Number(this.milisecondsToNanoseconds(run.end_time));
    const durationNs = endTimeNs - startTimeNs;

    if (durationNs <= 0) {
      return null;
    }

    // Prefer the serialized constructor name when available.
    const spanName =
      (run.serialized as { kwargs: { name?: string } })?.kwargs?.name ??
      run.name;
    const spanError = run.error ? 1 : 0;
    const spanStatus = run.error ? "error" : "ok";

    const meta = {
      kind: spanKind,
      input,
      output,
      model_name: run.extra?.metadata?.ls_model_name,
      model_provider: run.extra?.metadata?.ls_provider,
      temperature: run.extra?.metadata?.ls_temperature,
    };

    return {
      parent_id: parentId,
      trace_id: traceId,
      span_id: spanId,
      name: spanName,
      error: spanError,
      status: spanStatus,
      tags: [...(run.tags?.length ? run.tags : [])],
      meta,
      start_ns: startTimeNs,
      duration: durationNs,
      metrics: tokensMetadata,
    };
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/callbacks/handlers
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/callbacks/handlers/tests/datadog.test.ts
import { test, jest, expect } from "@jest/globals"; import * as uuid from "uuid"; import { Run } from "@langchain/core/tracers/base"; import { HumanMessage, AIMessage } from "@langchain/core/messages"; import { DatadogLLMObsRequestBody, DatadogLLMObsSpan, DatadogLLMObsTracer, } from "../datadog.js"; const _DATE = 1620000000000; const _END_DATE = _DATE + 1000; Date.now = jest.fn(() => _DATE); const BASE_URL = "http://datadog-endpoint"; class FakeDatadogLLMObsTracer extends DatadogLLMObsTracer { public persistRun(_run: Run) { return super.persistRun(_run); } public uuidToBigInt(uuid: string) { return super.uuidToBigInt(uuid); } public milisecondsToNanoseconds(ms: number) { return super.milisecondsToNanoseconds(ms); } } beforeEach(() => { const oldFetch = global.fetch; // eslint-disable-next-line @typescript-eslint/no-explicit-any global.fetch = jest.fn().mockImplementation(async (url: any, init?: any) => { if (!url.startsWith(BASE_URL)) return await oldFetch(url, init); const resp: Response = new Response(); return resp; // eslint-disable-next-line @typescript-eslint/no-explicit-any }) as any; }); afterEach(() => { jest.restoreAllMocks(); }); const runId = uuid.v4(); const traceId = uuid.v4(); const baseRun = { id: runId, trace_id: traceId, parent_run_id: undefined, name: "test", start_time: _DATE, end_time: _END_DATE, execution_order: 1, child_execution_order: 0, child_runs: [], extra: {}, tags: [], events: [], }; const createBaseSpan = (tracer: FakeDatadogLLMObsTracer) => ({ span_id: tracer.uuidToBigInt(runId), trace_id: tracer.uuidToBigInt(traceId), parent_id: "undefined", name: "test", start_ns: tracer.milisecondsToNanoseconds(_DATE), duration: tracer.milisecondsToNanoseconds(_END_DATE - _DATE), error: 0, status: "ok", metrics: {}, }); const tracerConfig = { mlApp: "test", userHandle: "test", userId: "test", sessionId: "test", service: "test", env: "test", tags: {}, ddLLMObsEndpoint: BASE_URL, }; test("Test llm span with message input", async () => { const tracer 
= new FakeDatadogLLMObsTracer(tracerConfig); const run: Run = { ...baseRun, run_type: "llm", inputs: { messages: [[new HumanMessage("test")]], }, outputs: { generations: [ [ { message: new AIMessage("test"), }, ], ], }, }; const compareSpan: DatadogLLMObsSpan = { ...createBaseSpan(tracer), meta: { kind: "llm", input: { messages: [{ content: "test", role: "human" }], }, output: { messages: [{ content: "test", role: "ai" }], }, }, }; const requestBody: DatadogLLMObsRequestBody = { data: { type: "span", attributes: { ml_app: "test", tags: ["env:test", "service:test", "user_handle:test", "user_id:test"], spans: [compareSpan], session_id: "test", }, }, }; await tracer.persistRun(run); expect(fetch).toBeCalledWith(expect.any(String), { body: expect.any(String), headers: expect.any(Object), method: "POST", }); const { body } = (fetch as jest.Mock).mock.calls[0][1] as { body: string }; const parsedBody = JSON.parse(body) as DatadogLLMObsRequestBody; expect(parsedBody).toMatchObject( requestBody as unknown as Record<string, unknown> ); }); test("Test llm span with prompt input", async () => { const tracer = new FakeDatadogLLMObsTracer(tracerConfig); const run: Run = { ...baseRun, run_type: "llm", inputs: { prompts: ["Hello", "World"], }, outputs: { generations: [ [ { message: new AIMessage("Hi"), }, ], ], }, }; const compareSpan: DatadogLLMObsSpan = { ...createBaseSpan(tracer), meta: { kind: "llm", input: { value: "Hello\nWorld", }, output: { messages: [{ content: "Hi" }], }, }, }; const requestBody: DatadogLLMObsRequestBody = { data: { type: "span", attributes: { ml_app: "test", tags: ["env:test", "service:test", "user_handle:test", "user_id:test"], spans: [compareSpan], session_id: "test", }, }, }; await tracer.persistRun(run); expect(fetch).toBeCalledWith(expect.any(String), { body: expect.any(String), headers: expect.any(Object), method: "POST", }); const { body } = (fetch as jest.Mock).mock.calls[0][1] as { body: string }; const parsedBody = JSON.parse(body) as 
DatadogLLMObsRequestBody; expect(parsedBody).toMatchObject( requestBody as unknown as Record<string, unknown> ); }); test("Test workflow span", async () => { const tracer = new FakeDatadogLLMObsTracer(tracerConfig); const run: Run = { ...baseRun, run_type: "chain", inputs: { question: "test", }, outputs: { output: "test", }, tags: ["seq:test"], }; const compareSpan: DatadogLLMObsSpan = { ...createBaseSpan(tracer), meta: { kind: "workflow", input: { value: JSON.stringify(run.inputs), }, output: { value: JSON.stringify(run.outputs?.output), }, }, tags: run.tags, }; const requestBody: DatadogLLMObsRequestBody = { data: { type: "span", attributes: { ml_app: "test", tags: ["env:test", "service:test", "user_handle:test", "user_id:test"], spans: [compareSpan], session_id: "test", }, }, }; await tracer.persistRun(run); expect(fetch).toBeCalledWith(expect.any(String), { body: expect.any(String), headers: expect.any(Object), method: "POST", }); const { body } = (fetch as jest.Mock).mock.calls[0][1] as { body: string }; const parsedBody = JSON.parse(body) as DatadogLLMObsRequestBody; expect(parsedBody).toMatchObject( requestBody as unknown as Record<string, unknown> ); }); test("Test tool span", async () => { const tracer = new FakeDatadogLLMObsTracer(tracerConfig); const run: Run = { ...baseRun, run_type: "tool", inputs: { input: { query: "test" }, }, outputs: { output: "test", }, }; const compareSpan: DatadogLLMObsSpan = { ...createBaseSpan(tracer), meta: { kind: "tool", input: { value: JSON.stringify(run.inputs), }, output: { value: JSON.stringify(run.outputs?.output), }, }, }; const requestBody: DatadogLLMObsRequestBody = { data: { type: "span", attributes: { ml_app: "test", tags: ["env:test", "service:test", "user_handle:test", "user_id:test"], spans: [compareSpan], session_id: "test", }, }, }; await tracer.persistRun(run); expect(fetch).toBeCalledWith(expect.any(String), { body: expect.any(String), headers: expect.any(Object), method: "POST", }); const { body } = (fetch 
as jest.Mock).mock.calls[0][1] as { body: string }; const parsedBody = JSON.parse(body) as DatadogLLMObsRequestBody; expect(parsedBody).toMatchObject( requestBody as unknown as Record<string, unknown> ); }); test("Test retrieval span", async () => { const tracer = new FakeDatadogLLMObsTracer(tracerConfig); const run: Run = { ...baseRun, run_type: "retriever", inputs: { input: { query: "test" }, }, outputs: { documents: [ { pageContent: "test", metadata: { id: "1", name: "test", score: 0.1 }, }, ], }, }; const compareSpan: DatadogLLMObsSpan = { ...createBaseSpan(tracer), meta: { kind: "retrieval", input: { value: JSON.stringify(run.inputs), }, output: { documents: [ { text: "test", id: "1", name: "test", score: 0.1, }, ], }, }, }; const requestBody: DatadogLLMObsRequestBody = { data: { type: "span", attributes: { ml_app: "test", tags: ["env:test", "service:test", "user_handle:test", "user_id:test"], spans: [compareSpan], session_id: "test", }, }, }; await tracer.persistRun(run); expect(fetch).toBeCalledWith(expect.any(String), { body: expect.any(String), headers: expect.any(Object), method: "POST", }); const { body } = (fetch as jest.Mock).mock.calls[0][1] as { body: string }; const parsedBody = JSON.parse(body) as DatadogLLMObsRequestBody; expect(parsedBody).toMatchObject( requestBody as unknown as Record<string, unknown> ); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/chat_models/ollama_functions.ts
import { AIMessage, BaseMessage } from "@langchain/core/messages"; import { ChatResult } from "@langchain/core/outputs"; import { BaseChatModel, BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import { SystemMessagePromptTemplate } from "@langchain/core/prompts"; import { BaseFunctionCallOptions } from "@langchain/core/language_models/base"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { type ChatOllamaInput, ChatOllama } from "../../chat_models/ollama.js"; const DEFAULT_TOOL_SYSTEM_TEMPLATE = `You have access to the following tools: {tools} You must always select one of the above tools and respond with only a JSON object matching the following schema: {{ "tool": <name of the selected tool>, "tool_input": <parameters for the selected tool, matching the tool's JSON schema> }}`; /** * @deprecated Deprecated in favor of the `@langchain/ollama` package. Import `ChatOllama` from `@langchain/ollama` instead. */ export interface ChatOllamaFunctionsCallOptions extends BaseFunctionCallOptions {} /** * @deprecated Deprecated in favor of the `@langchain/ollama` package. Import `ChatOllama` from `@langchain/ollama` instead. */ export type OllamaFunctionsInput = Partial<ChatOllamaInput> & BaseChatModelParams & { llm?: ChatOllama; toolSystemPromptTemplate?: string; }; /** * @deprecated Deprecated in favor of the `@langchain/ollama` package. Import `ChatOllama` from `@langchain/ollama` instead. 
*/
export class OllamaFunctions extends BaseChatModel<ChatOllamaFunctionsCallOptions> {
  llm: ChatOllama;

  toolSystemPromptTemplate: string = DEFAULT_TOOL_SYSTEM_TEMPLATE;

  // Fallback "function" used when the caller supplies no functions, so the
  // model can still answer conversationally through the JSON protocol.
  protected defaultResponseFunction = {
    name: "__conversational_response",
    description:
      "Respond conversationally if no other tools should be called for a given query.",
    parameters: {
      type: "object",
      properties: {
        response: {
          type: "string",
          description: "Conversational response to the user.",
        },
      },
      required: ["response"],
    },
  };

  lc_namespace = ["langchain", "experimental", "chat_models"];

  static lc_name(): string {
    return "OllamaFunctions";
  }

  constructor(fields?: OllamaFunctionsInput) {
    super(fields ?? {});
    // Force JSON output mode so the tool-call protocol can be parsed.
    this.llm = fields?.llm ?? new ChatOllama({ ...fields, format: "json" });
    this.toolSystemPromptTemplate =
      fields?.toolSystemPromptTemplate ?? this.toolSystemPromptTemplate;
  }

  invocationParams() {
    return this.llm.invocationParams();
  }

  /** @ignore */
  _identifyingParams() {
    return this.llm._identifyingParams();
  }

  /**
   * Prepends a system prompt describing the available functions, calls the
   * underlying ChatOllama model, and parses its JSON reply into either a
   * plain AI message (default response function) or a function call.
   *
   * @param messages Conversation history to send to the model.
   * @param options Parsed call options; may carry `functions` and
   *   `function_call` (to force a specific function).
   * @param runManager Optional callback manager forwarded to the LLM.
   * @returns The chat result with either text content or a
   *   `function_call` in `additional_kwargs`.
   * @throws If `function_call` names an unknown function, the model emits
   *   invalid JSON, or it picks a function that was not offered.
   */
  async _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun | undefined
  ): Promise<ChatResult> {
    // Copy the array so we never mutate the caller-provided
    // options.functions (the push below would otherwise leak the default
    // response function into the caller's options across invocations).
    let functions = [...(options.functions ?? [])];
    if (options.function_call !== undefined) {
      functions = functions.filter(
        (fn) => fn.name === options.function_call?.name
      );
      if (!functions.length) {
        throw new Error(
          `If "function_call" is specified, you must also pass a matching function in "functions".`
        );
      }
    } else if (functions.length === 0) {
      functions.push(this.defaultResponseFunction);
    }
    const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
      this.toolSystemPromptTemplate
    );
    const systemMessage = await systemPromptTemplate.format({
      tools: JSON.stringify(functions, null, 2),
    });
    const chatResult = await this.llm._generate(
      [systemMessage, ...messages],
      options,
      runManager
    );
    const chatGenerationContent = chatResult.generations[0].message.content;
    if (typeof chatGenerationContent !== "string") {
      throw new Error("OllamaFunctions does not support non-string output.");
    }
    let parsedChatResult;
    try {
      parsedChatResult = JSON.parse(chatGenerationContent);
    } catch (e) {
      throw new Error(
        `"${this.llm.model}" did not respond with valid JSON. Please try again.`
      );
    }
    const calledToolName = parsedChatResult.tool;
    const calledToolArguments = parsedChatResult.tool_input;
    const calledTool = functions.find((fn) => fn.name === calledToolName);
    if (calledTool === undefined) {
      throw new Error(
        `Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`
      );
    }
    // The default response function is unwrapped into plain text output.
    if (calledTool.name === this.defaultResponseFunction.name) {
      return {
        generations: [
          {
            message: new AIMessage({
              content: calledToolArguments.response,
            }),
            text: calledToolArguments.response,
          },
        ],
      };
    }

    const responseMessageWithFunctions = new AIMessage({
      content: "",
      additional_kwargs: {
        function_call: {
          name: calledToolName,
          arguments: calledToolArguments
            ? JSON.stringify(calledToolArguments)
            : "",
        },
      },
    });

    return {
      generations: [{ message: responseMessageWithFunctions, text: "" }],
    };
  }

  _llmType(): string {
    return "ollama_functions";
  }

  /** @ignore */
  _combineLLMOutput() {
    return [];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/experimental/chat_models/tests/ollama_functions.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test } from "@jest/globals"; import { HumanMessage } from "@langchain/core/messages"; import { OllamaFunctions } from "../ollama_functions.js"; test.skip("Test OllamaFunctions", async () => { const chat = new OllamaFunctions({ model: "mistral" }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log(JSON.stringify(res)); }); test.skip("Test OllamaFunctions with functions", async () => { const chat = new OllamaFunctions({ model: "mistral", temperature: 0.1, }).bind({ functions: [ { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"], }, }, required: ["location"], }, }, ], }); const message = new HumanMessage("What is the weather in San Francisco?"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log(JSON.stringify(res)); }); test.skip("Test OllamaFunctions with a forced function call", async () => { const chat = new OllamaFunctions({ model: "mistral", temperature: 0.1, }).bind({ functions: [ { name: "extract_data", description: "Return information about the input", parameters: { type: "object", properties: { sentiment: { type: "string", description: "Whether the input is positive or negative", }, aggressiveness: { type: "integer", description: "How aggressive the input is from 1 to 10", }, language: { type: "string", description: "The language the input is in", }, }, required: ["sentiment", "aggressiveness"], }, }, ], function_call: { name: "extract_data" }, }); const message = new HumanMessage( 
"Extract the desired information from the following passage:\n\nthis is really cool" ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log(JSON.stringify(res)); });
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/zep.ts
import { MemorySearchPayload, MemorySearchResult, NotFoundError, ZepClient, } from "@getzep/zep-js"; import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"; /** * Configuration interface for the ZepRetriever class. Extends the * BaseRetrieverInput interface. * * @argument {string} sessionId - The ID of the Zep session. * @argument {string} url - The URL of the Zep API. * @argument {number} [topK] - The number of results to return. * @argument {string} [apiKey] - The API key for the Zep API. * @argument [searchScope] [searchScope] - The scope of the search: "messages" or "summary". * @argument [searchType] [searchType] - The type of search to perform: "similarity" or "mmr". * @argument {number} [mmrLambda] - The lambda value for the MMR search. * @argument {Record<string, unknown>} [filter] - The metadata filter to apply to the search. */ export interface ZepRetrieverConfig extends BaseRetrieverInput { sessionId: string; url: string; topK?: number; apiKey?: string; searchScope?: "messages" | "summary"; searchType?: "similarity" | "mmr"; mmrLambda?: number; filter?: Record<string, unknown>; } /** * Class for retrieving information from a Zep long-term memory store. * Extends the BaseRetriever class. 
* @example * ```typescript * const retriever = new ZepRetriever({ * url: "http: * sessionId: "session_exampleUUID", * topK: 3, * }); * const query = "Can I drive red cars in France?"; * const docs = await retriever.getRelevantDocuments(query); * ``` */ export class ZepRetriever extends BaseRetriever { static lc_name() { return "ZepRetriever"; } lc_namespace = ["langchain", "retrievers", "zep"]; get lc_secrets(): { [key: string]: string } | undefined { return { apiKey: "ZEP_API_KEY", url: "ZEP_API_URL", }; } get lc_aliases(): { [key: string]: string } | undefined { return { apiKey: "api_key" }; } zepClientPromise: Promise<ZepClient>; private sessionId: string; private topK?: number; private searchScope?: "messages" | "summary"; private searchType?: "similarity" | "mmr"; private mmrLambda?: number; private filter?: Record<string, unknown>; constructor(config: ZepRetrieverConfig) { super(config); this.sessionId = config.sessionId; this.topK = config.topK; this.searchScope = config.searchScope; this.searchType = config.searchType; this.mmrLambda = config.mmrLambda; this.filter = config.filter; this.zepClientPromise = ZepClient.init(config.url, config.apiKey); } /** * Converts an array of message search results to an array of Document objects. * @param {MemorySearchResult[]} results - The array of search results. * @returns {Document[]} An array of Document objects representing the search results. */ private searchMessageResultToDoc(results: MemorySearchResult[]): Document[] { return results .filter((r) => r.message) .map( ({ message: { content, metadata: messageMetadata } = {}, dist, ...rest }) => new Document({ pageContent: content ?? "", metadata: { score: dist, ...messageMetadata, ...rest }, }) ); } /** * Converts an array of summary search results to an array of Document objects. * @param {MemorySearchResult[]} results - The array of search results. * @returns {Document[]} An array of Document objects representing the search results. 
*/ private searchSummaryResultToDoc(results: MemorySearchResult[]): Document[] { return results .filter((r) => r.summary) .map( ({ summary: { content, metadata: summaryMetadata } = {}, dist, ...rest }) => new Document({ pageContent: content ?? "", metadata: { score: dist, ...summaryMetadata, ...rest }, }) ); } /** * Retrieves the relevant documents based on the given query. * @param {string} query - The query string. * @returns {Promise<Document[]>} A promise that resolves to an array of relevant Document objects. */ async _getRelevantDocuments(query: string): Promise<Document[]> { const payload: MemorySearchPayload = { text: query, metadata: this.filter, search_scope: this.searchScope, search_type: this.searchType, mmr_lambda: this.mmrLambda, }; // Wait for ZepClient to be initialized const zepClient = await this.zepClientPromise; if (!zepClient) { throw new Error("ZepClient is not initialized"); } try { const results: MemorySearchResult[] = await zepClient.memory.searchMemory( this.sessionId, payload, this.topK ); return this.searchScope === "summary" ? this.searchSummaryResultToDoc(results) : this.searchMessageResultToDoc(results); } catch (error) { // eslint-disable-next-line no-instanceof/no-instanceof if (error instanceof NotFoundError) { return Promise.resolve([]); // Return an empty Document array } // If it's not a NotFoundError, throw the error again throw error; } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/chaindesk.ts
import {
  BaseRetriever,
  type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
import {
  AsyncCaller,
  type AsyncCallerParams,
} from "@langchain/core/utils/async_caller";

/**
 * Arguments for constructing a {@link ChaindeskRetriever}.
 */
export interface ChaindeskRetrieverArgs
  extends AsyncCallerParams,
    BaseRetrieverInput {
  datastoreId: string;
  topK?: number;
  filter?: Record<string, unknown>;
  apiKey?: string;
}

/**
 * Shape of a single result row returned by the Chaindesk query endpoint.
 */
interface Berry {
  text: string;
  score: number;
  source?: string;
  [key: string]: unknown;
}

/**
 * Retriever backed by the Chaindesk datastore query API.
 *
 * @example
 * ```typescript
 * const retriever = new ChaindeskRetriever({
 *   datastoreId: "DATASTORE_ID",
 *   apiKey: "CHAINDESK_API_KEY",
 *   topK: 8,
 * });
 * const docs = await retriever.getRelevantDocuments("hello");
 * ```
 */
export class ChaindeskRetriever extends BaseRetriever {
  static lc_name() {
    return "ChaindeskRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "chaindesk"];

  caller: AsyncCaller;

  datastoreId: string;

  topK?: number;

  filter?: Record<string, unknown>;

  apiKey?: string;

  constructor({
    datastoreId,
    apiKey,
    topK,
    filter,
    ...rest
  }: ChaindeskRetrieverArgs) {
    super();
    this.caller = new AsyncCaller(rest);
    this.datastoreId = datastoreId;
    this.apiKey = apiKey;
    this.topK = topK;
    this.filter = filter;
  }

  /**
   * Queries the Chaindesk datastore and maps each result row to a Document.
   *
   * Implemented as `_getRelevantDocuments` (rather than overriding the public
   * `getRelevantDocuments`) so the BaseRetriever wrapper can run its
   * callback/tracing plumbing — consistent with the other retrievers in this
   * package. The public `getRelevantDocuments` entry point is unchanged for
   * callers.
   *
   * @param query The search query string.
   * @returns Documents with the row `text` as page content and `score`,
   *   `source`, and any extra fields as metadata.
   */
  async _getRelevantDocuments(query: string): Promise<Document[]> {
    const r = await this.caller.call(
      fetch,
      `https://app.chaindesk.ai/api/datastores/${this.datastoreId}/query`,
      {
        method: "POST",
        body: JSON.stringify({
          query,
          ...(this.topK ? { topK: this.topK } : {}),
          ...(this.filter ? { filters: this.filter } : {}),
        }),
        headers: {
          "Content-Type": "application/json",
          ...(this.apiKey ? { Authorization: `Bearer ${this.apiKey}` } : {}),
        },
      }
    );

    const { results } = (await r.json()) as { results: Berry[] };

    return results.map(
      ({ text, score, source, ...rest }) =>
        new Document({
          pageContent: text,
          metadata: {
            score,
            source,
            ...rest,
          },
        })
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/zep_cloud.ts
import { ZepClient } from "@getzep/zep-cloud"; import { SearchScope, SearchType, MemorySearchResult, NotFoundError, } from "@getzep/zep-cloud/api"; import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"; /** * Configuration interface for the ZepRetriever class. Extends the * BaseRetrieverInput interface. * * @argument {string} sessionId - The ID of the Zep session. * @argument {string} [apiKey] - The Zep Cloud Project Key. * @argument {number} [topK] - The number of results to return. * @argument [searchScope] [searchScope] - The scope of the search: "messages" or "summary". * @argument [searchType] [searchType] - The type of search to perform: "similarity" or "mmr". * @argument {number} [mmrLambda] - The lambda value for the MMR search. * @argument {Record<string, unknown>} [filter] - The metadata filter to apply to the search. */ export interface ZepCloudRetrieverConfig extends BaseRetrieverInput { sessionId: string; topK?: number; apiKey: string; searchScope?: SearchScope; searchType?: SearchType; mmrLambda?: number; filter?: Record<string, unknown>; } /** * Class for retrieving information from a Zep Cloud long-term memory store. * Extends the BaseRetriever class. 
* @example * ```typescript * const retriever = new ZepCloudRetriever({ * apiKey: "<zep cloud project api key>", * sessionId: "session_exampleUUID", * topK: 3, * }); * const query = "Can I drive red cars in France?"; * const docs = await retriever.getRelevantDocuments(query); * ``` */ export class ZepCloudRetriever extends BaseRetriever { static lc_name() { return "ZepRetriever"; } lc_namespace = ["langchain", "retrievers", "zep"]; get lc_secrets(): { [key: string]: string } | undefined { return { apiKey: "ZEP_API_KEY", }; } get lc_aliases(): { [key: string]: string } | undefined { return { apiKey: "api_key" }; } client: ZepClient; private sessionId: string; private topK?: number; private searchScope?: SearchScope; private searchType?: SearchType; private mmrLambda?: number; private filter?: Record<string, unknown>; constructor(config: ZepCloudRetrieverConfig) { super(config); this.sessionId = config.sessionId; this.topK = config.topK; this.searchScope = config.searchScope; this.searchType = config.searchType; this.mmrLambda = config.mmrLambda; this.filter = config.filter; this.client = new ZepClient({ apiKey: config.apiKey }); } /** * Converts an array of message search results to an array of Document objects. * @param {MemorySearchResult[]} results - The array of search results. * @returns {Document[]} An array of Document objects representing the search results. */ private searchMessageResultToDoc(results: MemorySearchResult[]): Document[] { return results .filter((r) => r.message) .map( ({ message: { content, metadata: messageMetadata } = {}, score, ...rest }) => new Document({ pageContent: content ?? "", metadata: { score, ...messageMetadata, ...rest }, }) ); } /** * Converts an array of summary search results to an array of Document objects. * @param {MemorySearchResult[]} results - The array of search results. * @returns {Document[]} An array of Document objects representing the search results. 
*/ private searchSummaryResultToDoc(results: MemorySearchResult[]): Document[] { return results .filter((r) => r.summary) .map( ({ summary: { content, metadata: summaryMetadata } = {}, score, ...rest }) => new Document({ pageContent: content ?? "", metadata: { score, ...summaryMetadata, ...rest }, }) ); } /** * Retrieves the relevant documents based on the given query. * @param {string} query - The query string. * @returns {Promise<Document[]>} A promise that resolves to an array of relevant Document objects. */ async _getRelevantDocuments(query: string): Promise<Document[]> { try { const results: MemorySearchResult[] = await this.client.memory.search( this.sessionId, { text: query, metadata: this.filter, searchScope: this.searchScope, searchType: this.searchType, mmrLambda: this.mmrLambda, limit: this.topK, } ); return this.searchScope === "summary" ? this.searchSummaryResultToDoc(results) : this.searchMessageResultToDoc(results); } catch (error) { // eslint-disable-next-line no-instanceof/no-instanceof if (error instanceof NotFoundError) { return Promise.resolve([]); // Return an empty Document array } // If it's not a NotFoundError, throw the error again throw error; } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/bm25.ts
import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
import { BM25 } from "../utils/@furkantoprak/bm25/BM25.js";

export type BM25RetrieverOptions = {
  docs: Document[];
  k: number;
  includeScore?: boolean;
} & BaseRetrieverInput;

/**
 * A retriever that ranks the configured documents against a query with the
 * BM25 scoring function and returns the top `k` matches. When
 * `includeScore` is set, each returned document carries its BM25 score in
 * metadata under `bm25Score`.
 */
export class BM25Retriever extends BaseRetriever {
  includeScore = false;

  static lc_name() {
    return "BM25Retriever";
  }

  lc_namespace = ["langchain", "retrievers", "bm25_retriever"];

  /** Convenience factory: builds a retriever over an existing document list. */
  static fromDocuments(
    documents: Document[],
    options: Omit<BM25RetrieverOptions, "docs">
  ) {
    return new this({ ...options, docs: documents });
  }

  docs: Document[];

  k: number;

  constructor(options: BM25RetrieverOptions) {
    super(options);
    this.docs = options.docs;
    this.k = options.k;
    this.includeScore = options.includeScore ?? this.includeScore;
  }

  /** Lowercases the text and tokenizes it on runs of whitespace. */
  private preprocessFunc(text: string): string[] {
    return text.toLowerCase().split(/\s+/);
  }

  async _getRelevantDocuments(query: string) {
    const terms = this.preprocessFunc(query);
    const corpus = this.docs.map((d) => d.pageContent);
    const scores = BM25(corpus, terms) as number[];

    // Pair every document with its score, then keep the k best.
    const ranked = this.docs
      .map((document, i) => ({ document, score: scores[i] }))
      .sort((a, b) => b.score - a.score)
      .slice(0, this.k);

    return ranked.map(({ document, score }) => {
      if (!this.includeScore) {
        return document;
      }
      return new Document({
        ...(document.id && { id: document.id }),
        pageContent: document.pageContent,
        metadata: {
          bm25Score: score,
          ...document.metadata,
        },
      });
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/amazon_kendra.ts
import { AttributeFilter, DocumentAttribute, DocumentAttributeValue, KendraClient, KendraClientConfig, QueryCommand, QueryCommandOutput, QueryResultItem, RetrieveCommand, RetrieveCommandOutput, RetrieveResultItem, } from "@aws-sdk/client-kendra"; import { BaseRetriever } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"; /** * @deprecated The AmazonKendraRetriever integration has been moved to the `@langchain/aws` package. Import from `@langchain/aws` instead. * * Interface for the arguments required to initialize an * AmazonKendraRetriever instance. */ export interface AmazonKendraRetrieverArgs { indexId: string; topK: number; region: string; attributeFilter?: AttributeFilter; clientOptions?: KendraClientConfig; } /** * @deprecated The AmazonKendraRetriever integration has been moved to the `@langchain/aws` package. Import from `@langchain/aws` instead. * * Class for interacting with Amazon Kendra, an intelligent search service * provided by AWS. Extends the BaseRetriever class. 
* @example * ```typescript * const retriever = new AmazonKendraRetriever({ * topK: 10, * indexId: "YOUR_INDEX_ID", * region: "us-east-2", * clientOptions: { * credentials: { * accessKeyId: "YOUR_ACCESS_KEY_ID", * secretAccessKey: "YOUR_SECRET_ACCESS_KEY", * }, * }, * }); * * const docs = await retriever.getRelevantDocuments("How are clouds formed?"); * ``` */ export class AmazonKendraRetriever extends BaseRetriever { static lc_name() { return "AmazonKendraRetriever"; } lc_namespace = ["langchain", "retrievers", "amazon_kendra"]; indexId: string; topK: number; kendraClient: KendraClient; attributeFilter?: AttributeFilter; constructor({ indexId, topK = 10, clientOptions, attributeFilter, region, }: AmazonKendraRetrieverArgs) { super(); if (!region) { throw new Error("Please pass regionName field to the constructor!"); } if (!indexId) { throw new Error("Please pass Kendra Index Id to the constructor"); } this.topK = topK; this.kendraClient = new KendraClient({ region, ...clientOptions, }); this.attributeFilter = attributeFilter; this.indexId = indexId; } // A method to combine title and excerpt into a single string. /** * Combines title and excerpt into a single string. * @param title The title of the document. * @param excerpt An excerpt from the document. * @returns A single string combining the title and excerpt. */ combineText(title?: string, excerpt?: string): string { let text = ""; if (title) { text += `Document Title: ${title}\n`; } if (excerpt) { text += `Document Excerpt: \n${excerpt}\n`; } return text; } // A method to clean the result text by replacing sequences of whitespace with a single space and removing ellipses. /** * Cleans the result text by replacing sequences of whitespace with a * single space and removing ellipses. * @param resText The result text to clean. * @returns The cleaned result text. 
*/ cleanResult(resText: string) { const res = resText.replace(/\s+/g, " ").replace(/\.\.\./g, ""); return res; } // A method to extract the attribute value from a DocumentAttributeValue object. /** * Extracts the attribute value from a DocumentAttributeValue object. * @param value The DocumentAttributeValue object to extract the value from. * @returns The extracted attribute value. */ getDocAttributeValue(value: DocumentAttributeValue) { if (value.DateValue) { return value.DateValue; } if (value.LongValue) { return value.LongValue; } if (value.StringListValue) { return value.StringListValue; } if (value.StringValue) { return value.StringValue; } return ""; } // A method to extract the attribute key-value pairs from an array of DocumentAttribute objects. /** * Extracts the attribute key-value pairs from an array of * DocumentAttribute objects. * @param documentAttributes The array of DocumentAttribute objects to extract the key-value pairs from. * @returns An object containing the extracted attribute key-value pairs. */ getDocAttributes(documentAttributes?: DocumentAttribute[]): { [key: string]: unknown; } { const attributes: { [key: string]: unknown } = {}; if (documentAttributes) { for (const attr of documentAttributes) { if (attr.Key && attr.Value) { attributes[attr.Key] = this.getDocAttributeValue(attr.Value); } } } return attributes; } // A method to convert a RetrieveResultItem object into a Document object. /** * Converts a RetrieveResultItem object into a Document object. * @param item The RetrieveResultItem object to convert. * @returns A Document object. */ convertRetrieverItem(item: RetrieveResultItem) { const title = item.DocumentTitle || ""; const excerpt = item.Content ? 
this.cleanResult(item.Content) : ""; const pageContent = this.combineText(title, excerpt); const source = item.DocumentURI; const attributes = this.getDocAttributes(item.DocumentAttributes); const metadata = { source, title, excerpt, document_attributes: attributes, }; return new Document({ pageContent, metadata }); } // A method to extract the top-k documents from a RetrieveCommandOutput object. /** * Extracts the top-k documents from a RetrieveCommandOutput object. * @param response The RetrieveCommandOutput object to extract the documents from. * @param pageSize The number of documents to extract. * @returns An array of Document objects. */ getRetrieverDocs( response: RetrieveCommandOutput, pageSize: number ): Document[] { if (!response.ResultItems) return []; const { length } = response.ResultItems; const count = length < pageSize ? length : pageSize; return response.ResultItems.slice(0, count).map((item) => this.convertRetrieverItem(item) ); } // A method to extract the excerpt text from a QueryResultItem object. /** * Extracts the excerpt text from a QueryResultItem object. * @param item The QueryResultItem object to extract the excerpt text from. * @returns The extracted excerpt text. */ getQueryItemExcerpt(item: QueryResultItem) { if ( item.AdditionalAttributes && item.AdditionalAttributes.length && item.AdditionalAttributes[0].Key === "AnswerText" ) { if (!item.AdditionalAttributes) { return ""; } if (!item.AdditionalAttributes[0]) { return ""; } return this.cleanResult( item.AdditionalAttributes[0].Value?.TextWithHighlightsValue?.Text || "" ); } else if (item.DocumentExcerpt) { return this.cleanResult(item.DocumentExcerpt.Text || ""); } else { return ""; } } // A method to convert a QueryResultItem object into a Document object. /** * Converts a QueryResultItem object into a Document object. * @param item The QueryResultItem object to convert. * @returns A Document object. 
*/ convertQueryItem(item: QueryResultItem) { const title = item.DocumentTitle?.Text || ""; const excerpt = this.getQueryItemExcerpt(item); const pageContent = this.combineText(title, excerpt); const source = item.DocumentURI; const attributes = this.getDocAttributes(item.DocumentAttributes); const metadata = { source, title, excerpt, document_attributes: attributes, }; return new Document({ pageContent, metadata }); } // A method to extract the top-k documents from a QueryCommandOutput object. /** * Extracts the top-k documents from a QueryCommandOutput object. * @param response The QueryCommandOutput object to extract the documents from. * @param pageSize The number of documents to extract. * @returns An array of Document objects. */ getQueryDocs(response: QueryCommandOutput, pageSize: number) { if (!response.ResultItems) return []; const { length } = response.ResultItems; const count = length < pageSize ? length : pageSize; return response.ResultItems.slice(0, count).map((item) => this.convertQueryItem(item) ); } // A method to send a retrieve or query request to Kendra and return the top-k documents. /** * Sends a retrieve or query request to Kendra and returns the top-k * documents. * @param query The query to send to Kendra. * @param topK The number of top documents to return. * @param attributeFilter Optional filter to apply when retrieving documents. * @returns A Promise that resolves to an array of Document objects. 
*/ async queryKendra( query: string, topK: number, attributeFilter?: AttributeFilter ) { const retrieveCommand = new RetrieveCommand({ IndexId: this.indexId, QueryText: query, PageSize: topK, AttributeFilter: attributeFilter, }); const retrieveResponse = await this.kendraClient.send(retrieveCommand); const retriveLength = retrieveResponse.ResultItems?.length; if (retriveLength === 0) { // Retrieve API returned 0 results, call query API const queryCommand = new QueryCommand({ IndexId: this.indexId, QueryText: query, PageSize: topK, AttributeFilter: attributeFilter, }); const queryResponse = await this.kendraClient.send(queryCommand); return this.getQueryDocs(queryResponse, this.topK); } else { return this.getRetrieverDocs(retrieveResponse, this.topK); } } async _getRelevantDocuments(query: string): Promise<Document[]> { const docs = await this.queryKendra(query, this.topK, this.attributeFilter); return docs; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/dria.ts
import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import type { DriaParams, SearchOptions as DriaSearchOptions } from "dria"; import { Dria } from "dria"; /** * Configurations for Dria retriever. * * - `contractId`: a Dria knowledge's contract ID. * - `apiKey`: a Dria API key; if omitted, the retriever will check for `DRIA_API_KEY` environment variable. * * The retrieval can be configured with the following options: * * - `topK`: number of results to return, max 20. (default: 10) * - `rerank`: re-rank the results from most to least semantically relevant to the given search query. (default: true) * - `level`: level of detail for the search, must be an integer from 0 to 5 (inclusive). (default: 1) * - `field`: CSV field name, only relevant for the CSV files. */ export interface DriaRetrieverArgs extends DriaParams, BaseRetrieverInput, DriaSearchOptions {} /** * Class for retrieving documents from knowledge uploaded to Dria. * * @example * ```typescript * // contract of TypeScript Handbook v4.9 uploaded to Dria * const contractId = "-B64DjhUtCwBdXSpsRytlRQCu-bie-vSTvTIT8Ap3g0"; * const retriever = new DriaRetriever({ contractId }); * * const docs = await retriever.getRelevantDocuments("What is a union type?"); * console.log(docs); * ``` */ export class DriaRetriever extends BaseRetriever { static lc_name() { return "DriaRetriever"; } lc_namespace = ["langchain", "retrievers", "dria"]; get lc_secrets() { return { apiKey: "DRIA_API_KEY" }; } get lc_aliases() { return { apiKey: "api_key" }; } apiKey: string; public driaClient: Dria; private searchOptions: DriaSearchOptions; constructor(fields: DriaRetrieverArgs) { super(fields); const apiKey = fields.apiKey ?? 
getEnvironmentVariable("DRIA_API_KEY"); if (!apiKey) throw new Error("Missing DRIA_API_KEY."); this.apiKey = apiKey; this.searchOptions = { topK: fields.topK, field: fields.field, rerank: fields.rerank, level: fields.level, }; this.driaClient = new Dria({ contractId: fields.contractId, apiKey: this.apiKey, }); } /** * Currently connected knowledge on Dria. * * Retriever will use this contract ID while retrieving documents, * and will throw an error if `undefined`. * * In the case that this is `undefined`, the user is expected to * set contract ID manually, such as after creating a new knowledge & inserting * data there with the Dria client. */ get contractId(): string | undefined { return this.driaClient.contractId; } set contractId(value: string) { this.driaClient.contractId = value; } /** * Retrieves documents from Dria with respect to the configured contract ID, based on * the given query string. * * @param query The query string * @returns A promise that resolves to an array of documents, with page content as text, * along with `id` and the relevance `score` within the metadata. */ async _getRelevantDocuments(query: string): Promise<Document[]> { const docs = await this.driaClient.search(query, this.searchOptions); return docs.map( (d) => new Document({ // dria.search returns a string within the metadata as the content pageContent: d.metadata, metadata: { id: d.id, score: d.score, }, }) ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/supabase.ts
import type { SupabaseClient } from "@supabase/supabase-js"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import { CallbackManagerForRetrieverRun, Callbacks, } from "@langchain/core/callbacks/manager"; interface SearchEmbeddingsParams { query_embedding: number[]; match_count: number; // int filter?: Record<string, unknown>; // jsonb } interface SearchKeywordParams { query_text: string; match_count: number; // int } interface SearchResponseRow { id: number; content: string; metadata: object; similarity: number; } type SearchResult = [Document, number, number]; export interface SupabaseLibArgs extends BaseRetrieverInput { client: SupabaseClient; /** * The table name on Supabase. Defaults to "documents". */ tableName?: string; /** * The name of the Similarity search function on Supabase. Defaults to "match_documents". */ similarityQueryName?: string; /** * The name of the Keyword search function on Supabase. Defaults to "kw_match_documents". */ keywordQueryName?: string; /** * The number of documents to return from the similarity search. Defaults to 2. */ similarityK?: number; /** * The number of documents to return from the keyword search. Defaults to 2. */ keywordK?: number; } export interface SupabaseHybridSearchParams { query: string; similarityK: number; keywordK: number; } /** * Class for performing hybrid search operations on a Supabase database. * It extends the `BaseRetriever` class and implements methods for * similarity search, keyword search, and hybrid search. 
*/ export class SupabaseHybridSearch extends BaseRetriever { static lc_name() { return "SupabaseHybridSearch"; } lc_namespace = ["langchain", "retrievers", "supabase"]; similarityK: number; query: string; keywordK: number; similarityQueryName: string; client: SupabaseClient; tableName: string; keywordQueryName: string; embeddings: EmbeddingsInterface; constructor(embeddings: EmbeddingsInterface, args: SupabaseLibArgs) { super(args); this.embeddings = embeddings; this.client = args.client; this.tableName = args.tableName || "documents"; this.similarityQueryName = args.similarityQueryName || "match_documents"; this.keywordQueryName = args.keywordQueryName || "kw_match_documents"; this.similarityK = args.similarityK || 2; this.keywordK = args.keywordK || 2; } /** * Performs a similarity search on the Supabase database using the * provided query and returns the top 'k' similar documents. * @param query The query to use for the similarity search. * @param k The number of top similar documents to return. * @param _callbacks Optional callbacks to pass to the embedQuery method. * @returns A promise that resolves to an array of search results. Each result is a tuple containing a Document, its similarity score, and its ID. */ protected async similaritySearch( query: string, k: number, _callbacks?: Callbacks // implement passing to embedQuery later ): Promise<SearchResult[]> { const embeddedQuery = await this.embeddings.embedQuery(query); const matchDocumentsParams: SearchEmbeddingsParams = { query_embedding: embeddedQuery, match_count: k, }; if (Object.keys(this.metadata ?? 
{}).length > 0) { matchDocumentsParams.filter = this.metadata; } const { data: searches, error } = await this.client.rpc( this.similarityQueryName, matchDocumentsParams ); if (error) { throw new Error( `Error searching for documents: ${error.code} ${error.message} ${error.details}` ); } return (searches as SearchResponseRow[]).map((resp) => [ new Document({ metadata: resp.metadata, pageContent: resp.content, }), resp.similarity, resp.id, ]); } /** * Performs a keyword search on the Supabase database using the provided * query and returns the top 'k' documents that match the keywords. * @param query The query to use for the keyword search. * @param k The number of top documents to return that match the keywords. * @returns A promise that resolves to an array of search results. Each result is a tuple containing a Document, its similarity score multiplied by 10, and its ID. */ protected async keywordSearch( query: string, k: number ): Promise<SearchResult[]> { const kwMatchDocumentsParams: SearchKeywordParams = { query_text: query, match_count: k, }; const { data: searches, error } = await this.client.rpc( this.keywordQueryName, kwMatchDocumentsParams ); if (error) { throw new Error( `Error searching for documents: ${error.code} ${error.message} ${error.details}` ); } return (searches as SearchResponseRow[]).map((resp) => [ new Document({ metadata: resp.metadata, pageContent: resp.content, }), resp.similarity * 10, resp.id, ]); } /** * Combines the results of the `similaritySearch` and `keywordSearch` * methods and returns the top 'k' documents based on a combination of * similarity and keyword matching. * @param query The query to use for the hybrid search. * @param similarityK The number of top similar documents to return. * @param keywordK The number of top documents to return that match the keywords. * @param callbacks Optional callbacks to pass to the similaritySearch method. * @returns A promise that resolves to an array of search results. 
Each result is a tuple containing a Document, its combined score, and its ID. */ protected async hybridSearch( query: string, similarityK: number, keywordK: number, callbacks?: Callbacks ): Promise<SearchResult[]> { const similarity_search = this.similaritySearch( query, similarityK, callbacks ); const keyword_search = this.keywordSearch(query, keywordK); return Promise.all([similarity_search, keyword_search]) .then((results) => results.flat()) .then((results) => { const picks = new Map<number, SearchResult>(); results.forEach((result) => { const id = result[2]; const nextScore = result[1]; const prevScore = picks.get(id)?.[1]; if (prevScore === undefined || nextScore > prevScore) { picks.set(id, result); } }); return Array.from(picks.values()); }) .then((results) => results.sort((a, b) => b[1] - a[1])); } async _getRelevantDocuments( query: string, runManager?: CallbackManagerForRetrieverRun ): Promise<Document[]> { const searchResults = await this.hybridSearch( query, this.similarityK, this.keywordK, runManager?.getChild("hybrid_search") ); return searchResults.map(([doc]) => doc); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/databerry.ts
import {
  BaseRetriever,
  type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
import {
  AsyncCaller,
  AsyncCallerParams,
} from "@langchain/core/utils/async_caller";

/**
 * Interface for the arguments required to create a new instance of
 * DataberryRetriever.
 */
export interface DataberryRetrieverArgs
  extends AsyncCallerParams,
    BaseRetrieverInput {
  datastoreUrl: string;
  topK?: number;
  apiKey?: string;
}

/**
 * Interface for the structure of a Berry object returned by the Databerry
 * API.
 */
interface Berry {
  text: string;
  score: number;
  source?: string;
  [key: string]: unknown;
}

/**
 * Document retriever backed by the Databerry datastore API.
 */
/** @deprecated Use "langchain/retrievers/chaindesk" instead */
export class DataberryRetriever extends BaseRetriever {
  static lc_name() {
    return "DataberryRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "databerry"];

  get lc_secrets() {
    return { apiKey: "DATABERRY_API_KEY" };
  }

  get lc_aliases() {
    return { apiKey: "api_key" };
  }

  caller: AsyncCaller;

  datastoreUrl: string;

  topK?: number;

  apiKey?: string;

  constructor(fields: DataberryRetrieverArgs) {
    super(fields);
    const { datastoreUrl, apiKey, topK, ...rest } = fields;

    this.caller = new AsyncCaller(rest);
    this.datastoreUrl = datastoreUrl;
    this.apiKey = apiKey;
    this.topK = topK;
  }

  /**
   * POSTs the query to the configured datastore URL and maps each result
   * row to a Document.
   */
  async _getRelevantDocuments(query: string): Promise<Document[]> {
    // Build the request payload and headers up front; topK and the
    // Authorization header are only attached when configured.
    const payload: Record<string, unknown> = { query };
    if (this.topK) {
      payload.topK = this.topK;
    }

    const headers: Record<string, string> = {
      "Content-Type": "application/json",
    };
    if (this.apiKey) {
      headers.Authorization = `Bearer ${this.apiKey}`;
    }

    const response = await this.caller.call(fetch, this.datastoreUrl, {
      method: "POST",
      body: JSON.stringify(payload),
      headers,
    });

    const { results } = (await response.json()) as { results: Berry[] };

    return results.map(
      ({ text, score, source, ...rest }) =>
        new Document({
          pageContent: text,
          metadata: {
            score,
            source,
            ...rest,
          },
        })
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/amazon_knowledge_base.ts
import {
  RetrieveCommand,
  BedrockAgentRuntimeClient,
  BedrockAgentRuntimeClientConfig,
} from "@aws-sdk/client-bedrock-agent-runtime";
import { BaseRetriever } from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";

/**
 * @deprecated The AmazonKnowledgeBaseRetriever integration has been moved to the `@langchain/aws` package. Import from `@langchain/aws` instead.
 *
 * Constructor arguments for AmazonKnowledgeBaseRetriever.
 */
export interface AmazonKnowledgeBaseRetrieverArgs {
  knowledgeBaseId: string;
  topK: number;
  region: string;
  clientOptions?: BedrockAgentRuntimeClientConfig;
}

/**
 * @deprecated The AmazonKnowledgeBaseRetriever integration has been moved to the `@langchain/aws` package. Import from `@langchain/aws` instead.
 *
 * Retriever backed by Amazon Bedrock Knowledge Bases, AWS's managed RAG
 * service. Issues a vector-search `Retrieve` call and converts each hit
 * into a Document.
 * @example
 * ```typescript
 * const retriever = new AmazonKnowledgeBaseRetriever({
 *   topK: 10,
 *   knowledgeBaseId: "YOUR_KNOWLEDGE_BASE_ID",
 *   region: "us-east-2",
 *   clientOptions: {
 *     credentials: {
 *       accessKeyId: "YOUR_ACCESS_KEY_ID",
 *       secretAccessKey: "YOUR_SECRET_ACCESS_KEY",
 *     },
 *   },
 * });
 *
 * const docs = await retriever.getRelevantDocuments("How are clouds formed?");
 * ```
 */
export class AmazonKnowledgeBaseRetriever extends BaseRetriever {
  static lc_name() {
    return "AmazonKnowledgeBaseRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "amazon_bedrock_knowledge_base"];

  knowledgeBaseId: string;

  topK: number;

  bedrockAgentRuntimeClient: BedrockAgentRuntimeClient;

  constructor({
    knowledgeBaseId,
    topK = 10,
    clientOptions,
    region,
  }: AmazonKnowledgeBaseRetrieverArgs) {
    super();

    this.knowledgeBaseId = knowledgeBaseId;
    this.topK = topK;
    // Explicit `region` wins unless clientOptions overrides it via spread.
    this.bedrockAgentRuntimeClient = new BedrockAgentRuntimeClient({
      region,
      ...clientOptions,
    });
  }

  /**
   * Collapses runs of whitespace to a single space and strips literal
   * ellipses ("...") from a result snippet.
   * @param resText The result text to clean.
   * @returns The cleaned result text.
   */
  cleanResult(resText: string) {
    return resText.replace(/\s+/g, " ").replace(/\.\.\./g, "");
  }

  /**
   * Runs a vector search against the knowledge base.
   * @param query Search text.
   * @param topK Number of results requested from Bedrock.
   * @returns Plain `{ pageContent, metadata }` records, one per hit;
   *   empty array when Bedrock returns no results.
   */
  async queryKnowledgeBase(query: string, topK: number) {
    const command = new RetrieveCommand({
      knowledgeBaseId: this.knowledgeBaseId,
      retrievalQuery: {
        text: query,
      },
      retrievalConfiguration: {
        vectorSearchConfiguration: {
          numberOfResults: topK,
        },
      },
    });

    const response = await this.bedrockAgentRuntimeClient.send(command);

    const hits = response.retrievalResults;
    if (!hits) {
      return [] as Array<Document>;
    }

    return hits.map((hit) => ({
      pageContent: this.cleanResult(hit.content?.text || ""),
      metadata: {
        source: hit.location?.s3Location?.uri,
        score: hit.score,
        ...hit.metadata,
      },
    }));
  }

  async _getRelevantDocuments(query: string): Promise<Document[]> {
    return this.queryKnowledgeBase(query, this.topK);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/vectara_summary.ts
import { Document } from "@langchain/core/documents"; import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager"; import { VectaraStore, type VectaraSummary, type VectaraFilter, DEFAULT_FILTER, } from "../vectorstores/vectara.js"; export interface VectaraRetrieverInput extends BaseRetrieverInput { vectara: VectaraStore; filter?: VectaraFilter; topK?: number; summaryConfig?: VectaraSummary; } export class VectaraSummaryRetriever extends BaseRetriever { static lc_name() { return "VectaraSummaryRetriever"; } lc_namespace = ["langchain", "retrievers"]; private filter = DEFAULT_FILTER; private vectara: VectaraStore; private topK: number; private summaryConfig: VectaraSummary; constructor(fields: VectaraRetrieverInput) { super(fields); this.vectara = fields.vectara; this.topK = fields.topK ?? 10; this.filter = fields.filter ?? DEFAULT_FILTER; this.summaryConfig = fields.summaryConfig ?? { enabled: false, maxSummarizedResults: 0, responseLang: "eng", }; } async _getRelevantDocuments( query: string, _callbacks?: CallbackManagerForRetrieverRun ): Promise<Document[]> { const summaryResult = await this.vectara.vectaraQuery( query, this.topK, this.filter, this.summaryConfig ? this.summaryConfig : undefined ); const docs = summaryResult.documents; if (this.summaryConfig.enabled) { docs.push( new Document({ pageContent: summaryResult.summary, metadata: { summary: true }, }) ); } return docs; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tavily_search_api.ts
import { Document } from "@langchain/core/documents"; import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager"; import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; /** * Options for the TavilySearchAPIRetriever class, which includes a BaseLanguageModel * instance, a VectorStore instance, and an optional promptTemplate which * can either be a BasePromptTemplate instance or a PromptKey. */ export type TavilySearchAPIRetrieverFields = BaseRetrieverInput & { k?: number; includeGeneratedAnswer?: boolean; includeRawContent?: boolean; includeImages?: boolean; searchDepth?: "basic" | "advanced"; includeDomains?: string[]; excludeDomains?: string[]; kwargs?: Record<string, unknown>; apiKey?: string; }; /** * A class for retrieving documents related to a given search term * using the Tavily Search API. */ export class TavilySearchAPIRetriever extends BaseRetriever { static lc_name() { return "TavilySearchAPIRetriever"; } get lc_namespace(): string[] { return ["langchain", "retrievers", "tavily_search_api"]; } k = 10; includeGeneratedAnswer = false; includeRawContent = false; includeImages = false; searchDepth = "basic"; includeDomains?: string[]; excludeDomains?: string[]; kwargs: Record<string, unknown> = {}; apiKey?: string; constructor(fields?: TavilySearchAPIRetrieverFields) { super(fields); this.k = fields?.k ?? this.k; this.includeGeneratedAnswer = fields?.includeGeneratedAnswer ?? this.includeGeneratedAnswer; this.includeRawContent = fields?.includeRawContent ?? this.includeRawContent; this.includeImages = fields?.includeImages ?? this.includeImages; this.searchDepth = fields?.searchDepth ?? this.searchDepth; this.includeDomains = fields?.includeDomains ?? this.includeDomains; this.excludeDomains = fields?.excludeDomains ?? this.excludeDomains; this.kwargs = fields?.kwargs ?? this.kwargs; this.apiKey = fields?.apiKey ?? 
getEnvironmentVariable("TAVILY_API_KEY"); if (this.apiKey === undefined) { throw new Error( `No Tavily API key found. Either set an environment variable named "TAVILY_API_KEY" or pass an API key as "apiKey".` ); } } async _getRelevantDocuments( query: string, _runManager?: CallbackManagerForRetrieverRun ): Promise<Document[]> { const body: Record<string, unknown> = { query, include_answer: this.includeGeneratedAnswer, include_raw_content: this.includeRawContent, include_images: this.includeImages, max_results: this.k, search_depth: this.searchDepth, api_key: this.apiKey, }; if (this.includeDomains) { body.include_domains = this.includeDomains; } if (this.excludeDomains) { body.exclude_domains = this.excludeDomains; } const response = await fetch("https://api.tavily.com/search", { method: "POST", headers: { "content-type": "application/json", }, body: JSON.stringify({ ...body, ...this.kwargs }), }); const json = await response.json(); if (!response.ok) { throw new Error( `Request failed with status code ${response.status}: ${json.error}` ); } if (!Array.isArray(json.results)) { throw new Error(`Could not parse Tavily results. Please try again.`); } // eslint-disable-next-line @typescript-eslint/no-explicit-any const docs: Document[] = json.results.map((result: any) => { const pageContent = this.includeRawContent ? result.raw_content : result.content; const metadata = { title: result.title, source: result.url, ...Object.fromEntries( Object.entries(result).filter( ([k]) => !["content", "title", "url", "raw_content"].includes(k) ) ), images: json.images, }; return new Document({ pageContent, metadata }); }); if (this.includeGeneratedAnswer) { docs.push( new Document({ pageContent: json.answer, metadata: { title: "Suggested Answer", source: "https://tavily.com/", }, }) ); } return docs; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/metal.ts
import Metal from "@getmetal/metal-sdk"; import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"; /** * Interface for the fields required during the initialization of a * `MetalRetriever` instance. It extends the `BaseRetrieverInput` * interface and adds a `client` field of type `Metal`. */ export interface MetalRetrieverFields extends BaseRetrieverInput { client: Metal; } /** * Interface to represent a response item from the Metal service. It * contains a `text` field and an index signature to allow for additional * unknown properties. */ interface ResponseItem { text: string; [key: string]: unknown; } /** * Class used to interact with the Metal service, a managed retrieval & * memory platform. It allows you to index your data into Metal and run * semantic search and retrieval on it. It extends the `BaseRetriever` * class and requires a `Metal` instance and a dictionary of parameters to * pass to the Metal API during its initialization. * @example * ```typescript * const retriever = new MetalRetriever({ * client: new Metal( * process.env.METAL_API_KEY, * process.env.METAL_CLIENT_ID, * process.env.METAL_INDEX_ID, * ), * }); * const docs = await retriever.getRelevantDocuments("hello"); * ``` */ export class MetalRetriever extends BaseRetriever { static lc_name() { return "MetalRetriever"; } lc_namespace = ["langchain", "retrievers", "metal"]; private client: Metal; constructor(fields: MetalRetrieverFields) { super(fields); this.client = fields.client; } async _getRelevantDocuments(query: string): Promise<Document[]> { const res = await this.client.search({ text: query }); const items = ("data" in res ? res.data : res) as ResponseItem[]; return items.map( ({ text, metadata }) => new Document({ pageContent: text, metadata: metadata as Record<string, unknown>, }) ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/vespa.ts
import { Document, type DocumentInterface } from "@langchain/core/documents";
import {
  RemoteRetriever,
  RemoteRetrieverValues,
  RemoteRetrieverParams,
} from "./remote/base.js";

export interface VespaRetrieverParams extends RemoteRetrieverParams {
  /**
   * The body of the query to send to Vespa
   */
  query_body: object;
  /**
   * The name of the field the content resides in
   */
  content_field: string;
}

/**
 * Remote retriever for Vespa. Builds the JSON query body sent to Vespa's
 * search endpoint and converts the response hits into Documents.
 * @example
 * ```typescript
 * const retriever = new VespaRetriever({
 *   url: "https://my-vespa-instance",
 *   auth: false,
 *   query_body: {
 *     yql: "select content from paragraph where userQuery()",
 *     hits: 5,
 *     ranking: "documentation",
 *     locale: "en-us",
 *   },
 *   content_field: "content",
 * });
 * const result = await retriever.getRelevantDocuments("what is vespa?");
 * ```
 */
export class VespaRetriever extends RemoteRetriever {
  static lc_name() {
    return "VespaRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "vespa"];

  query_body: object;

  content_field: string;

  constructor(fields: VespaRetrieverParams) {
    super(fields);
    this.query_body = fields.query_body;
    this.content_field = fields.content_field;
    // Point the base-class URL at Vespa's search endpoint.
    this.url = `${this.url}/search/?`;
  }

  /**
   * Merges the user-supplied query text into the configured query body.
   * @param query The query string to be sent to Vespa.
   * @returns The JSON request body (query_body plus the `query` field).
   */
  createJsonBody(query: string): RemoteRetrieverValues {
    return { ...this.query_body, query };
  }

  /**
   * Maps each child hit in the Vespa response onto a Document, using the
   * configured content field as pageContent and the hit id as metadata.
   * @param json The JSON response from Vespa.
   * @returns An array of Document instances.
   */
  processJsonResponse(json: RemoteRetrieverValues): DocumentInterface[] {
    const hits = json.root.children as Array<{
      id: string;
      relevance: number;
      source: string;
      fields: Record<string, unknown>;
    }>;
    return hits.map(
      (hit) =>
        new Document({
          pageContent: hit.fields[this.content_field] as string,
          metadata: { id: hit.id },
        })
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/amazon_kendra.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test } from "@jest/globals"; import { AmazonKendraRetriever } from "../amazon_kendra.js"; test.skip("AmazonKendraRetriever", async () => { const retriever = new AmazonKendraRetriever({ topK: 10, indexId: "5c0fcb10-9573-42df-8846-e30d69004ec5", region: "us-east-2", clientOptions: { credentials: { accessKeyId: process.env.AWS_ACCESS_KEY_ID!, secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!, }, }, }); const docs = await retriever.getRelevantDocuments("How are clouds formed?"); expect(docs.length).toBeGreaterThan(0); // console.log(docs); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/amazon_knowledge_base.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test } from "@jest/globals"; import { AmazonKnowledgeBaseRetriever } from "../amazon_knowledge_base.js"; test.skip("AmazonKnowledgeBaseRetriever", async () => { const retriever = new AmazonKnowledgeBaseRetriever({ topK: 10, knowledgeBaseId: process.env.AMAZON_KNOWLEDGE_BASE_ID || "", region: process.env.AWS_REGION || "us-east-1", clientOptions: { credentials: { accessKeyId: process.env.AWS_ACCESS_KEY_ID!, secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!, sessionToken: process.env.AWS_SESSION_TOKEN!, }, }, }); const docs = await retriever.getRelevantDocuments("How are clouds formed?"); expect(docs.length).toBeGreaterThan(0); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/metal.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import Metal from "@getmetal/metal-sdk"; import { MetalRetriever } from "../metal.js"; test("MetalRetriever", async () => { const MetalSDK = Metal; const client = new MetalSDK( process.env.METAL_API_KEY!, process.env.METAL_CLIENT_ID!, process.env.METAL_INDEX_ID ); const retriever = new MetalRetriever({ client }); const docs = await retriever.getRelevantDocuments("hello"); expect(docs.length).toBeGreaterThan(0); // console.log(docs); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/zep.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import { promisify } from "util"; import { randomUUID } from "crypto"; import { ZepRetriever } from "../zep.js"; import { ZepMemory } from "../../memory/zep.js"; const baseURL = process.env.ZEP_API_URL || "http://localhost:8000"; test.skip("ZepRetriever - memory exists", async () => { const sessionId = randomUUID(); const topK = 2; // The number of documents to retrieve const zepMemory = new ZepMemory({ sessionId, baseURL }); const zepRetriever = new ZepRetriever({ sessionId, url: baseURL, topK }); await zepMemory.saveContext( { input: "Who was Octavia Butler?" }, { response: "Octavia Estelle Butler (June 22, 1947 – " + "February 24, 2006) was an American science fiction author.", } ); // 2-second delay to wait for memory to be embedded // note that this may not be sufficient if OpenAI's API is slow const sleep = promisify(setTimeout); await sleep(2000); const docs = await zepRetriever.getRelevantDocuments("hello"); expect(docs.length).toBeGreaterThanOrEqual(2); // console.log(docs); }); test.skip("ZepRetriever - does not exist", async () => { const sessionId = randomUUID(); const topK = 2; // The number of documents to retrieve const zepRetriever = new ZepRetriever({ sessionId, url: baseURL, topK }); const docs = await zepRetriever.getRelevantDocuments("hello"); expect(docs.length).toBe(0); // console.log(docs); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/supabase.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import { createClient } from "@supabase/supabase-js"; import { OpenAIEmbeddings } from "@langchain/openai"; import { SupabaseHybridSearch } from "../supabase.js"; test.skip("Supabase hybrid keyword search", async () => { const client = createClient( process.env.SUPABASE_URL!, process.env.SUPABASE_PRIVATE_KEY! ); const embeddings = new OpenAIEmbeddings(); const retriever = new SupabaseHybridSearch(embeddings, { client, similarityK: 2, keywordK: 2, }); expect(retriever).toBeDefined(); const results = await retriever.getRelevantDocuments("hello bye"); expect(results.length).toBeGreaterThan(0); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/tavily_search_api.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import { TavilySearchAPIRetriever } from "../tavily_search_api.js"; test.skip("TavilySearchAPIRetriever", async () => { const retriever = new TavilySearchAPIRetriever({ includeImages: true, includeRawContent: true, }); const docs = await retriever.getRelevantDocuments("what bear is best?"); expect(docs.length).toBeGreaterThan(0); // console.log(docs); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/vespa.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import { VespaRetriever } from "../vespa.js"; test.skip("VespaRetriever", async () => { const url = process.env.VESPA_URL!; const query_body = { yql: "select * from music where album contains 'head';", hits: 5, locale: "en-us", }; const content_field = "album"; const retriever = new VespaRetriever({ url, auth: false, query_body, content_field, }); const docs = await retriever.getRelevantDocuments("what is vespa?"); expect(docs.length).toBeGreaterThan(0); // console.log(docs); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/bm25.test.ts
import { expect, test } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { BM25Retriever } from "../bm25.js"; test("BM25Retriever", async () => { const docs = [ new Document({ pageContent: "The quick brown fox jumps over the lazy dog", }), new Document({ pageContent: "A lazy dog sleeps all day", }), new Document({ pageContent: "The brown fox is quick and clever", }), ]; const retriever = BM25Retriever.fromDocuments(docs, { k: 2, }); const results = await retriever.invoke("the fox and the dog"); expect(results).toHaveLength(2); expect(results[0].pageContent).toBe( "The quick brown fox jumps over the lazy dog" ); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/tests/dria.int.test.ts
import { test, expect } from "@jest/globals"; import { DriaRetriever } from "../dria.js"; test.skip("DriaRetriever", async () => { // contract of TypeScript Handbook v4.9 uploaded to Dria // https://dria.co/knowledge/-B64DjhUtCwBdXSpsRytlRQCu-bie-vSTvTIT8Ap3g0 const contractId = "-B64DjhUtCwBdXSpsRytlRQCu-bie-vSTvTIT8Ap3g0"; const topK = 10; const retriever = new DriaRetriever({ contractId, topK }); const docs = await retriever.getRelevantDocuments("What is a union type?"); expect(docs.length).toBe(topK); // console.log(docs[0].pageContent); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/remote/index.ts
export { RemoteRetriever, type RemoteRetrieverParams, type RemoteRetrieverAuth, type RemoteRetrieverValues, } from "./base.js";
0
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers
lc_public_repos/langchainjs/libs/langchain-community/src/retrievers/remote/base.ts
import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import { AsyncCaller, type AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import type { DocumentInterface } from "@langchain/core/documents"; /** * Type for the authentication method used by the RemoteRetriever. It can * either be false (no authentication) or an object with a bearer token. */ export type RemoteRetrieverAuth = false | { bearer: string }; /** * Type for the JSON response values from the remote server. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export type RemoteRetrieverValues = Record<string, any>; /** * Interface for the parameters required to initialize a RemoteRetriever * instance. */ export interface RemoteRetrieverParams extends AsyncCallerParams, BaseRetrieverInput { /** * The URL of the remote retriever server */ url: string; /** * The authentication method to use, currently implemented is * - false: no authentication * - { bearer: string }: Bearer token authentication */ auth: RemoteRetrieverAuth; } /** * Abstract class for interacting with a remote server to retrieve * relevant documents based on a given query. */ export abstract class RemoteRetriever extends BaseRetriever implements RemoteRetrieverParams { get lc_secrets(): { [key: string]: string } | undefined { return { "auth.bearer": "REMOTE_RETRIEVER_AUTH_BEARER", }; } url: string; auth: RemoteRetrieverAuth; headers: Record<string, string>; asyncCaller: AsyncCaller; constructor(fields: RemoteRetrieverParams) { super(fields); const { url, auth, ...rest } = fields; this.url = url; this.auth = auth; this.headers = { Accept: "application/json", "Content-Type": "application/json", ...(this.auth && this.auth.bearer ? { Authorization: `Bearer ${this.auth.bearer}` } : {}), }; this.asyncCaller = new AsyncCaller(rest); } /** * Abstract method that should be implemented by subclasses to create the * JSON body of the request based on the given query. 
* @param query The query based on which the JSON body of the request is created. * @returns The JSON body of the request. */ abstract createJsonBody(query: string): RemoteRetrieverValues; /** * Abstract method that should be implemented by subclasses to process the * JSON response from the server and convert it into an array of Document * instances. * @param json The JSON response from the server. * @returns An array of Document instances. */ abstract processJsonResponse( json: RemoteRetrieverValues ): DocumentInterface[]; async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> { const body = this.createJsonBody(query); const response = await this.asyncCaller.call(() => fetch(this.url, { method: "POST", headers: this.headers, body: JSON.stringify(body), }) ); if (!response.ok) { throw new Error( `Failed to retrieve documents from ${this.url}: ${response.status} ${response.statusText}` ); } const json = await response.json(); return this.processJsonResponse(json); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers/html_to_text.ts
import { htmlToText, type HtmlToTextOptions } from "html-to-text"; import { MappingDocumentTransformer, Document, } from "@langchain/core/documents"; /** * A transformer that converts HTML content to plain text. * @example * ```typescript * const loader = new CheerioWebBaseLoader("https://example.com/some-page"); * const docs = await loader.load(); * * const splitter = new RecursiveCharacterTextSplitter({ * maxCharacterCount: 1000, * }); * const transformer = new HtmlToTextTransformer(); * * // The sequence of text splitting followed by HTML to text transformation * const sequence = splitter.pipe(transformer); * * // Processing the loaded documents through the sequence * const newDocuments = await sequence.invoke(docs); * * console.log(newDocuments); * ``` */ export class HtmlToTextTransformer extends MappingDocumentTransformer { static lc_name() { return "HtmlToTextTransformer"; } constructor(protected options: HtmlToTextOptions = {}) { super(options); } async _transformDocument(document: Document): Promise<Document> { const extractedContent = htmlToText(document.pageContent, this.options); return new Document({ pageContent: extractedContent, metadata: { ...document.metadata }, }); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers/mozilla_readability.ts
import { Readability } from "@mozilla/readability"; import { JSDOM } from "jsdom"; import type { Options } from "mozilla-readability"; import { MappingDocumentTransformer, Document, } from "@langchain/core/documents"; /** * A transformer that uses the Mozilla Readability library to extract the * main content from a web page. * @example * ```typescript * const loader = new HTMLWebBaseLoader("https://example.com/article"); * const docs = await loader.load(); * * const splitter = new RecursiveCharacterTextSplitter({ * maxCharacterCount: 5000, * }); * const transformer = new MozillaReadabilityTransformer(); * * // The sequence processes the loaded documents through the splitter and then the transformer. * const sequence = transformer.pipe(splitter); * * // Invoke the sequence to transform the documents into a more readable format. * const newDocuments = await sequence.invoke(docs); * * console.log(newDocuments); * ``` */ export class MozillaReadabilityTransformer extends MappingDocumentTransformer { static lc_name() { return "MozillaReadabilityTransformer"; } constructor(protected options: Options = {}) { super(options); } async _transformDocument(document: Document): Promise<Document> { const doc = new JSDOM(document.pageContent); const readability = new Readability(doc.window.document, this.options); const result = readability.parse(); return new Document({ pageContent: result?.textContent ?? "", metadata: { ...document.metadata, }, }); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers/tests/mozilla_readability.test.ts
import { expect, test } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { MozillaReadabilityTransformer } from "../mozilla_readability.js"; test("Test HTML to text transformer", async () => { const webpageText = `<!DOCTYPE html> <html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions. </div> </body> </html>`; const documents = [ new Document({ pageContent: webpageText, }), new Document({ pageContent: "<div>Mitochondria is the powerhouse of the cell.</div>", metadata: { reliable: false }, }), ]; const transformer = new MozillaReadabilityTransformer(); const newDocuments = await transformer.transformDocuments(documents); expect(newDocuments.length).toBe(2); expect(newDocuments[0].pageContent.length).toBeLessThan(webpageText.length); expect(newDocuments[1].pageContent).toBe( "Mitochondria is the powerhouse of the cell." ); expect(newDocuments[1].metadata).toEqual({ reliable: false }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers
lc_public_repos/langchainjs/libs/langchain-community/src/document_transformers/tests/html_to_text.int.test.ts
import { expect, test } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { HtmlToTextTransformer } from "../html_to_text.js"; test("Test HTML to text transformer", async () => { const webpageText = `<!DOCTYPE html> <html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions. </div> </body> </html>`; const documents = [ new Document({ pageContent: webpageText, }), new Document({ pageContent: "<div>Mitochondria is the powerhouse of the cell.</div>", metadata: { reliable: false }, }), ]; const transformer = new HtmlToTextTransformer(); const newDocuments = await transformer.transformDocuments(documents); expect(newDocuments.length).toBe(2); expect(newDocuments[0].pageContent.length).toBeLessThan(webpageText.length); expect(newDocuments[1].pageContent).toBe( "Mitochondria is the powerhouse of the cell." ); expect(newDocuments[1].metadata).toEqual({ reliable: false }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/iflytek_websocket_stream.ts
/**
 * The readable/writable halves of an open WebSocket connection, plus the
 * negotiated subprotocol and extensions reported by the socket.
 */
export interface WebSocketConnection<
  T extends Uint8Array | string = Uint8Array | string
> {
  readable: ReadableStream<T>;
  writable: WritableStream<T>;
  protocol: string;
  extensions: string;
}

/** Close code/reason pair, mirroring the CloseEvent fields. */
export interface WebSocketCloseInfo {
  code?: number;
  reason?: string;
}

/** Options accepted when opening the stream. `signal` aborts/closes the socket. */
export interface WebSocketStreamOptions {
  protocols?: string[];
  signal?: AbortSignal;
}

/**
 * [WebSocket](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) with [Streams API](https://developer.mozilla.org/en-US/docs/Web/API/Streams_API)
 *
 * Wraps a raw WebSocket in a pair of promises:
 * - `connection` resolves with readable/writable streams once the socket opens
 *   (or rejects on a pre-open error event);
 * - `closed` resolves with the close code/reason when the socket closes.
 * Subclasses supply the actual WebSocket construction via `openWebSocket`
 * (e.g. browser `WebSocket` vs. the `ws` package).
 *
 * @see https://web.dev/websocketstream/
 */
export abstract class BaseWebSocketStream<
  T extends Uint8Array | string = Uint8Array | string
> {
  readonly url: string;

  readonly connection: Promise<WebSocketConnection<T>>;

  readonly closed: Promise<WebSocketCloseInfo>;

  // Imperative close; forwards optional code/reason to WebSocket#close.
  readonly close: (closeInfo?: WebSocketCloseInfo) => void;

  constructor(url: string, options: WebSocketStreamOptions = {}) {
    // Fail fast if the caller's signal is already aborted, matching the
    // DOM convention of throwing an AbortError DOMException.
    if (options.signal?.aborted) {
      throw new DOMException("This operation was aborted", "AbortError");
    }

    this.url = url;
    const ws = this.openWebSocket(url, options);
    const closeWithInfo = ({ code, reason }: WebSocketCloseInfo = {}) =>
      ws.close(code, reason);

    this.connection = new Promise((resolve, reject) => {
      ws.onopen = () => {
        resolve({
          readable: new ReadableStream<T>({
            start(controller) {
              // Pipe socket messages/errors into the readable stream.
              ws.onmessage = ({ data }) => controller.enqueue(data);
              ws.onerror = (e) => controller.error(e);
            },
            // Cancelling the readable closes the underlying socket.
            cancel: closeWithInfo,
          }),
          writable: new WritableStream<T>({
            write(chunk) {
              ws.send(chunk);
            },
            abort() {
              ws.close();
            },
            close: closeWithInfo,
          }),
          protocol: ws.protocol,
          extensions: ws.extensions,
        });
        // Once open, errors no longer reject `connection`; they surface
        // through the readable stream's error path instead.
        ws.removeEventListener("error", reject);
      };
      ws.addEventListener("error", reject);
    });

    this.closed = new Promise<WebSocketCloseInfo>((resolve, reject) => {
      ws.onclose = ({ code, reason }) => {
        resolve({ code, reason });
        ws.removeEventListener("error", reject);
      };
      ws.addEventListener("error", reject);
    });

    if (options.signal) {
      // eslint-disable-next-line no-param-reassign
      // NOTE(review): assigns onabort rather than addEventListener, so it
      // clobbers any existing onabort handler on the caller's signal.
      options.signal.onabort = () => ws.close();
    }

    this.close = closeWithInfo;
  }

  /** Create the underlying WebSocket for `url`; supplied by subclasses. */
  abstract openWebSocket(
    url: string,
    options: WebSocketStreamOptions
  ): WebSocket;
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/convex.ts
/* eslint-disable spaced-comment */
// eslint-disable-next-line import/no-extraneous-dependencies
import {
  internalQueryGeneric as internalQuery,
  internalMutationGeneric as internalMutation,
} from "convex/server";
// eslint-disable-next-line import/no-extraneous-dependencies
import { GenericId, v } from "convex/values";

// Internal Convex query/mutation helpers used by the LangChain Convex
// integrations. All are wrapped in /*#__PURE__*/ so bundlers can tree-shake
// unused helpers.

/** Fetch a single document by its id. Returns null-ish if absent (per ctx.db.get). */
export const get = /*#__PURE__*/ internalQuery({
  args: {
    id: /*#__PURE__*/ v.string(),
  },
  handler: async (ctx, args) => {
    const result = await ctx.db.get(args.id as GenericId<string>);
    return result;
  },
});

/** Insert one document into the named table. */
export const insert = /*#__PURE__*/ internalMutation({
  args: {
    table: /*#__PURE__*/ v.string(),
    document: /*#__PURE__*/ v.any(),
  },
  handler: async (ctx, args) => {
    await ctx.db.insert(args.table, args.document);
  },
});

/**
 * Return all documents in `table` whose indexed `keyField` equals `key`,
 * using the named index for the lookup.
 */
export const lookup = /*#__PURE__*/ internalQuery({
  args: {
    table: /*#__PURE__*/ v.string(),
    index: /*#__PURE__*/ v.string(),
    keyField: /*#__PURE__*/ v.string(),
    key: /*#__PURE__*/ v.string(),
  },
  handler: async (ctx, args) => {
    const result = await ctx.db
      .query(args.table)
      .withIndex(args.index, (q) => q.eq(args.keyField, args.key))
      .collect();
    return result;
  },
});

/**
 * Replace the document matching `keyField == key` if one exists, otherwise
 * insert `document`. Uses `.unique()`, so it assumes at most one match —
 * presumably the index is unique on keyField; verify against callers.
 */
export const upsert = /*#__PURE__*/ internalMutation({
  args: {
    table: /*#__PURE__*/ v.string(),
    index: /*#__PURE__*/ v.string(),
    keyField: /*#__PURE__*/ v.string(),
    key: /*#__PURE__*/ v.string(),
    document: /*#__PURE__*/ v.any(),
  },
  handler: async (ctx, args) => {
    const existing = await ctx.db
      .query(args.table)
      .withIndex(args.index, (q) => q.eq(args.keyField, args.key))
      .unique();
    if (existing !== null) {
      await ctx.db.replace(existing._id, args.document);
    } else {
      await ctx.db.insert(args.table, args.document);
    }
  },
});

/** Delete every document in `table` whose indexed `keyField` equals `key`. */
export const deleteMany = /*#__PURE__*/ internalMutation({
  args: {
    table: /*#__PURE__*/ v.string(),
    index: /*#__PURE__*/ v.string(),
    keyField: /*#__PURE__*/ v.string(),
    key: /*#__PURE__*/ v.string(),
  },
  handler: async (ctx, args) => {
    const existing = await ctx.db
      .query(args.table)
      .withIndex(args.index, (q) => q.eq(args.keyField, args.key))
      .collect();
    // Deletes run concurrently; there is no transaction beyond Convex's own
    // mutation semantics.
    await Promise.all(existing.map((doc) => ctx.db.delete(doc._id)));
  },
});
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/llama_cpp.ts
/* eslint-disable import/no-extraneous-dependencies */
import {
  LlamaModel,
  LlamaContext,
  LlamaChatSession,
  LlamaJsonSchemaGrammar,
  LlamaGrammar,
  type LlamaModelOptions,
  LlamaContextOptions,
  GbnfJsonSchema,
  Llama,
} from "node-llama-cpp";

/**
 * Note that the modelPath is the only required parameter. For testing you
 * can set this in the environment variable `LLAMA_PATH`.
 */
export interface LlamaBaseCppInputs {
  /** Prompt processing batch size. */
  batchSize?: number;
  /** Text context size. */
  contextSize?: number;
  /** Embedding mode only. */
  embedding?: boolean;
  /** Use fp16 for KV cache. */
  f16Kv?: boolean;
  /** Number of layers to store in VRAM. */
  gpuLayers?: number;
  /** The llama_eval() call computes all logits, not just the last one. */
  logitsAll?: boolean;
  /** Maximum number of tokens to generate — assumed from the name; confirm against node-llama-cpp docs. */
  maxTokens?: number;
  /** Path to the model on the filesystem. */
  modelPath: string;
  /** Add the beginning of sentence token. */
  prependBos?: boolean;
  /** If null, a random seed will be used. */
  seed?: null | number;
  /** The randomness of the responses, e.g. 0.1 deterministic, 1.5 creative, 0.8 balanced, 0 disables. */
  temperature?: number;
  /** Number of threads to use to evaluate tokens. */
  threads?: number;
  /** Trim whitespace from the end of the generated text. Disabled by default. */
  trimWhitespaceSuffix?: boolean;
  /** Consider the n most likely tokens, where n is 1 to vocabulary size, 0 disables (uses full vocabulary). Note: only applies when `temperature` > 0. */
  topK?: number;
  /** Selects the smallest token set whose probability exceeds P, where P is between 0 - 1, 1 disables. Note: only applies when `temperature` > 0. */
  topP?: number;
  /** Force system to keep model in RAM. */
  useMlock?: boolean;
  /** Use mmap if possible. */
  useMmap?: boolean;
  /** Only load the vocabulary, no weights. */
  vocabOnly?: boolean;
  /** JSON schema to be used to format output. Also known as `grammar`. */
  jsonSchema?: object;
  /** GBNF string to be used to format output. Also known as `grammar`. */
  gbnf?: string;
}

/**
 * Load a llama.cpp model from disk using the subset of `inputs` that
 * `node-llama-cpp` accepts as model options.
 *
 * @param inputs - User-provided configuration; only modelPath is required.
 * @param llama - The llama.cpp runtime handle doing the loading.
 * @returns The loaded model.
 */
export async function createLlamaModel(
  inputs: LlamaBaseCppInputs,
  llama: Llama
): Promise<LlamaModel> {
  const options: LlamaModelOptions = {
    gpuLayers: inputs?.gpuLayers,
    modelPath: inputs.modelPath,
    useMlock: inputs?.useMlock,
    useMmap: inputs?.useMmap,
    vocabOnly: inputs?.vocabOnly,
  };

  return llama.loadModel(options);
}

/**
 * Create an inference context on `model`, forwarding batch size, context
 * size, and thread count from `inputs`.
 */
export async function createLlamaContext(
  model: LlamaModel,
  inputs: LlamaBaseCppInputs
): Promise<LlamaContext> {
  const options: LlamaContextOptions = {
    batchSize: inputs?.batchSize,
    contextSize: inputs?.contextSize,
    threads: inputs?.threads,
  };

  return model.createContext(options);
}

/** Start a chat session bound to a fresh sequence from `context`. */
export function createLlamaSession(context: LlamaContext): LlamaChatSession {
  return new LlamaChatSession({ contextSequence: context.getSequence() });
}

/**
 * Build a JSON-schema-constrained grammar for output formatting, or
 * undefined when no schema was supplied.
 */
export async function createLlamaJsonSchemaGrammar(
  schemaString: object | undefined,
  llama: Llama
): Promise<LlamaJsonSchemaGrammar<GbnfJsonSchema> | undefined> {
  if (schemaString === undefined) {
    return undefined;
  }
  const schemaJSON = schemaString as GbnfJsonSchema;
  return await llama.createGrammarForJsonSchema(schemaJSON);
}

/**
 * Build a custom GBNF grammar, or undefined when none was supplied.
 *
 * NOTE(review): despite the parameter name `filePath`, the value is passed
 * directly as the `grammar` text — it looks like it should be GBNF content,
 * not a path; verify against callers.
 */
export async function createCustomGrammar(
  filePath: string | undefined,
  llama: Llama
): Promise<LlamaGrammar | undefined> {
  if (filePath === undefined) {
    return undefined;
  }

  return llama.createGrammar({
    grammar: filePath,
  });
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/extname.ts
/**
 * Return the final "."-separated segment of `path`, prefixed with a dot
 * (e.g. "archive.tar.gz" -> ".gz"). Note the deliberate quirk inherited from
 * the original: a path containing no dot yields the whole path prefixed with
 * "." (e.g. "file" -> ".file"), unlike Node's `path.extname`.
 */
export const extname = (path: string): string =>
  `.${path.slice(path.lastIndexOf(".") + 1)}`;
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/googlevertexai-connection.ts
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; import { AsyncCaller, AsyncCallerCallOptions, } from "@langchain/core/utils/async_caller"; import { GenerationChunk } from "@langchain/core/outputs"; import type { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAIConnectionParams, GoogleVertexAILLMPredictions, GoogleVertexAIModelParams, GoogleResponse, GoogleAbstractedClient, GoogleAbstractedClientOps, GoogleAbstractedClientOpsMethod, } from "../types/googlevertexai-types.js"; export abstract class GoogleConnection< CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse > { caller: AsyncCaller; client: GoogleAbstractedClient; streaming: boolean; constructor( caller: AsyncCaller, client: GoogleAbstractedClient, streaming?: boolean ) { this.caller = caller; this.client = client; this.streaming = streaming ?? false; } abstract buildUrl(): Promise<string>; abstract buildMethod(): GoogleAbstractedClientOpsMethod; async _request( data: unknown | undefined, options: CallOptions ): Promise<ResponseType> { const url = await this.buildUrl(); const method = this.buildMethod(); const opts: GoogleAbstractedClientOps = { url, method, }; if (data && method === "POST") { opts.data = data; } if (this.streaming) { opts.responseType = "stream"; } else { opts.responseType = "json"; } const callResponse = await this.caller.callWithOptions( { signal: options?.signal }, async () => this.client.request(opts) ); const response: unknown = callResponse; // Done for typecast safety, I guess return <ResponseType>response; } } export abstract class GoogleVertexAIConnection< CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse, AuthOptions > extends GoogleConnection<CallOptions, ResponseType> implements GoogleVertexAIConnectionParams<AuthOptions> { endpoint = "us-central1-aiplatform.googleapis.com"; location = "us-central1"; apiVersion = "v1"; constructor( fields: 
GoogleVertexAIConnectionParams<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming?: boolean ) { super(caller, client, streaming); this.caller = caller; this.endpoint = fields?.endpoint ?? this.endpoint; this.location = fields?.location ?? this.location; this.apiVersion = fields?.apiVersion ?? this.apiVersion; this.client = client; } buildMethod(): GoogleAbstractedClientOpsMethod { return "POST"; } } export function complexValue(value: unknown): unknown { if (value === null || typeof value === "undefined") { // I dunno what to put here. An error, probably return undefined; } else if (typeof value === "object") { if (Array.isArray(value)) { return { list_val: value.map((avalue) => complexValue(avalue)), }; } else { const ret: Record<string, unknown> = {}; // eslint-disable-next-line @typescript-eslint/no-explicit-any const v: Record<string, any> = value; Object.keys(v).forEach((key) => { ret[key] = complexValue(v[key]); }); return { struct_val: ret }; } } else if (typeof value === "number") { if (Number.isInteger(value)) { return { int_val: value }; } else { return { float_val: value }; } } else { return { string_val: [value], }; } } export function simpleValue(val: unknown): unknown { if (val && typeof val === "object" && !Array.isArray(val)) { // eslint-disable-next-line no-prototype-builtins if (val.hasOwnProperty("stringVal")) { return (val as { stringVal: string[] }).stringVal[0]; // eslint-disable-next-line no-prototype-builtins } else if (val.hasOwnProperty("boolVal")) { return (val as { boolVal: boolean[] }).boolVal[0]; // eslint-disable-next-line no-prototype-builtins } else if (val.hasOwnProperty("listVal")) { const { listVal } = val as { listVal: unknown[] }; return listVal.map((aval) => simpleValue(aval)); // eslint-disable-next-line no-prototype-builtins } else if (val.hasOwnProperty("structVal")) { const ret: Record<string, unknown> = {}; const struct = (val as { structVal: Record<string, unknown> }).structVal; 
Object.keys(struct).forEach((key) => { ret[key] = simpleValue(struct[key]); }); return ret; } else { const ret: Record<string, unknown> = {}; const struct = val as Record<string, unknown>; Object.keys(struct).forEach((key) => { ret[key] = simpleValue(struct[key]); }); return ret; } } else if (Array.isArray(val)) { return val.map((aval) => simpleValue(aval)); } else { return val; } } export class GoogleVertexAILLMConnection< CallOptions extends BaseLanguageModelCallOptions, InstanceType, PredictionType extends GoogleVertexAIBasePrediction, AuthOptions > extends GoogleVertexAIConnection< CallOptions, GoogleVertexAILLMResponse<PredictionType>, AuthOptions > implements GoogleVertexAIBaseLLMInput<AuthOptions> { model: string; client: GoogleAbstractedClient; customModelURL: string; constructor( fields: GoogleVertexAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming?: boolean ) { super(fields, caller, client, streaming); this.client = client; this.model = fields?.model ?? this.model; this.customModelURL = fields?.customModelURL ?? ""; } async buildUrl(): Promise<string> { const method = this.streaming ? "serverStreamingPredict" : "predict"; if (this.customModelURL.trim() !== "") { return `${this.customModelURL}:${method}`; } const projectId = await this.client.getProjectId(); return `https://${this.endpoint}/v1/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`; } formatStreamingData( inputs: InstanceType[], parameters: GoogleVertexAIModelParams ): unknown { return { inputs: [inputs.map((i) => complexValue(i))], parameters: complexValue(parameters), }; } formatStandardData( instances: InstanceType[], parameters: GoogleVertexAIModelParams ): unknown { return { instances, parameters, }; } formatData( instances: InstanceType[], parameters: GoogleVertexAIModelParams ): unknown { return this.streaming ? 
this.formatStreamingData(instances, parameters) : this.formatStandardData(instances, parameters); } async request( instances: InstanceType[], parameters: GoogleVertexAIModelParams, options: CallOptions ): Promise<GoogleVertexAILLMResponse<PredictionType>> { const data = this.formatData(instances, parameters); const response = await this._request(data, options); return response; } } export interface GoogleVertexAILLMResponse< PredictionType extends GoogleVertexAIBasePrediction > extends GoogleResponse { data: GoogleVertexAIStream | GoogleVertexAILLMPredictions<PredictionType>; } export class GoogleVertexAIStream { _buffer = ""; _bufferOpen = true; _firstRun = true; /** * Add data to the buffer. This may cause chunks to be generated, if available. * @param data */ appendBuffer(data: string): void { this._buffer += data; // Our first time, skip to the opening of the array if (this._firstRun) { this._skipTo("["); this._firstRun = false; } this._parseBuffer(); } /** * Indicate there is no more data that will be added to the text buffer. * This should be called when all the data has been read and added to indicate * that we should process everything remaining in the buffer. */ closeBuffer(): void { this._bufferOpen = false; this._parseBuffer(); } /** * Skip characters in the buffer till we get to the start of an object. * Then attempt to read a full object. * If we do read a full object, turn it into a chunk and send it to the chunk handler. * Repeat this for as much as we can. */ _parseBuffer(): void { let obj = null; do { this._skipTo("{"); obj = this._getFullObject(); if (obj !== null) { const chunk = this._simplifyObject(obj); this._handleChunk(chunk); } } while (obj !== null); if (!this._bufferOpen) { // No more data will be added, and we have parsed everything we could, // so everything else is garbage. this._handleChunk(null); this._buffer = ""; } } /** * If the string is present, move the start of the buffer to the first occurrence * of that string. 
This is useful for skipping over elements or parts that we're not * really interested in parsing. (ie - the opening characters, comma separators, etc.) * @param start The string to start the buffer with */ _skipTo(start: string): void { const index = this._buffer.indexOf(start); if (index > 0) { this._buffer = this._buffer.slice(index); } } /** * Given what is in the buffer, parse a single object out of it. * If a complete object isn't available, return null. * Assumes that we are at the start of an object to parse. */ _getFullObject(): object | null { let ret: object | null = null; // Loop while we don't have something to return AND we have something in the buffer let index = 0; while (ret === null && this._buffer.length > index) { // Advance to the next close bracket after our current index index = this._buffer.indexOf("}", index + 1); // If we don't find one, exit with null if (index === -1) { return null; } // If we have one, try to turn it into an object to return try { const objStr = this._buffer.substring(0, index + 1); ret = JSON.parse(objStr); // We only get here if it parsed it ok // If we did turn it into an object, remove it from the buffer this._buffer = this._buffer.slice(index + 1); } catch (xx) { // It didn't parse it correctly, so we swallow the exception and continue } } return ret; } _simplifyObject(obj: unknown): object { return simpleValue(obj) as object; } // Set up a potential Promise that the handler can resolve. // eslint-disable-next-line @typescript-eslint/no-explicit-any _chunkResolution: (chunk: any) => void; // If there is no Promise (it is null), the handler must add it to the queue // eslint-disable-next-line @typescript-eslint/no-explicit-any _chunkPending: Promise<any> | null = null; // A queue that will collect chunks while there is no Promise // eslint-disable-next-line @typescript-eslint/no-explicit-any _chunkQueue: any[] = []; /** * Register that we have another chunk available for consumption. 
* If we are waiting for a chunk, resolve the promise waiting for it immediately. * If not, then add it to the queue. * @param chunk */ // eslint-disable-next-line @typescript-eslint/no-explicit-any _handleChunk(chunk: any): void { if (this._chunkPending) { this._chunkResolution(chunk); this._chunkPending = null; } else { this._chunkQueue.push(chunk); } } /** * Get the next chunk that is coming from the stream. * This chunk may be null, usually indicating the last chunk in the stream. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any async nextChunk(): Promise<any> { if (this._chunkQueue.length > 0) { // If there is data in the queue, return the next queue chunk return this._chunkQueue.shift() as GenerationChunk; } else { // Otherwise, set up a promise that handleChunk will cause to be resolved this._chunkPending = new Promise((resolve) => { this._chunkResolution = resolve; }); return this._chunkPending; } } /** * Is the stream done? * A stream is only done if all of the following are true: * - There is no more data to be added to the text buffer * - There is no more data in the text buffer * - There are no chunks that are waiting to be consumed */ get streamDone(): boolean { return ( !this._bufferOpen && this._buffer.length === 0 && this._chunkQueue.length === 0 && this._chunkPending === null ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/event_source_parse.ts
/* eslint-disable prefer-template */ /* eslint-disable default-case */ /* eslint-disable no-plusplus */ // Adapted from https://github.com/gfortaine/fetch-event-source/blob/main/src/parse.ts // due to a packaging issue in the original. // MIT License import { type Readable } from "stream"; import { IterableReadableStream } from "@langchain/core/utils/stream"; export const EventStreamContentType = "text/event-stream"; /** * Represents a message sent in an event stream * https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format */ export interface EventSourceMessage { /** The event ID to set the EventSource object's last event ID value. */ id: string; /** A string identifying the type of event described. */ event: string; /** The event data */ data: string; /** The reconnection interval (in milliseconds) to wait before retrying the connection */ retry?: number; } function isNodeJSReadable(x: unknown): x is Readable { return x != null && typeof x === "object" && "on" in x; } /** * Converts a ReadableStream into a callback pattern. * @param stream The input ReadableStream. * @param onChunk A function that will be called on each new byte chunk in the stream. * @returns {Promise<void>} A promise that will be resolved when the stream closes. */ export async function getBytes( stream: ReadableStream<Uint8Array>, onChunk: (arr: Uint8Array, flush?: boolean) => void ) { // stream is a Node.js Readable / PassThrough stream // this can happen if node-fetch is polyfilled if (isNodeJSReadable(stream)) { return new Promise<void>((resolve) => { stream.on("readable", () => { let chunk; // eslint-disable-next-line no-constant-condition while (true) { chunk = stream.read(); if (chunk == null) { onChunk(new Uint8Array(), true); break; } onChunk(chunk); } resolve(); }); }); } const reader = stream.getReader(); // CHANGED: Introduced a "flush" mechanism to process potential pending messages when the stream ends. 
// This change is essential to ensure that we capture every last piece of information from streams, // such as those from Azure OpenAI, which may not terminate with a blank line. Without this // mechanism, we risk ignoring a possibly significant last message. // See https://github.com/langchain-ai/langchainjs/issues/1299 for details. // eslint-disable-next-line no-constant-condition while (true) { const result = await reader.read(); if (result.done) { onChunk(new Uint8Array(), true); break; } onChunk(result.value); } } const enum ControlChars { NewLine = 10, CarriageReturn = 13, Space = 32, Colon = 58, } /** * Parses arbitary byte chunks into EventSource line buffers. * Each line should be of the format "field: value" and ends with \r, \n, or \r\n. * @param onLine A function that will be called on each new EventSource line. * @returns A function that should be called for each incoming byte chunk. */ export function getLines( onLine: (line: Uint8Array, fieldLength: number, flush?: boolean) => void ) { let buffer: Uint8Array | undefined; let position: number; // current read position let fieldLength: number; // length of the `field` portion of the line let discardTrailingNewline = false; // return a function that can process each incoming byte chunk: return function onChunk(arr: Uint8Array, flush?: boolean) { if (flush) { onLine(arr, 0, true); return; } if (buffer === undefined) { buffer = arr; position = 0; fieldLength = -1; } else { // we're still parsing the old line. 
Append the new bytes into buffer: buffer = concat(buffer, arr); } const bufLength = buffer.length; let lineStart = 0; // index where the current line starts while (position < bufLength) { if (discardTrailingNewline) { if (buffer[position] === ControlChars.NewLine) { lineStart = ++position; // skip to next char } discardTrailingNewline = false; } // start looking forward till the end of line: let lineEnd = -1; // index of the \r or \n char for (; position < bufLength && lineEnd === -1; ++position) { switch (buffer[position]) { case ControlChars.Colon: if (fieldLength === -1) { // first colon in line fieldLength = position - lineStart; } break; // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore:7029 \r case below should fallthrough to \n: case ControlChars.CarriageReturn: discardTrailingNewline = true; // eslint-disable-next-line no-fallthrough case ControlChars.NewLine: lineEnd = position; break; } } if (lineEnd === -1) { // We reached the end of the buffer but the line hasn't ended. // Wait for the next arr and then continue parsing: break; } // we've reached the line end, send it out: onLine(buffer.subarray(lineStart, lineEnd), fieldLength); lineStart = position; // we're now on the next line fieldLength = -1; } if (lineStart === bufLength) { buffer = undefined; // we've finished reading it } else if (lineStart !== 0) { // Create a new view into buffer beginning at lineStart so we don't // need to copy over the previous lines when we get the new arr: buffer = buffer.subarray(lineStart); position -= lineStart; } }; } /** * Parses line buffers into EventSourceMessages. * @param onId A function that will be called on each `id` field. * @param onRetry A function that will be called on each `retry` field. * @param onMessage A function that will be called on each message. * @returns A function that should be called for each incoming line buffer. 
*/ export function getMessages( onMessage?: (msg: EventSourceMessage) => void, onId?: (id: string) => void, onRetry?: (retry: number) => void ) { let message = newMessage(); const decoder = new TextDecoder(); // return a function that can process each incoming line buffer: return function onLine( line: Uint8Array, fieldLength: number, flush?: boolean ) { if (flush) { if (!isEmpty(message)) { onMessage?.(message); message = newMessage(); } return; } if (line.length === 0) { // empty line denotes end of message. Trigger the callback and start a new message: onMessage?.(message); message = newMessage(); } else if (fieldLength > 0) { // exclude comments and lines with no values // line is of format "<field>:<value>" or "<field>: <value>" // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation const field = decoder.decode(line.subarray(0, fieldLength)); const valueOffset = fieldLength + (line[fieldLength + 1] === ControlChars.Space ? 2 : 1); const value = decoder.decode(line.subarray(valueOffset)); switch (field) { case "data": // if this message already has data, append the new value to the old. // otherwise, just set to the new value: message.data = message.data ? 
message.data + "\n" + value : value; // otherwise, break; case "event": message.event = value; break; case "id": onId?.((message.id = value)); break; case "retry": { const retry = parseInt(value, 10); if (!Number.isNaN(retry)) { // per spec, ignore non-integers onRetry?.((message.retry = retry)); } break; } } } }; } function concat(a: Uint8Array, b: Uint8Array) { const res = new Uint8Array(a.length + b.length); res.set(a); res.set(b, a.length); return res; } function newMessage(): EventSourceMessage { // data, event, and id must be initialized to empty strings: // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation // retry should be initialized to undefined so we return a consistent shape // to the js engine all the time: https://mathiasbynens.be/notes/shapes-ics#takeaways return { data: "", event: "", id: "", retry: undefined, }; } export function convertEventStreamToIterableReadableDataStream( stream: ReadableStream ) { const dataStream = new ReadableStream({ async start(controller) { const enqueueLine = getMessages((msg) => { if (msg.data) controller.enqueue(msg.data); }); const onLine = ( line: Uint8Array, fieldLength: number, flush?: boolean ) => { enqueueLine(line, fieldLength, flush); if (flush) controller.close(); }; await getBytes(stream, getLines(onLine)); }, }); return IterableReadableStream.fromReadableStream(dataStream); } function isEmpty(message: EventSourceMessage): boolean { return ( message.data === "" && message.event === "" && message.id === "" && message.retry === undefined ); }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/momento.ts
/* eslint-disable no-instanceof/no-instanceof */ import { ICacheClient, CreateCache } from "@gomomento/sdk-core"; /** * Utility function to ensure that a Momento cache exists. * If the cache does not exist, it is created. * * @param client The Momento cache client. * @param cacheName The name of the cache to ensure exists. */ export async function ensureCacheExists( client: ICacheClient, cacheName: string ): Promise<void> { const createResponse = await client.createCache(cacheName); if ( createResponse instanceof CreateCache.Success || createResponse instanceof CreateCache.AlreadyExists ) { // pass } else if (createResponse instanceof CreateCache.Error) { throw createResponse.innerException(); } else { throw new Error(`Unknown response type: ${createResponse.toString()}`); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/sqlite_where_builder.ts
import { InStatement, InValue } from "@libsql/client"; export type WhereCondition< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record<string, any> = Record<string, any> > = { [Key in keyof Metadata]: | { operator: "=" | ">" | "<" | ">=" | "<=" | "<>" | "LIKE"; value: InValue; } | { operator: "IN"; value: InValue[]; }; }; type WhereInStatement = Exclude<InStatement, string>; export class SqliteWhereBuilder { private conditions: WhereCondition; constructor(conditions: WhereCondition) { this.conditions = conditions; } buildWhereClause(): WhereInStatement { const sqlParts: string[] = []; const args: Record<string, InValue> = {}; for (const [column, condition] of Object.entries(this.conditions)) { const { operator, value } = condition; if (operator === "IN") { const placeholders = value .map((_, index) => `:${column}${index}`) .join(", "); sqlParts.push( `json_extract(metadata, '$.${column}') IN (${placeholders})` ); const values = value.reduce( (previousValue: Record<string, InValue>, currentValue, index) => { return { ...previousValue, [`${column}${index}`]: currentValue }; }, {} ); Object.assign(args, values); } else { sqlParts.push( `json_extract(metadata, '$.${column}') ${operator} :${column}` ); args[column] = value; } } const sql = sqlParts.length ? `${sqlParts.join(" AND ")}` : ""; return { sql, args }; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/cassandra.ts
import { AsyncCaller, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { Client, DseClientOptions, types as driverTypes, } from "cassandra-driver"; import fs from "node:fs/promises"; import * as path from "node:path"; import * as os from "node:os"; /* ===================================================================================================================== * ===================================================================================================================== * Cassandra Client Factory * ===================================================================================================================== * ===================================================================================================================== */ /** * Defines the configuration options for connecting to Astra DB, DataStax's cloud-native Cassandra-as-a-Service. * This interface specifies the necessary parameters required to establish a connection with an Astra DB instance, * including authentication and targeting specific data centers or regions. * * Properties: * - `token`: The authentication token required for accessing the Astra DB instance. Essential for establishing a secure connection. * - `endpoint`: Optional. The URL or network address of the Astra DB instance. Can be used to directly specify the connection endpoint. * - `datacenterID`: Optional. The unique identifier of the data center to connect to. Used to compute the endpoint. * - `regionName`: Optional. The region name of the Astra DB instance. Used to compute the endpoint. Default to the primary region. * - `bundleUrlTemplate`: Optional. The URL template for downloading the secure connect bundle. Used to customize the bundle URL. "database_id" variable will be resolved at runtime. * * Either `endpoint` or `datacenterID` must be provided to establish a connection to Astra DB. 
*/ export interface AstraServiceProviderArgs { token: string; endpoint?: string | URL; datacenterID?: string; regionName?: string; bundleUrlTemplate?: string; } /** * Encapsulates the service provider-specific arguments required for creating a Cassandra client. * This interface acts as a wrapper for configurations pertaining to various Cassandra service providers, * allowing for extensible and flexible client configuration. * * Currently, it supports: * - `astra`: Optional. Configuration parameters specific to Astra DB, DataStax's cloud-native Cassandra service. * Utilizing this property enables tailored connections to Astra DB instances with custom configurations. * * This structure is designed to be extended with additional service providers in the future, ensuring adaptability * and extensibility for connecting to various Cassandra services with distinct configuration requirements. */ export interface CassandraServiceProviderArgs { astra?: AstraServiceProviderArgs; } /** * Extends the DataStax driver's client options with additional configurations for service providers, * enabling the customization of Cassandra client instances based on specific service requirements. * This interface integrates native driver configurations with custom extensions, facilitating the * connection to Cassandra databases, including managed services like Astra DB. * * - `serviceProviderArgs`: Optional. Contains the connection arguments for specific Cassandra service providers, * such as Astra DB. This allows for detailed and service-specific client configurations, * enhancing connectivity and functionality across different Cassandra environments. * * Incorporating this interface into client creation processes ensures a comprehensive setup, encompassing both * standard and extended options for robust and versatile Cassandra database interactions. 
*/ export interface CassandraClientArgs extends DseClientOptions { serviceProviderArgs?: CassandraServiceProviderArgs; } /** * Provides a centralized and streamlined factory for creating and configuring instances of the Cassandra client. * This class abstracts the complexities involved in instantiating and configuring Cassandra client instances, * enabling straightforward integration with Cassandra databases. It supports customization through various * configuration options, allowing for the creation of clients tailored to specific needs, such as connecting * to different clusters or utilizing specialized authentication and connection options. * * Key Features: * - Simplifies the Cassandra client creation process with method-based configurations. * - Supports customization for connecting to various Cassandra environments, including cloud-based services like Astra. * - Ensures consistent and optimal client configuration, incorporating best practices. * * Example Usage (Apache Cassandra®): * ``` * const cassandraArgs = { * contactPoints: ['h1', 'h2'], * localDataCenter: 'datacenter1', * credentials: { * username: <...> as string, * password: <...> as string, * }, * }; * const cassandraClient = CassandraClientFactory.getClient(cassandraArgs); * ``` * * Example Usage (DataStax AstraDB): * ``` * const astraArgs = { * serviceProviderArgs: { * astra: { * token: <...> as string, * endpoint: <...> as string, * }, * }, * }; * const cassandraClient = CassandraClientFactory.getClient(astraArgs); * ``` * */ export class CassandraClientFactory { /** * Asynchronously obtains a configured Cassandra client based on the provided arguments. * This method processes the given CassandraClientArgs to produce a configured Client instance * from the cassandra-driver, suitable for interacting with Cassandra databases. * * @param args The configuration arguments for the Cassandra client, including any service provider-specific options. 
* @returns A Promise resolving to a Client object configured according to the specified arguments. */ public static async getClient(args: CassandraClientArgs): Promise<Client> { const modifiedArgs = await this.processArgs(args); return new Client(modifiedArgs); } /** * Processes the provided CassandraClientArgs for creating a Cassandra client. * * @param args The arguments for creating the Cassandra client, including service provider configurations. * @returns A Promise resolving to the processed CassandraClientArgs, ready for client initialization. * @throws Error if the configuration is unsupported, specifically if serviceProviderArgs are provided * but do not include valid configurations for Astra. */ private static processArgs( args: CassandraClientArgs ): Promise<CassandraClientArgs> { if (!args.serviceProviderArgs) { return Promise.resolve(args); } if (args.serviceProviderArgs && args.serviceProviderArgs.astra) { return CassandraClientFactory.processAstraArgs(args); } throw new Error("Unsupported configuration for Cassandra client."); } /** * Asynchronously processes and validates the Astra service provider arguments within the * Cassandra client configuration. This includes ensuring the presence of necessary Astra * configurations like endpoint or datacenterID, setting up default secure connect bundle paths, * and initializing default credentials if not provided. * * @param args The arguments for creating the Cassandra client with Astra configurations. * @returns A Promise resolving to the modified CassandraClientArgs with Astra configurations processed. * @throws Error if Astra configuration is incomplete or if both endpoint and datacenterID are missing. 
*/ private static async processAstraArgs( args: CassandraClientArgs ): Promise<CassandraClientArgs> { const astraArgs = args.serviceProviderArgs?.astra; if (!astraArgs) { throw new Error("Astra configuration is not provided in args."); } if (!astraArgs.endpoint && !astraArgs.datacenterID) { throw new Error( "Astra endpoint or datacenterID must be provided in args." ); } // Extract datacenterID and regionName from endpoint if provided if (astraArgs.endpoint) { const endpoint = new URL(astraArgs.endpoint.toString()); const hostnameParts = endpoint.hostname.split("-"); const domainSuffix = ".apps.astra.datastax.com"; if (hostnameParts[hostnameParts.length - 1].endsWith(domainSuffix)) { astraArgs.datacenterID = astraArgs.datacenterID || hostnameParts.slice(0, 5).join("-"); // Extract regionName by joining elements from index 5 to the end, and then remove the domain suffix const fullRegionName = hostnameParts.slice(5).join("-"); astraArgs.regionName = astraArgs.regionName || fullRegionName.replace(domainSuffix, ""); } } // Initialize cloud configuration if not already defined const modifiedArgs = { ...args, cloud: args.cloud || { secureConnectBundle: "" }, }; // Set default bundle location if it is not set if (!modifiedArgs.cloud.secureConnectBundle) { modifiedArgs.cloud.secureConnectBundle = await CassandraClientFactory.getAstraDefaultBundleLocation(astraArgs); } // Ensure secure connect bundle exists await CassandraClientFactory.setAstraBundle( astraArgs, modifiedArgs.cloud.secureConnectBundle ); // Ensure credentials are set modifiedArgs.credentials = modifiedArgs.credentials || { username: "token", password: astraArgs.token, }; return modifiedArgs; } /** * Get the default bundle filesystem location for the Astra Secure Connect Bundle. * * @param astraArgs The Astra service provider arguments. * @returns The default bundle file path. 
*/ private static async getAstraDefaultBundleLocation( astraArgs: AstraServiceProviderArgs ): Promise<string> { const dir = path.join(os.tmpdir(), "cassandra-astra"); await fs.mkdir(dir, { recursive: true }); let scbFileName = `astra-secure-connect-${astraArgs.datacenterID}`; if (astraArgs.regionName) { scbFileName += `-${astraArgs.regionName}`; } scbFileName += ".zip"; const scbPath = path.join(dir, scbFileName); return scbPath; } /** * Ensures the Astra secure connect bundle specified by the path exists and is up to date. * If the file does not exist or is deemed outdated (more than 360 days old), a new secure * connect bundle is downloaded and saved to the specified path. * * @param astraArgs The Astra service provider arguments, including the datacenterID and optional regionName. * @param scbPath The path (or URL) where the secure connect bundle is expected to be located. * @returns A Promise that resolves when the secure connect bundle is verified or updated successfully. * @throws Error if the bundle cannot be retrieved or saved to the specified path. */ private static async setAstraBundle( astraArgs: AstraServiceProviderArgs, scbPath: string | URL ): Promise<void> { // If scbPath is a URL, we assume the URL is correct and do nothing further. // But if it is a string, we need to check if the file exists and download it if necessary. 
if (typeof scbPath === "string") { try { // Check if the file exists const stats = await fs.stat(scbPath); // Calculate the age of the file in days const fileAgeInDays = (Date.now() - stats.mtime.getTime()) / (1000 * 60 * 60 * 24); // File is more than 360 days old, download a fresh copy if (fileAgeInDays > 360) { await CassandraClientFactory.downloadAstraSecureConnectBundle( astraArgs, scbPath ); } } catch (error: unknown) { if ( typeof error === "object" && error !== null && "code" in error && error.code === "ENOENT" ) { // Handle file not found error (ENOENT) await CassandraClientFactory.downloadAstraSecureConnectBundle( astraArgs, scbPath ); } else { throw error; } } } } /** * Downloads the Astra secure connect bundle based on the provided Astra service provider arguments * and saves it to the specified file path. If a regionName is specified and matches one of the * available bundles, the regional bundle is preferred. Otherwise, the first available bundle URL is used. * * @param astraArgs - The Astra service provider arguments, including datacenterID and optional regionName. * @param scbPath - The file path where the secure connect bundle should be saved. * @returns A promise that resolves once the secure connect bundle is successfully downloaded and saved. * @throws Error if there's an issue retrieving the bundle URLs or saving the bundle to the file path. */ private static async downloadAstraSecureConnectBundle( astraArgs: AstraServiceProviderArgs, scbPath: string ): Promise<void> { if (!astraArgs.datacenterID) { throw new Error("Astra datacenterID is not provided in args."); } // First POST request gets all bundle locations for the database_id const bundleURLTemplate = astraArgs.bundleUrlTemplate ? 
astraArgs.bundleUrlTemplate : "https://api.astra.datastax.com/v2/databases/{database_id}/secureBundleURL?all=true"; const url = bundleURLTemplate.replace( "{database_id}", astraArgs.datacenterID ); const postResponse = await fetch(url, { method: "POST", headers: { Authorization: `Bearer ${astraArgs.token}`, "Content-Type": "application/json", }, }); if (!postResponse.ok) { throw new Error(`HTTP error! Status: ${postResponse.status}`); } const postData = await postResponse.json(); if (!postData || !Array.isArray(postData) || postData.length === 0) { throw new Error("Failed to get secure bundle URLs."); } // Find the download URL for the region, if specified let { downloadURL } = postData[0]; if (astraArgs.regionName) { const regionalBundle = postData.find( (bundle) => bundle.region === astraArgs.regionName ); if (regionalBundle) { downloadURL = regionalBundle.downloadURL; } } // GET request to download the file itself, and write to disk const getResponse = await fetch(downloadURL); if (!getResponse.ok) { throw new Error(`HTTP error! Status: ${getResponse.status}`); } const bundleData = await getResponse.arrayBuffer(); await fs.writeFile(scbPath, Buffer.from(bundleData)); } } /* ===================================================================================================================== * ===================================================================================================================== * Cassandra Table * ===================================================================================================================== * ===================================================================================================================== */ /** * Represents the definition of a column within a Cassandra table schema. * This interface is used to specify the properties of table columns during table creation * and to define how columns are utilized in select queries. * * Properties: * - `name`: The name of the column. 
* - `type`: The data type of the column, used during table creation to define the schema. * - `partition`: Optional. Specifies whether the column is part of the partition key. Important for table creation. * - `alias`: Optional. An alias for the column that can be used in select queries for readability or to avoid naming conflicts. * - `binds`: Optional. Specifies values to be bound to the column in queries, supporting parameterized query construction. * */ export interface Column { name: string; // Used by 'create' type: string; partition?: boolean; // Used by 'select' alias?: string; binds?: unknown | [unknown, ...unknown[]]; } /** * Defines an index on a Cassandra table column, facilitating efficient querying by column values. * This interface specifies the necessary configuration for creating secondary indexes on table columns, * enhancing query performance and flexibility. * * Properties: * - `name`: The name of the index. Typically related to the column it indexes for clarity. * - `value`: The name of the column on which the index is created. * - `options`: Optional. Custom options for the index, specified as a string. This can include various index * configurations supported by Cassandra, such as using specific indexing classes or options. * */ export interface Index { name: string; value: string; options?: string; } /** * Represents a filter condition used in constructing WHERE clauses for querying Cassandra tables. * Filters specify the criteria used to select rows from a table, based on column values. * * Properties: * - `name`: The name of the column to filter on. * - `value`: The value(s) to match against the column. Can be a single value or an array of values for operations like IN. * - `operator`: Optional. The comparison operator to use (e.g., '=', '<', '>', 'IN'). Defaults to '=' if not specified. 
* */ export interface Filter { name: string; value: unknown | [unknown, ...unknown[]]; operator?: string; } /** * Defines a type for specifying WHERE clause conditions in Cassandra queries. * This can be a single `Filter` object, an array of `Filter` objects for multiple conditions, * or a `Record<string, unknown>` for simple equality conditions keyed by column name. */ export type WhereClause = Filter[] | Filter | Record<string, unknown>; /** * Defines the configuration arguments for initializing a Cassandra table within an application. * This interface extends `AsyncCallerParams`, incorporating asynchronous operation configurations, * and adds specific properties for table creation, query execution, and data manipulation in a * Cassandra database context. * * Properties: * - `table`: The name of the table to be used or created. * - `keyspace`: The keyspace within which the table exists or will be created. * - `primaryKey`: Specifies the column(s) that constitute the primary key of the table. This can be a single * `Column` object for a simple primary key or an array of `Column` objects for composite keys. * - `nonKeyColumns`: Defines columns that are not part of the primary key. Similar to `primaryKey`, this can be a * single `Column` object or an array of `Column` objects, supporting flexible table schema definitions. * - `withClause`: Optional. A string containing additional CQL table options to be included in the CREATE TABLE statement. * This enables the specification of various table behaviors and properties, such as compaction strategies * and TTL settings. * - `indices`: Optional. An array of `Index` objects defining secondary indices on the table for improved query performance * on non-primary key columns. * - `batchSize`: Optional. Specifies the default size of batches for batched write operations to the table, affecting * performance and consistency trade-offs. 
* */ export interface CassandraTableArgs extends AsyncCallerParams { table: string; keyspace: string; primaryKey: Column | Column[]; nonKeyColumns: Column | Column[]; withClause?: string; indices?: Index[]; batchSize?: number; } /** * Represents a Cassandra table, encapsulating functionality for schema definition, data manipulation, and querying. * This class provides a high-level abstraction over Cassandra's table operations, including creating tables, * inserting, updating, selecting, and deleting records. It leverages the CassandraClient for executing * operations and supports asynchronous interactions with the database. * * Key features include: * - Table and keyspace management: Allows for specifying table schema, including primary keys, columns, * and indices, and handles the creation of these elements within the specified keyspace. * - Data manipulation: Offers methods for inserting (upserting) and deleting data in batches or individually, * with support for asynchronous operation and concurrency control. * - Querying: Enables selecting data with flexible filtering, sorting, and pagination options. * * The class is designed to be instantiated with a set of configuration arguments (`CassandraTableArgs`) * that define the table's structure and operational parameters, providing a streamlined interface for * interacting with Cassandra tables in a structured and efficient manner. * * Usage Example: * ```typescript * const tableArgs: CassandraTableArgs = { * table: 'my_table', * keyspace: 'my_keyspace', * primaryKey: [{ name: 'id', type: 'uuid', partition: true }], * nonKeyColumns: [{ name: 'data', type: 'text' }], * }; * const cassandraClient = new CassandraClient(clientConfig); * const myTable = new CassandraTable(tableArgs, cassandraClient); * ``` * * This class simplifies Cassandra database interactions, making it easier to perform robust data operations * while maintaining clear separation of concerns and promoting code reusability. 
*/ export class CassandraTable { private client: Client; private readonly keyspace: string; private readonly table: string; private primaryKey: Column[]; private nonKeyColumns: Column[]; private indices: Index[]; private withClause: string; private batchSize: number; private initializationPromise: Promise<void> | null = null; private asyncCaller: AsyncCaller; private constructorArgs: CassandraTableArgs; /** * Initializes a new instance of the CassandraTable class with specified configuration. * This includes setting up the table schema (primary key, columns, and indices) and * preparing the environment for executing queries against a Cassandra database. * * @param args Configuration arguments defining the table schema and operational settings. * @param client Optional. A Cassandra Client instance. If not provided, one will be created * using the configuration specified in `args`. */ constructor(args: CassandraTableArgs, client?: Client) { const { keyspace, table, primaryKey, nonKeyColumns, withClause = "", indices = [], batchSize = 1, maxConcurrency = 25, } = args; // Set constructor args, which would include default values this.constructorArgs = { withClause, indices, batchSize, maxConcurrency, ...args, }; this.asyncCaller = new AsyncCaller(this.constructorArgs); // Assign properties this.keyspace = keyspace; this.table = table; this.primaryKey = Array.isArray(primaryKey) ? primaryKey : [primaryKey]; this.nonKeyColumns = Array.isArray(nonKeyColumns) ? nonKeyColumns : [nonKeyColumns]; this.withClause = withClause.trim().replace(/^with\s*/i, ""); this.indices = indices; this.batchSize = batchSize; // Start initialization but don't wait for it to complete here this.initialize(client).catch((error) => { console.error("Error during CassandraStore initialization:", error); }); } /** * Executes a SELECT query on the Cassandra table with optional filtering, ordering, and pagination. 
* Allows for specifying columns to return, filter conditions, sort order, and limits on the number of results. * * @param columns Optional. Columns to include in the result set. If omitted, all columns are selected. * @param filter Optional. Conditions to apply to the query for filtering results. * @param orderBy Optional. Criteria to sort the result set. * @param limit Optional. Maximum number of records to return. * @param allowFiltering Optional. Enables ALLOW FILTERING option for queries that cannot be executed directly due to Cassandra's query restrictions. * @param fetchSize Optional. The number of rows to fetch per page (for pagination). * @param pagingState Optional. The paging state from a previous query execution, used for pagination. * @returns A Promise resolving to the query result set. */ async select( columns?: Column[], filter?: WhereClause, orderBy?: Filter[], limit?: number, allowFiltering?: boolean, fetchSize?: number, pagingState?: string ): Promise<driverTypes.ResultSet> { await this.initialize(); // Ensure we have an array of Filter from the public interface const filters = this.asFilters(filter); // If no columns are specified, use all columns const queryColumns = columns || [...this.primaryKey, ...this.nonKeyColumns]; const queryStr = this.buildSearchQuery( queryColumns, filters, orderBy, limit, allowFiltering ); const queryParams = []; queryColumns.forEach(({ binds }) => { if (binds !== undefined && binds !== null) { if (Array.isArray(binds)) { queryParams.push(...binds); } else { queryParams.push(binds); } } }); if (filters) { filters.forEach(({ value }) => { if (Array.isArray(value)) { queryParams.push(...value); } else { queryParams.push(value); } }); } if (orderBy) { orderBy.forEach(({ value }) => { if (value !== undefined && value !== null) { if (Array.isArray(value)) { queryParams.push(...value); } else { queryParams.push(value); } } }); } if (limit) { queryParams.push(limit); } const execOptions = { prepare: true, fetchSize: 
fetchSize || undefined, pageState: pagingState || undefined, }; return this.client.execute(queryStr, queryParams, execOptions); } /** * Validates the correspondence between provided values and specified columns for database operations. * This method checks if the number of values matches the number of specified columns, ensuring * data integrity before executing insert or update operations. It also defaults to using all table columns * if specific columns are not provided. Throws an error if the validation fails. * * @param values An array of values or an array of arrays of values to be inserted or updated. Each * inner array represents a set of values corresponding to one row in the table. * @param columns Optional. An array of `Column` objects specifying the columns to be used for the operation. * If not provided, the method defaults to using both primary key and non-key columns of the table. * @returns An array of `Column` objects that have been validated for the operation. * @throws Error if the number of provided values does not match the number of specified columns. * @private */ private _columnCheck( values: unknown[] | unknown[][], columns?: Column[] ): Column[] { const cols = columns || [...this.primaryKey, ...this.nonKeyColumns]; if (!cols || cols.length === 0) { throw new Error("Columns must be specified."); } const firstValueSet = Array.isArray(values[0]) ? values[0] : values; if (firstValueSet && firstValueSet.length !== cols.length) { throw new Error("The number of values must match the number of columns."); } return cols; } /** * Inserts or updates records in the Cassandra table in batches, managing concurrency and batching size. * This method organizes the provided values into batches and uses `_upsert` to perform the database operations. * * @param values An array of arrays, where each inner array contains values for a single record. * @param columns Optional. Columns to be included in the insert/update operations. Defaults to all table columns. 
* @param batchSize Optional. The size of each batch for the operation. Defaults to the class's batchSize property. * @returns A Promise that resolves once all records have been upserted. */ async upsert( values: unknown[][], columns?: Column[], batchSize: number = this.batchSize ): Promise<void> { if (values.length === 0) { return; } // Ensure the store is initialized before proceeding await this.initialize(); const upsertColumns = this._columnCheck(values, columns); // Initialize an array to hold promises for each batch insert const upsertPromises: Promise<void>[] = []; // Buffers to hold the current batch of vectors and documents let currentBatch: unknown[][] = []; // Loop through each vector/document pair to insert; we use // <= vectors.length to ensure the last batch is inserted for (let i = 0; i <= values.length; i += 1) { // Check if we're still within the array boundaries if (i < values.length) { // Add the current vector and document to the batch currentBatch.push(values[i]); } // Check if we've reached the batch size or end of the array if (currentBatch.length >= batchSize || i === values.length) { // Only proceed if there are items in the current batch if (currentBatch.length > 0) { // Create copies of the current batch arrays to use in the async insert operation const batch = [...currentBatch]; // Execute the insert using the AsyncCaller - it will handle concurrency and queueing. upsertPromises.push( this.asyncCaller.call(() => this._upsert(batch, upsertColumns)) ); // Clear the current buffers for the next iteration currentBatch = []; } } } // Wait for all insert operations to complete. await Promise.all(upsertPromises); } /** * Deletes rows from the Cassandra table that match the specified WHERE clause conditions. * * @param whereClause Defines the conditions that must be met for rows to be deleted. Can be a single filter, * an array of filters, or a key-value map translating to filter conditions. 
* @returns A Promise that resolves when the DELETE operation has completed. */ async delete(whereClause: WhereClause) { await this.initialize(); const filters = this.asFilters(whereClause); const queryStr = `DELETE FROM ${this.keyspace}.${ this.table } ${this.buildWhereClause(filters)}`; const queryParams = filters.flatMap(({ value }) => { if (Array.isArray(value)) { return value; } else { return [value]; } }); return this.client.execute(queryStr, queryParams, { prepare: true, }); } /** * Retrieves the Node.js Cassandra client instance associated with this table. * This method ensures that the client is initialized and ready for use, returning the * Cassandra client object that can be used for database operations directly. * It initializes the client if it has not already been initialized. * * @returns A Promise that resolves to the Cassandra Client instance used by this table for database interactions. */ async getClient() { await this.initialize(); return this.client; } /** * Constructs the PRIMARY KEY clause for a Cassandra CREATE TABLE statement based on the specified columns. * This method organizes the provided columns into partition and clustering keys, forming the necessary syntax * for the PRIMARY KEY clause in a Cassandra table schema definition. It supports complex primary key structures, * including composite partition keys and clustering columns. * * - Partition columns are those marked with the `partition` property. If multiple partition columns are provided, * they are grouped together in parentheses as a composite partition key. * - Clustering columns are those not marked as partition keys and are listed after the partition key(s). * They determine the sort order of rows within a partition. * * The method ensures the correct syntax for primary keys, handling both simple and composite key structures, * and throws an error if no partition or clustering columns are provided. 
* * @param columns An array of `Column` objects representing the columns to be included in the primary key. * Each column must have a `name` and may have a `partition` boolean indicating if it is part * of the partition key. * @returns The PRIMARY KEY clause as a string, ready to be included in a CREATE TABLE statement. * @throws Error if no columns are marked as partition keys or if no columns are provided. * @private */ private buildPrimaryKey(columns: Column[]): string { // Partition columns may be specified with optional attribute col.partition const partitionColumns = columns .filter((col) => col.partition) .map((col) => col.name) .join(", "); // All columns not part of the partition key are clustering columns const clusteringColumns = columns .filter((col) => !col.partition) .map((col) => col.name) .join(", "); let primaryKey = ""; // If partition columns are specified, they are included in a () wrapper // If not, the clustering columns are used, and the first clustering column // is the partition key per normal Cassandra behaviour. if (partitionColumns && clusteringColumns) { primaryKey = `PRIMARY KEY ((${partitionColumns}), ${clusteringColumns})`; } else if (partitionColumns) { primaryKey = `PRIMARY KEY (${partitionColumns})`; } else if (clusteringColumns) { primaryKey = `PRIMARY KEY (${clusteringColumns})`; } else { throw new Error( "No partition or clustering columns provided for PRIMARY KEY definition." ); } return primaryKey; } /** * Type guard that checks if a given object conforms to the `Filter` interface. * This method is used to determine if an object can be treated as a filter for Cassandra * query conditions. It evaluates the object's structure, specifically looking for `name` * and `value` properties, which are essential for defining a filter in Cassandra queries. * * @param obj The object to be evaluated. * @returns A boolean value indicating whether the object is a `Filter`. 
Returns `true` * if the object has both `name` and `value` properties, signifying it meets the * criteria for being used as a filter in database operations; otherwise, returns `false`. * @private */ private isFilter(obj: unknown): obj is Filter { return ( typeof obj === "object" && obj !== null && "name" in obj && "value" in obj ); } /** * Helper to convert Record<string,unknown> to a Filter[] * @param record: a key-value Record collection * @returns Record as a Filter[] */ private convertToFilters(record: Record<string, unknown>): Filter[] { return Object.entries(record).map(([name, value]) => ({ name, value, operator: "=", })); } /** * Converts a key-value pair record into an array of `Filter` objects suitable for Cassandra query conditions. * This utility method allows for a more flexible specification of filter conditions by transforming * a simple object notation into the structured format expected by Cassandra query builders. Each key-value * pair in the record is interpreted as a filter condition, where the key represents the column name and * the value represents the filtering criterion. * * The method assumes a default equality operator for each filter. It is particularly useful for * converting concise filter specifications into the detailed format required for constructing CQL queries. * * @param record A key-value pair object where each entry represents a filter condition, with the key * as the column name and the value as the filter value. The value can be a single value * or an array to support IN queries with multiple criteria. * @returns An array of `Filter` objects, each representing a condition extracted from the input record. * The array can be directly used in constructing query WHERE clauses. 
* @private */ private asFilters(record: WhereClause | undefined): Filter[] { if (!record) { return []; } // If record is already an array if (Array.isArray(record)) { return record.flatMap((item) => { // Check if item is a Filter before passing it to convertToFilters if (this.isFilter(item)) { return [item]; } else { // Here item is treated as Record<string, unknown> return this.convertToFilters(item); } }); } // If record is a single Filter object, return it in an array if (this.isFilter(record)) { return [record]; } // If record is a Record<string, unknown>, convert it to an array of Filter return this.convertToFilters(record); } /** * Constructs the WHERE clause of a CQL query from an array of `Filter` objects. * This method generates the conditional part of a Cassandra Query Language (CQL) statement, * allowing for complex query constructions based on provided filters. Each filter in the array * translates into a condition within the WHERE clause, with support for various comparison operators. * * The method handles the assembly of these conditions into a syntactically correct CQL WHERE clause, * including the appropriate use of placeholders (?) for parameter binding in prepared statements. * It supports a range of operators, defaulting to "=" (equality) if an operator is not explicitly specified * in a filter. Filters with multiple values (e.g., for IN conditions) are also correctly formatted. * * @param filters Optional. An array of `Filter` objects representing the conditions to apply in the WHERE clause. * Each `Filter` includes a column name (`name`), a value or array of values (`value`), and optionally, * an operator (`operator`). If no filters are provided, an empty string is returned. * @returns The constructed WHERE clause as a string, ready to be appended to a CQL query. If no filters * are provided, returns an empty string, indicating no WHERE clause should be applied. 
* @private */ private buildWhereClause(filters?: Filter[]): string { if (!filters || filters.length === 0) { return ""; } const whereConditions = filters.map(({ name, operator = "=", value }) => { // Normalize the operator to handle case-insensitive comparison const normalizedOperator = operator.toUpperCase(); // Convert value to an array if it's not one, to simplify processing const valueArray = Array.isArray(value) ? value : [value]; if (valueArray.length === 1 && normalizedOperator !== "IN") { return `${name} ${operator} ?`; } else { // Remove quoted strings from 'name' to prevent counting '?' inside quotes as placeholders const quotesPattern = /'[^']*'|"[^"]*"/g; const modifiedName = name.replace(quotesPattern, ""); const nameQuestionMarkCount = (modifiedName.match(/\?/g) || []).length; // Check if there are enough elements in the array for the right side of the operator, // adjusted for any '?' placeholders within the 'name' itself if (valueArray.length < nameQuestionMarkCount + 1) { throw new Error( "Insufficient bind variables for the filter condition." ); } // Generate placeholders, considering any '?' placeholders that might have been part of 'name' const effectiveLength = Math.max( valueArray.length - nameQuestionMarkCount, 1 ); const placeholders = new Array(effectiveLength).fill("?").join(", "); // Wrap placeolders in a () if the operator is IN if (normalizedOperator === "IN") { return `${name} ${operator} (${placeholders})`; } else { return `${name} ${operator} ${placeholders}`; } } }); return `WHERE ${whereConditions.join(" AND ")}`; } /** * Generates the ORDER BY clause for a CQL query from an array of `Filter` objects. * This method forms the sorting part of a Cassandra Query Language (CQL) statement, * allowing for detailed control over the order of results based on specified column names * and directions. Each filter in the array represents a column and direction to sort by. 
* * It is important to note that unlike the traditional use of `Filter` objects for filtering, * in this context, they are repurposed to specify sorting criteria. The `name` field indicates * the column to sort by, and the `operator` field is used to specify the sort direction (`ASC` or `DESC`). * The `value` field is not utilized for constructing the ORDER BY clause and can be omitted. * * @param filters Optional. An array of `Filter` objects where each object specifies a column and * direction for sorting. The `name` field of each filter represents the column name, * and the `operator` field should contain the sorting direction (`ASC` or `DESC`). * If no filters are provided, the method returns an empty string. * @returns The constructed ORDER BY clause as a string, suitable for appending to a CQL query. * If no sorting criteria are provided, returns an empty string, indicating no ORDER BY * clause should be applied to the query. * @private */ private buildOrderByClause(filters?: Filter[]): string { if (!filters || filters.length === 0) { return ""; } const orderBy = filters.map(({ name, operator, value }) => { if (value) { return `${name} ${operator} ?`; } else if (operator) { return `${name} ${operator}`; } else { return name; } }); return `ORDER BY ${orderBy.join(" , ")}`; } /** * Constructs a CQL search query string for retrieving records from a Cassandra table. * This method combines various query components, including selected columns, filters, sorting criteria, * and pagination options, to form a complete and executable CQL query. It allows for fine-grained control * over the query construction process, enabling the inclusion of conditional filtering, ordering of results, * and limiting the number of returned records, with an optional allowance for filtering. 
* * The method meticulously constructs the SELECT part of the query using the provided columns, applies * the WHERE clause based on given filters, sorts the result set according to the orderBy criteria, and * restricts the number of results with the limit parameter. Additionally, it can enable the ALLOW FILTERING * option for queries that require server-side filtering beyond the capabilities of primary and secondary indexes. * * @param queryColumns An array of `Column` objects specifying which columns to include in the result set. * Each column can also have an alias defined for use in the query's result set. * @param filters Optional. An array of `Filter` objects to apply as conditions in the WHERE clause of the query. * @param orderBy Optional. An array of `Filter` objects specifying the ordering of the returned records. * Although repurposed as `Filter` objects, here they define the column names and the sort direction (ASC/DESC). * @param limit Optional. A numeric value specifying the maximum number of records the query should return. * @param allowFiltering Optional. A boolean flag that, when true, includes the ALLOW FILTERING clause in the query, * permitting Cassandra to execute queries that might not be efficiently indexable. * @returns A string representing the fully constructed CQL search query, ready for execution against a Cassandra table. * @private */ private buildSearchQuery( queryColumns: Column[], filters?: Filter[], orderBy?: Filter[], limit?: number, allowFiltering?: boolean ): string { const selectColumns = queryColumns .map((col) => (col.alias ? `${col.name} AS ${col.alias}` : col.name)) .join(", "); const whereClause = filters ? this.buildWhereClause(filters) : ""; const orderByClause = orderBy ? this.buildOrderByClause(orderBy) : ""; const limitClause = limit ? "LIMIT ?" : ""; const allowFilteringClause = allowFiltering ? 
"ALLOW FILTERING" : ""; const cqlQuery = `SELECT ${selectColumns} FROM ${this.keyspace}.${this.table} ${whereClause} ${orderByClause} ${limitClause} ${allowFilteringClause}`; return cqlQuery; } /** * Initializes the CassandraTable instance, ensuring it is ready for database operations. * This method is responsible for setting up the internal Cassandra client, creating the table * if it does not already exist, and preparing any indices as specified in the table configuration. * The initialization process is performed only once; subsequent calls return the result of the * initial setup. If a Cassandra `Client` instance is provided, it is used directly; otherwise, * a new client is created based on the table's configuration. * * The initialization includes: * - Assigning the provided or newly created Cassandra client to the internal client property. * - Executing a CQL statement to create the table with the specified columns, primary key, and * any additional options provided in the `withClause`. * - Creating any custom indices as defined in the table's indices array. * * This method leverages the asynchronous nature of JavaScript to perform potentially time-consuming * tasks, such as network requests to the Cassandra cluster, without blocking the execution thread. * * @param client Optional. A `Client` instance from the cassandra-driver package. If provided, this client * is used for all database operations performed by the instance. Otherwise, a new client * is instantiated based on the configuration provided at the CassandraTable instance creation. * @returns A Promise that resolves once the initialization process has completed, indicating the instance * is ready for database operations. If initialization has already occurred, the method returns * immediately without repeating the setup process. 
* @private */ private async initialize(client?: Client): Promise<void> { // If already initialized or initialization is in progress, return the existing promise if (this.initializationPromise) { return this.initializationPromise; } // Start the initialization process and store the promise this.initializationPromise = this.performInitialization(client) .then(() => { // Initialization successful }) .catch((error) => { // Reset to allow retrying in case of failure this.initializationPromise = null; throw error; }); return this.initializationPromise; } /** * Performs the actual initialization tasks for the CassandraTable instance. * This method is invoked by the `initialize` method to carry out the concrete steps necessary for preparing * the CassandraTable instance for operation. It includes establishing the Cassandra client (either by utilizing * an existing client passed as a parameter or by creating a new one based on the instance's configuration), * and executing the required CQL statements to create the table and its indices according to the specifications * provided during the instance's creation. * * The process encapsulates: * 1. Assigning the provided Cassandra `Client` to the instance, or creating a new one if none is provided. * 2. Creating the table with the specified schema if it does not exist. This involves constructing a CQL * `CREATE TABLE` statement that includes columns, primary key configuration, and any specified table options. * 3. Creating any indices specified in the instance's configuration using CQL `CREATE INDEX` statements, allowing * for custom index options if provided. * * This method ensures that the table and its environment are correctly set up for subsequent database operations, * encapsulating initialization logic to maintain separation of concerns and improve code readability and maintainability. * * @param client Optional. An instance of the Cassandra `Client` from the cassandra-driver package. 
If provided, * this client is used for all interactions with the Cassandra database. If not provided, a new client * is instantiated based on the provided configuration during the CassandraTable instance creation. * @returns A Promise that resolves when all initialization steps have been successfully completed, indicating * that the CassandraTable instance is fully prepared for database operations. * @private */ private async performInitialization(client?: Client) { if (client) { this.client = client; } else { this.client = await CassandraClientFactory.getClient( this.constructorArgs ); } const allColumns = [...this.primaryKey, ...this.nonKeyColumns]; let cql = ""; cql = `CREATE TABLE IF NOT EXISTS ${this.keyspace}.${this.table} ( ${ allColumns.length > 0 ? `${allColumns.map((col) => `${col.name} ${col.type}`).join(", ")}` : "" } , ${this.buildPrimaryKey(this.primaryKey)} ) ${this.withClause ? `WITH ${this.withClause}` : ""};`; await this.client.execute(cql); // Helper function to format custom index OPTIONS clause const _formatOptions = (options: string | undefined): string => { if (!options) { return ""; } let formattedOptions = options.trim(); if (!formattedOptions.toLowerCase().startsWith("with options =")) { formattedOptions = `WITH OPTIONS = ${formattedOptions}`; } return formattedOptions; }; for await (const { name, value, options } of this.indices) { const optionsClause = _formatOptions(options); cql = `CREATE CUSTOM INDEX IF NOT EXISTS idx_${this.table}_${name} ON ${this.keyspace}.${this.table} ${value} USING 'StorageAttachedIndex' ${optionsClause};`; await this.client.execute(cql); } } /** * Performs the actual insert or update operation (upsert) on the Cassandra table for a batch of values. * This method constructs and executes a CQL INSERT statement for each value in the batch. * * @param values An array of arrays, where each inner array contains values corresponding to the specified columns. * @param columns Optional. 
Specifies the columns into which the values should be inserted. Defaults to all columns. * @returns A Promise that resolves when the operation has completed. * @private */ private async _upsert( values: unknown[][], columns?: Column[] ): Promise<void> { if (values.length === 0) { return; } await this.initialize(); const upsertColumns = this._columnCheck(values, columns); const upsertColumnNames = upsertColumns.map((col) => col.name); const columnCount = upsertColumnNames.length; const bindPlaceholders = Array(columnCount).fill("?").join(", "); const upsertString = `INSERT INTO ${this.keyspace}.${ this.table } (${upsertColumnNames.join(", ")}) VALUES (${bindPlaceholders})`; // Initialize an array to hold query objects const queries = []; for (let i = 0; i < values.length; i += 1) { const query = { query: upsertString, params: values[i], }; // Add the query to the list queries.push(query); } // Execute the queries: use a batch if multiple, otherwise execute a single query if (queries.length === 1) { await this.client.execute(queries[0].query, queries[0].params, { prepare: true, }); } else { await this.client.batch(queries, { prepare: true, logged: false }); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/ollama.ts
import { IterableReadableStream } from "@langchain/core/utils/stream"; import type { StringWithAutocomplete } from "@langchain/core/utils/types"; import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; export interface OllamaInput { embeddingOnly?: boolean; f16KV?: boolean; frequencyPenalty?: number; headers?: Record<string, string>; keepAlive?: string; logitsAll?: boolean; lowVram?: boolean; mainGpu?: number; model?: string; baseUrl?: string; mirostat?: number; mirostatEta?: number; mirostatTau?: number; numBatch?: number; numCtx?: number; numGpu?: number; numGqa?: number; numKeep?: number; numPredict?: number; numThread?: number; penalizeNewline?: boolean; presencePenalty?: number; repeatLastN?: number; repeatPenalty?: number; ropeFrequencyBase?: number; ropeFrequencyScale?: number; temperature?: number; stop?: string[]; tfsZ?: number; topK?: number; topP?: number; typicalP?: number; useMLock?: boolean; useMMap?: boolean; vocabOnly?: boolean; format?: StringWithAutocomplete<"json">; } export interface OllamaRequestParams { model: string; format?: StringWithAutocomplete<"json">; images?: string[]; options: { embedding_only?: boolean; f16_kv?: boolean; frequency_penalty?: number; logits_all?: boolean; low_vram?: boolean; main_gpu?: number; mirostat?: number; mirostat_eta?: number; mirostat_tau?: number; num_batch?: number; num_ctx?: number; num_gpu?: number; num_gqa?: number; num_keep?: number; num_thread?: number; num_predict?: number; penalize_newline?: boolean; presence_penalty?: number; repeat_last_n?: number; repeat_penalty?: number; rope_frequency_base?: number; rope_frequency_scale?: number; temperature?: number; stop?: string[]; tfs_z?: number; top_k?: number; top_p?: number; typical_p?: number; use_mlock?: boolean; use_mmap?: boolean; vocab_only?: boolean; }; } export type OllamaMessage = { role: StringWithAutocomplete<"user" | "assistant" | "system">; content: string; images?: string[]; }; export interface 
OllamaGenerateRequestParams extends OllamaRequestParams { prompt: string; } export interface OllamaChatRequestParams extends OllamaRequestParams { messages: OllamaMessage[]; } export type BaseOllamaGenerationChunk = { model: string; created_at: string; done: boolean; total_duration?: number; load_duration?: number; prompt_eval_count?: number; prompt_eval_duration?: number; eval_count?: number; eval_duration?: number; }; export type OllamaGenerationChunk = BaseOllamaGenerationChunk & { response: string; }; export type OllamaChatGenerationChunk = BaseOllamaGenerationChunk & { message: OllamaMessage; }; export type OllamaCallOptions = BaseLanguageModelCallOptions & { headers?: Record<string, string>; }; async function* createOllamaStream( url: string, params: OllamaRequestParams, options: OllamaCallOptions ) { let formattedUrl = url; if (formattedUrl.startsWith("http://localhost:")) { // Node 18 has issues with resolving "localhost" // See https://github.com/node-fetch/node-fetch/issues/1624 formattedUrl = formattedUrl.replace( "http://localhost:", "http://127.0.0.1:" ); } const response = await fetch(formattedUrl, { method: "POST", body: JSON.stringify(params), headers: { "Content-Type": "application/json", ...options.headers, }, signal: options.signal, }); if (!response.ok) { let error; const responseText = await response.text(); try { const json = JSON.parse(responseText); error = new Error( `Ollama call failed with status code ${response.status}: ${json.error}` ); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { error = new Error( `Ollama call failed with status code ${response.status}: ${responseText}` ); } // eslint-disable-next-line @typescript-eslint/no-explicit-any (error as any).response = response; throw error; } if (!response.body) { throw new Error( "Could not begin Ollama stream. Please check the given URL and try again." 
); } const stream = IterableReadableStream.fromReadableStream(response.body); const decoder = new TextDecoder(); let extra = ""; for await (const chunk of stream) { const decoded = extra + decoder.decode(chunk); const lines = decoded.split("\n"); extra = lines.pop() || ""; for (const line of lines) { try { yield JSON.parse(line); } catch (e) { console.warn(`Received a non-JSON parseable chunk: ${line}`); } } } } export async function* createOllamaGenerateStream( baseUrl: string, params: OllamaGenerateRequestParams, options: OllamaCallOptions ): AsyncGenerator<OllamaGenerationChunk> { yield* createOllamaStream(`${baseUrl}/api/generate`, params, options); } export async function* createOllamaChatStream( baseUrl: string, params: OllamaChatRequestParams, options: OllamaCallOptions ): AsyncGenerator<OllamaChatGenerationChunk> { yield* createOllamaStream(`${baseUrl}/api/chat`, params, options); }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/time.ts
/**
 * Pause execution for the given duration.
 * @param ms - Number of milliseconds to wait before resolving. Defaults to 1000.
 * @returns A promise that resolves once the timeout has elapsed.
 */
export async function sleep(ms = 1000): Promise<void> {
  await new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/ibm.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import { WatsonXAI } from "@ibm-cloud/watsonx-ai";
import {
  IamAuthenticator,
  BearerTokenAuthenticator,
  CloudPakForDataAuthenticator,
} from "ibm-cloud-sdk-core";
import {
  JsonOutputKeyToolsParserParams,
  JsonOutputToolsParser,
} from "@langchain/core/output_parsers/openai_tools";
import { OutputParserException } from "@langchain/core/output_parsers";
import { z } from "zod";
import { ChatGeneration } from "@langchain/core/outputs";
import { AIMessageChunk } from "@langchain/core/messages";
import { ToolCall } from "@langchain/core/messages/tool";
import { WatsonxAuth, WatsonxInit } from "../types/ibm.js";

/**
 * Builds a WatsonXAI SDK instance with the authenticator matching the
 * requested auth type:
 * - "iam": API-key based IAM authentication.
 * - "bearertoken": a pre-obtained bearer token.
 * - "cp4d": Cloud Pak for Data username/password/apikey authentication.
 * Any other auth type falls back to the SDK's default instantiation (which
 * resolves credentials itself, e.g. from the environment).
 *
 * @returns A configured WatsonXAI instance, or `undefined` when auth type is
 *          "cp4d" but the username/password/apikey triple is incomplete.
 */
export const authenticateAndSetInstance = ({
  watsonxAIApikey,
  watsonxAIAuthType,
  watsonxAIBearerToken,
  watsonxAIUsername,
  watsonxAIPassword,
  watsonxAIUrl,
  version,
  serviceUrl,
}: WatsonxAuth & Omit<WatsonxInit, "authenticator">): WatsonXAI | undefined => {
  if (watsonxAIAuthType === "iam" && watsonxAIApikey) {
    return WatsonXAI.newInstance({
      version,
      serviceUrl,
      authenticator: new IamAuthenticator({
        apikey: watsonxAIApikey,
      }),
    });
  } else if (watsonxAIAuthType === "bearertoken" && watsonxAIBearerToken) {
    return WatsonXAI.newInstance({
      version,
      serviceUrl,
      authenticator: new BearerTokenAuthenticator({
        bearerToken: watsonxAIBearerToken,
      }),
    });
  } else if (watsonxAIAuthType === "cp4d" && watsonxAIUrl) {
    // CP4D requires the full credential triple; when any part is missing we
    // fall through to the trailing `return undefined`.
    if (watsonxAIUsername && watsonxAIPassword && watsonxAIApikey)
      return WatsonXAI.newInstance({
        version,
        serviceUrl,
        authenticator: new CloudPakForDataAuthenticator({
          username: watsonxAIUsername,
          password: watsonxAIPassword,
          url: watsonxAIUrl,
          apikey: watsonxAIApikey,
        }),
      });
  } else
    return WatsonXAI.newInstance({
      version,
      serviceUrl,
    });
  return undefined;
};

// Mistral enforces a specific pattern for tool call IDs
// Thanks to Mistral for implementing this, I was unable to import which is why this is copied 1:1
const TOOL_CALL_ID_PATTERN = /^[a-zA-Z0-9]{9}$/;

// True when the ID already matches Mistral's required 9-character
// alphanumeric format.
export function _isValidMistralToolCallId(toolCallId: string): boolean {
  return TOOL_CALL_ID_PATTERN.test(toolCallId);
}

// Encodes a non-negative integer in base 62 (digits, lowercase, uppercase).
function _base62Encode(num: number): string {
  let numCopy = num;
  const base62 =
    "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
  if (numCopy === 0) return base62[0];
  const arr: string[] = [];
  const base = base62.length;
  while (numCopy) {
    arr.push(base62[numCopy % base]);
    numCopy = Math.floor(numCopy / base);
  }
  return arr.reverse().join("");
}

// Deterministic 32-bit string hash (shift-and-add), returned as a
// non-negative number so it can be base62-encoded.
function _simpleHash(str: string): number {
  let hash = 0;
  for (let i = 0; i < str.length; i += 1) {
    const char = str.charCodeAt(i);
    hash = (hash << 5) - hash + char;
    hash &= hash; // Convert to 32-bit integer
  }
  return Math.abs(hash);
}

// Maps an arbitrary tool call ID onto Mistral's 9-character alphanumeric
// format; IDs that already conform are returned unchanged, others are hashed
// and base62-encoded, then truncated or zero-padded to exactly 9 characters.
export function _convertToolCallIdToMistralCompatible(
  toolCallId: string
): string {
  if (_isValidMistralToolCallId(toolCallId)) {
    return toolCallId;
  } else {
    const hash = _simpleHash(toolCallId);
    const base62Str = _base62Encode(hash);
    if (base62Str.length >= 9) {
      return base62Str.slice(0, 9);
    } else {
      return base62Str.padStart(9, "0");
    }
  }
}

interface WatsonxToolsOutputParserParams<T extends Record<string, any>>
  extends JsonOutputKeyToolsParserParams<T> {}

/**
 * Output parser for watsonx tool-calling responses. Extracts tool calls from
 * chat generations and optionally validates their arguments against a zod
 * schema. Remembers the most recent tool call so partial (streaming) results
 * without tool calls can fall back to it.
 */
export class WatsonxToolsOutputParser<
  T extends Record<string, any> = Record<string, any>
> extends JsonOutputToolsParser<T> {
  static lc_name() {
    return "WatsonxToolsOutputParser";
  }

  lc_namespace = ["langchain", "watsonx", "output_parsers"];

  returnId = false;

  // Name of the tool whose arguments should be extracted.
  keyName: string;

  returnSingle = false;

  // Optional schema used to validate the parsed tool arguments.
  zodSchema?: z.ZodType<T>;

  // Most recent tool call observed; used as a fallback while streaming when
  // the current chunk carries no tool calls.
  latestCorrect?: ToolCall;

  constructor(params: WatsonxToolsOutputParserParams<T>) {
    super(params);
    this.keyName = params.keyName;
    this.returnSingle = params.returnSingle ?? this.returnSingle;
    this.zodSchema = params.zodSchema;
  }

  /**
   * Parses `result` (a JSON string or an already-parsed object) and, when a
   * zod schema was provided, validates it against that schema.
   * @throws OutputParserException when JSON parsing or schema validation fails.
   */
  protected async _validateResult(result: unknown): Promise<T> {
    let parsedResult = result;
    if (typeof result === "string") {
      try {
        parsedResult = JSON.parse(result);
      } catch (e: any) {
        throw new OutputParserException(
          `Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(e.message)}`,
          result
        );
      }
    } else {
      parsedResult = result;
    }
    if (this.zodSchema === undefined) {
      // No schema configured: trust the parsed value as-is.
      return parsedResult as T;
    }
    const zodParsedResult = await this.zodSchema.safeParseAsync(parsedResult);
    if (zodParsedResult.success) {
      return zodParsedResult.data;
    } else {
      throw new OutputParserException(
        `Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error.errors)}`,
        JSON.stringify(result, null, 2)
      );
    }
  }

  /**
   * Extracts the first tool call's arguments from a batch of (possibly
   * partial) chat generations, falling back to the last tool call seen on a
   * previous invocation when the current batch has none.
   */
  async parsePartialResult(generations: ChatGeneration[]): Promise<T> {
    // Gather every tool call across all generations' messages.
    const tools = generations.flatMap((generation) => {
      const message = generation.message as AIMessageChunk;
      if (!Array.isArray(message.tool_calls)) {
        return [];
      }
      const tool = message.tool_calls;
      return tool;
    });
    if (tools[0] === undefined) {
      // No tool call in this chunk; reuse the last one seen, if any.
      if (this.latestCorrect) tools.push(this.latestCorrect);
    }
    const [tool] = tools;
    // NOTE(review): if no tool call has ever been observed, `tool` is
    // undefined here and `.args` below throws a TypeError — confirm callers
    // guarantee at least one tool call before this path is reached.
    this.latestCorrect = tool;
    return tool.args as T;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/utils/zhipuai.ts
import jsonwebtoken from "jsonwebtoken"; const API_TOKEN_TTL_SECONDS = 3 * 60; const CACHE_TTL_SECONDS = API_TOKEN_TTL_SECONDS - 30; const tokenCache: { [key: string]: { token: string; createAt: number; }; } = {}; export const encodeApiKey = (apiSecretKey?: string, cache = true): string => { if (!apiSecretKey) throw new Error("Api_key is required"); try { if ( tokenCache[apiSecretKey] && Date.now() - tokenCache[apiSecretKey].createAt < CACHE_TTL_SECONDS * 1000 ) { return tokenCache[apiSecretKey].token; } const [apiKey, secret] = apiSecretKey.split("."); const payload = { api_key: apiKey, exp: Math.round(Date.now() * 1000) + API_TOKEN_TTL_SECONDS * 1000, timestamp: Math.round(Date.now() * 1000), }; // algorithm = "HS256", headers = { "alg": "HS256", "sign_type": "SIGN" } // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore const ret = jsonwebtoken.sign(payload, secret, { algorithm: "HS256", header: { alg: "HS256", sign_type: "SIGN" }, }); if (cache) { tokenCache[apiSecretKey] = { token: ret, createAt: Date.now(), }; } return ret; } catch (e) { throw new Error("invalid api_key"); } };
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/bedrock/anthropic.ts
import { AIMessage, AIMessageChunk, BaseMessage, HumanMessage, MessageContent, SystemMessage, ToolMessage, UsageMetadata, isAIMessage, } from "@langchain/core/messages"; import { ToolCall, ToolCallChunk } from "@langchain/core/messages/tool"; import { concat } from "@langchain/core/utils/stream"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export function extractToolCalls(content: Record<string, any>[]) { const toolCalls: ToolCall[] = []; for (const block of content) { if (block.type === "tool_use") { toolCalls.push({ name: block.name, args: block.input, id: block.id, type: "tool_call", }); } } return toolCalls; } function _formatImage(imageUrl: string) { const regex = /^data:(image\/.+);base64,(.+)$/; const match = imageUrl.match(regex); if (match === null) { throw new Error( [ "Anthropic only supports base64-encoded images currently.", "Example: data:image/png;base64,/9j/4AAQSk...", ].join("\n\n") ); } return { type: "base64", media_type: match[1] ?? "", data: match[2] ?? "", // eslint-disable-next-line @typescript-eslint/no-explicit-any } as any; } function _ensureMessageContents( messages: BaseMessage[] ): (SystemMessage | HumanMessage | AIMessage)[] { // Merge runs of human/tool messages into single human messages with content blocks. const updatedMsgs = []; for (const message of messages) { if (message._getType() === "tool") { if (typeof message.content === "string") { const previousMessage = updatedMsgs[updatedMsgs.length - 1]; if ( previousMessage?._getType() === "human" && Array.isArray(previousMessage.content) && "type" in previousMessage.content[0] && previousMessage.content[0].type === "tool_result" ) { // If the previous message was a tool result, we merge this tool message into it. previousMessage.content.push({ type: "tool_result", content: message.content, tool_use_id: (message as ToolMessage).tool_call_id, }); } else { // If not, we create a new human message with the tool result. 
updatedMsgs.push( new HumanMessage({ content: [ { type: "tool_result", content: message.content, tool_use_id: (message as ToolMessage).tool_call_id, }, ], }) ); } } else { updatedMsgs.push( new HumanMessage({ content: [ { type: "tool_result", content: _formatContent(message.content), tool_use_id: (message as ToolMessage).tool_call_id, }, ], }) ); } } else { updatedMsgs.push(message); } } return updatedMsgs; } export function _convertLangChainToolCallToAnthropic( toolCall: ToolCall // eslint-disable-next-line @typescript-eslint/no-explicit-any ): Record<string, any> { if (toolCall.id === undefined) { throw new Error(`Anthropic requires all tool calls to have an "id".`); } return { type: "tool_use", id: toolCall.id, name: toolCall.name, input: toolCall.args, }; } function _formatContent(content: MessageContent) { if (typeof content === "string") { return content; } else { const contentBlocks = content.flatMap((contentPart) => { if (contentPart.type === "image_url") { let source; if (typeof contentPart.image_url === "string") { source = _formatImage(contentPart.image_url); } else { source = _formatImage(contentPart.image_url.url); } return { type: "image" as const, // Explicitly setting the type as "image" source, }; } else if ( contentPart.type === "text" || contentPart.type === "text_delta" ) { if (contentPart.text === "") { return []; } // Assuming contentPart is of type MessageContentText here return { type: "text" as const, // Explicitly setting the type as "text" text: contentPart.text, }; } else if ( contentPart.type === "tool_use" || contentPart.type === "tool_result" ) { // TODO: Fix when SDK types are fixed return { ...contentPart, // eslint-disable-next-line @typescript-eslint/no-explicit-any } as any; } else if (contentPart.type === "input_json_delta") { return []; } else { throw new Error("Unsupported message content format"); } }); return contentBlocks; } } export function formatMessagesForAnthropic(messages: BaseMessage[]): { system?: string; messages: 
Record<string, unknown>[]; } { const mergedMessages = _ensureMessageContents(messages); let system: string | undefined; if (mergedMessages.length > 0 && mergedMessages[0]._getType() === "system") { if (typeof messages[0].content !== "string") { throw new Error("System message content must be a string."); } system = messages[0].content; } const conversationMessages = system !== undefined ? mergedMessages.slice(1) : mergedMessages; const formattedMessages = conversationMessages.map((message) => { let role; if (message._getType() === "human") { role = "user" as const; } else if (message._getType() === "ai") { role = "assistant" as const; } else if (message._getType() === "tool") { role = "user" as const; } else if (message._getType() === "system") { throw new Error( "System messages are only permitted as the first passed message." ); } else { throw new Error(`Message type "${message._getType()}" is not supported.`); } if (isAIMessage(message) && !!message.tool_calls?.length) { if (typeof message.content === "string") { if (message.content === "") { return { role, content: message.tool_calls.map( _convertLangChainToolCallToAnthropic ), }; } else { return { role, content: [ { type: "text", text: message.content }, ...message.tool_calls.map(_convertLangChainToolCallToAnthropic), ], }; } } else { const formattedContent = _formatContent(message.content); if (Array.isArray(formattedContent)) { const formattedToolsContent = message.tool_calls.map( _convertLangChainToolCallToAnthropic ); return { role, content: [...formattedContent, ...formattedToolsContent], }; } return { role, content: formattedContent, }; } } else { return { role, content: _formatContent(message.content), }; } }); return { messages: formattedMessages, system, }; } export function isAnthropicTool( tool: unknown ): tool is Record<string, unknown> { if (typeof tool !== "object" || !tool) return false; return "input_schema" in tool; } export function _makeMessageChunkFromAnthropicEvent( // 
eslint-disable-next-line @typescript-eslint/no-explicit-any data: Record<string, any>, fields: { coerceContentToString?: boolean; } ): AIMessageChunk | null { if (data.type === "message_start") { // eslint-disable-next-line @typescript-eslint/no-unused-vars const { content, usage, ...additionalKwargs } = data.message; // eslint-disable-next-line @typescript-eslint/no-explicit-any const filteredAdditionalKwargs: Record<string, any> = {}; for (const [key, value] of Object.entries(additionalKwargs)) { if (value !== undefined && value !== null) { filteredAdditionalKwargs[key] = value; } } return new AIMessageChunk({ content: fields.coerceContentToString ? "" : [], additional_kwargs: filteredAdditionalKwargs, }); } else if (data.type === "message_delta") { let usageMetadata: UsageMetadata | undefined; return new AIMessageChunk({ content: fields.coerceContentToString ? "" : [], additional_kwargs: { ...data.delta }, usage_metadata: usageMetadata, }); } else if ( data.type === "content_block_start" && data.content_block.type === "tool_use" ) { return new AIMessageChunk({ content: fields.coerceContentToString ? "" : [ { index: data.index, ...data.content_block, input: "", }, ], additional_kwargs: {}, }); } else if ( data.type === "content_block_delta" && data.delta.type === "text_delta" ) { const content = data.delta?.text; if (content !== undefined) { return new AIMessageChunk({ content: fields.coerceContentToString ? content : [ { index: data.index, ...data.delta, }, ], additional_kwargs: {}, }); } } else if ( data.type === "content_block_delta" && data.delta.type === "input_json_delta" ) { return new AIMessageChunk({ content: fields.coerceContentToString ? 
"" : [ { index: data.index, input: data.delta.partial_json, type: data.delta.type, }, ], additional_kwargs: {}, }); } else if ( data.type === "message_stop" && data["amazon-bedrock-invocationMetrics"] !== undefined ) { return new AIMessageChunk({ content: "", response_metadata: { "amazon-bedrock-invocationMetrics": data["amazon-bedrock-invocationMetrics"], }, usage_metadata: { input_tokens: data["amazon-bedrock-invocationMetrics"].inputTokenCount, output_tokens: data["amazon-bedrock-invocationMetrics"].outputTokenCount, total_tokens: data["amazon-bedrock-invocationMetrics"].inputTokenCount + data["amazon-bedrock-invocationMetrics"].outputTokenCount, }, }); } return null; } export function extractToolCallChunk( chunk: AIMessageChunk ): ToolCallChunk | undefined { let newToolCallChunk: ToolCallChunk | undefined; // Initial chunk for tool calls from anthropic contains identifying information like ID and name. // This chunk does not contain any input JSON. const toolUseChunks = Array.isArray(chunk.content) ? chunk.content.find((c) => c.type === "tool_use") : undefined; if ( toolUseChunks && "index" in toolUseChunks && "name" in toolUseChunks && "id" in toolUseChunks ) { newToolCallChunk = { args: "", id: toolUseChunks.id, name: toolUseChunks.name, index: toolUseChunks.index, type: "tool_call_chunk", }; } // Chunks after the initial chunk only contain the index and partial JSON. const inputJsonDeltaChunks = Array.isArray(chunk.content) ? 
chunk.content.find((c) => c.type === "input_json_delta") : undefined; if ( inputJsonDeltaChunks && "index" in inputJsonDeltaChunks && "input" in inputJsonDeltaChunks ) { if (typeof inputJsonDeltaChunks.input === "string") { newToolCallChunk = { args: inputJsonDeltaChunks.input, index: inputJsonDeltaChunks.index, type: "tool_call_chunk", }; } else { newToolCallChunk = { args: JSON.stringify(inputJsonDeltaChunks.input, null, 2), index: inputJsonDeltaChunks.index, type: "tool_call_chunk", }; } } return newToolCallChunk; } export function extractToken(chunk: AIMessageChunk): string | undefined { return typeof chunk.content === "string" && chunk.content !== "" ? chunk.content : undefined; } export function extractToolUseContent( chunk: AIMessageChunk, concatenatedChunks: AIMessageChunk | undefined ) { let newConcatenatedChunks = concatenatedChunks; // Remove `tool_use` content types until the last chunk. let toolUseContent: | { id: string; type: "tool_use"; name: string; input: Record<string, unknown>; } | undefined; if (!newConcatenatedChunks) { newConcatenatedChunks = chunk; } else { newConcatenatedChunks = concat(newConcatenatedChunks, chunk); } if ( Array.isArray(newConcatenatedChunks.content) && newConcatenatedChunks.content.find((c) => c.type === "tool_use") ) { try { const toolUseMsg = newConcatenatedChunks.content.find( (c) => c.type === "tool_use" ); if ( !toolUseMsg || !("input" in toolUseMsg || "name" in toolUseMsg || "id" in toolUseMsg) ) return; const parsedArgs = JSON.parse(toolUseMsg.input); if (parsedArgs) { toolUseContent = { type: "tool_use", id: toolUseMsg.id, name: toolUseMsg.name, input: parsedArgs, }; } } catch (_) { // no-op } } return { toolUseContent, concatenatedChunks: newConcatenatedChunks, }; } // eslint-disable-next-line @typescript-eslint/no-explicit-any export function _toolsInParams(params: Record<string, any>): boolean { return !!(params.tools && params.tools.length > 0); }
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/bedrock/index.ts
import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types"; import { AIMessage, AIMessageChunk, BaseMessage, } from "@langchain/core/messages"; import { StructuredToolInterface } from "@langchain/core/tools"; import { ChatGeneration, ChatGenerationChunk } from "@langchain/core/outputs"; import { _makeMessageChunkFromAnthropicEvent, extractToken, extractToolCallChunk, extractToolUseContent, extractToolCalls, formatMessagesForAnthropic, } from "./anthropic.js"; export type CredentialType = | AwsCredentialIdentity | Provider<AwsCredentialIdentity>; /** * format messages for Cohere Command-R and CommandR+ via AWS Bedrock. * * @param messages messages The base messages to format as a prompt. * * @returns The formatted prompt for Cohere. * * `system`: user system prompts. Overrides the default preamble for search query generation. Has no effect on tool use generations.\ * `message`: (Required) Text input for the model to respond to.\ * `chatHistory`: A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.\ * The following are required fields. * - `role` - The role for the message. 
Valid values are USER or CHATBOT.\ * - `message` – Text contents of the message.\ * * The following is example JSON for the chat_history field.\ * "chat_history": [ * {"role": "USER", "message": "Who discovered gravity?"}, * {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}]\ * * docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html */ function formatMessagesForCohere(messages: BaseMessage[]): { system?: string; message: string; chatHistory: Record<string, unknown>[]; } { const systemMessages = messages.filter( (system) => system._getType() === "system" ); const system = systemMessages .filter((m) => typeof m.content === "string") .map((m) => m.content) .join("\n\n"); const conversationMessages = messages.filter( (message) => message._getType() !== "system" ); const questionContent = conversationMessages.slice(-1); if (!questionContent.length || questionContent[0]._getType() !== "human") { throw new Error("question message content must be a human message."); } if (typeof questionContent[0].content !== "string") { throw new Error("question message content must be a string."); } const formattedMessage = questionContent[0].content; const formattedChatHistories = conversationMessages .slice(0, -1) .map((message) => { let role; switch (message._getType()) { case "human": role = "USER" as const; break; case "ai": role = "CHATBOT" as const; break; case "system": throw new Error("chat_history can not include system prompts."); default: throw new Error( `Message type "${message._getType()}" is not supported.` ); } if (typeof message.content !== "string") { throw new Error("message content must be a string."); } return { role, message: message.content, }; }); return { chatHistory: formattedChatHistories, message: formattedMessage, system, }; } /** Bedrock models. 
To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. */ export interface BaseBedrockInput { /** Model to use. For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api. */ model: string; /** The AWS region e.g. `us-west-2`. Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. */ region?: string; /** AWS Credentials. If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used. */ credentials?: CredentialType; /** Temperature. */ temperature?: number; /** Max tokens. */ maxTokens?: number; /** A custom fetch function for low-level access to AWS API. Defaults to fetch(). */ fetchFn?: typeof fetch; /** @deprecated Use endpointHost instead Override the default endpoint url. */ endpointUrl?: string; /** Override the default endpoint hostname. */ endpointHost?: string; /** * Optional additional stop sequences to pass to the model. Currently only supported for Anthropic and AI21. * @deprecated Use .bind({ "stop": [...] }) instead * */ stopSequences?: string[]; /** Additional kwargs to pass to the model. */ modelKwargs?: Record<string, unknown>; /** Whether or not to stream responses */ streaming: boolean; /** Trace settings for the Bedrock Guardrails. */ trace?: "ENABLED" | "DISABLED"; /** Identifier for the guardrail configuration. */ guardrailIdentifier?: string; /** Version for the guardrail configuration. */ guardrailVersion?: string; /** Required when Guardrail is in use. 
*/ guardrailConfig?: { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; }; awsAccessKeyId?: string; awsSecretAccessKey?: string; awsSessionToken?: string; } type Dict = { [key: string]: unknown }; /** * A helper class used within the `Bedrock` class. It is responsible for * preparing the input and output for the Bedrock service. It formats the * input prompt based on the provider (e.g., "anthropic", "ai21", * "amazon") and extracts the generated text from the service response. */ export class BedrockLLMInputOutputAdapter { /** Adapter class to prepare the inputs from Langchain to a format that LLM model expects. Also, provides a helper function to extract the generated text from the model response. */ static prepareInput( provider: string, prompt: string, maxTokens = 50, temperature = 0, stopSequences: string[] | undefined = undefined, modelKwargs: Record<string, unknown> = {}, bedrockMethod: "invoke" | "invoke-with-response-stream" = "invoke", guardrailConfig: | { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; } | undefined = undefined ): Dict { const inputBody: Dict = {}; if (provider === "anthropic") { inputBody.prompt = prompt; inputBody.max_tokens_to_sample = maxTokens; inputBody.temperature = temperature; inputBody.stop_sequences = stopSequences; } else if (provider === "ai21") { inputBody.prompt = prompt; inputBody.maxTokens = maxTokens; inputBody.temperature = temperature; inputBody.stopSequences = stopSequences; } else if (provider === "meta") { inputBody.prompt = prompt; inputBody.max_gen_len = maxTokens; inputBody.temperature = temperature; } else if (provider === "amazon") { inputBody.inputText = prompt; inputBody.textGenerationConfig = { maxTokenCount: maxTokens, temperature, }; } else if (provider === "cohere") { inputBody.prompt = prompt; inputBody.max_tokens = maxTokens; inputBody.temperature = temperature; inputBody.stop_sequences = stopSequences; if (bedrockMethod === "invoke-with-response-stream") 
{ inputBody.stream = true; } } else if (provider === "mistral") { inputBody.prompt = prompt; inputBody.max_tokens = maxTokens; inputBody.temperature = temperature; inputBody.stop = stopSequences; } if ( guardrailConfig && guardrailConfig.tagSuffix && guardrailConfig.streamProcessingMode ) { inputBody["amazon-bedrock-guardrailConfig"] = guardrailConfig; } return { ...inputBody, ...modelKwargs }; } static prepareMessagesInput( provider: string, messages: BaseMessage[], maxTokens = 1024, temperature = 0, stopSequences: string[] | undefined = undefined, modelKwargs: Record<string, unknown> = {}, guardrailConfig: | { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; } | undefined = undefined, tools: (StructuredToolInterface | Record<string, unknown>)[] = [] ): Dict { const inputBody: Dict = {}; if (provider === "anthropic") { const { system, messages: formattedMessages } = formatMessagesForAnthropic(messages); if (system !== undefined) { inputBody.system = system; } inputBody.anthropic_version = "bedrock-2023-05-31"; inputBody.messages = formattedMessages; inputBody.max_tokens = maxTokens; inputBody.temperature = temperature; inputBody.stop_sequences = stopSequences; if (tools.length > 0) { inputBody.tools = tools; } return { ...inputBody, ...modelKwargs }; } else if (provider === "cohere") { const { system, message: formattedMessage, chatHistory: formattedChatHistories, } = formatMessagesForCohere(messages); if (system !== undefined && system.length > 0) { inputBody.preamble = system; } inputBody.message = formattedMessage; inputBody.chat_history = formattedChatHistories; inputBody.max_tokens = maxTokens; inputBody.temperature = temperature; inputBody.stop_sequences = stopSequences; } else { throw new Error( "The messages API is currently only supported by Anthropic or Cohere" ); } if ( guardrailConfig && guardrailConfig.tagSuffix && guardrailConfig.streamProcessingMode ) { inputBody["amazon-bedrock-guardrailConfig"] = guardrailConfig; } return { 
...inputBody, ...modelKwargs }; } /** * Extracts the generated text from the service response. * @param provider The provider name. * @param responseBody The response body from the service. * @returns The generated text. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any static prepareOutput(provider: string, responseBody: any): string { if (provider === "anthropic") { return responseBody.completion; } else if (provider === "ai21") { return responseBody?.completions?.[0]?.data?.text ?? ""; } else if (provider === "cohere") { return responseBody?.generations?.[0]?.text ?? responseBody?.text ?? ""; } else if (provider === "meta") { return responseBody.generation; } else if (provider === "mistral") { return responseBody?.outputs?.[0]?.text; } // I haven't been able to get a response with more than one result in it. return responseBody.results?.[0]?.outputText; } static prepareMessagesOutput( provider: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any response: any, fields?: { coerceContentToString?: boolean; } ): ChatGeneration | undefined { const responseBody = response ?? {}; if (provider === "anthropic") { if (responseBody.type === "message") { return parseMessage(responseBody); } else if (responseBody.type === "message_start") { return parseMessage(responseBody.message, true); } const chunk = _makeMessageChunkFromAnthropicEvent(response, { coerceContentToString: fields?.coerceContentToString, }); if (!chunk) return undefined; const newToolCallChunk = extractToolCallChunk(chunk); let toolUseContent; const extractedContent = extractToolUseContent(chunk, undefined); if (extractedContent) { toolUseContent = extractedContent.toolUseContent; } // Filter partial `tool_use` content, and only add `tool_use` chunks if complete JSON available. const chunkContent = Array.isArray(chunk.content) ? 
chunk.content.filter((c) => c.type !== "tool_use") : chunk.content; if (Array.isArray(chunkContent) && toolUseContent) { chunkContent.push(toolUseContent); } // Extract the text content token for text field and runManager. const token = extractToken(chunk); return new ChatGenerationChunk({ message: new AIMessageChunk({ content: chunkContent, additional_kwargs: chunk.additional_kwargs, tool_call_chunks: newToolCallChunk ? [newToolCallChunk] : undefined, usage_metadata: chunk.usage_metadata, response_metadata: chunk.response_metadata, }), // Backwards compatibility generationInfo: { ...chunk.response_metadata }, text: token ?? "", }); } else if (provider === "cohere") { if (responseBody.event_type === "stream-start") { return parseMessageCohere(responseBody.message, true); } else if ( responseBody.event_type === "text-generation" && typeof responseBody?.text === "string" ) { return new ChatGenerationChunk({ message: new AIMessageChunk({ content: responseBody.text, }), text: responseBody.text, }); } else if (responseBody.event_type === "search-queries-generation") { return parseMessageCohere(responseBody); } else if ( responseBody.event_type === "stream-end" && responseBody.response !== undefined && responseBody["amazon-bedrock-invocationMetrics"] !== undefined ) { return new ChatGenerationChunk({ message: new AIMessageChunk({ content: "" }), text: "", generationInfo: { response: responseBody.response, "amazon-bedrock-invocationMetrics": responseBody["amazon-bedrock-invocationMetrics"], }, }); } else { if ( responseBody.finish_reason === "COMPLETE" || responseBody.finish_reason === "MAX_TOKENS" ) { return parseMessageCohere(responseBody); } else { return undefined; } } } else { throw new Error( "The messages API is currently only supported by Anthropic or Cohere." 
); } } } // eslint-disable-next-line @typescript-eslint/no-explicit-any function parseMessage(responseBody: any, asChunk?: boolean): ChatGeneration { const { content, id, ...generationInfo } = responseBody; let parsedContent; if ( Array.isArray(content) && content.length === 1 && content[0].type === "text" ) { parsedContent = content[0].text; } else if (Array.isArray(content) && content.length === 0) { parsedContent = ""; } else { parsedContent = content; } if (asChunk) { return new ChatGenerationChunk({ message: new AIMessageChunk({ content: parsedContent, additional_kwargs: { id }, }), text: typeof parsedContent === "string" ? parsedContent : "", generationInfo, }); } else { // TODO: we are throwing away here the text response, as the interface of this method returns only one const toolCalls = extractToolCalls(responseBody.content); if (toolCalls.length > 0) { return { message: new AIMessage({ content: "", additional_kwargs: { id }, tool_calls: toolCalls, }), text: typeof parsedContent === "string" ? parsedContent : "", generationInfo, }; } return { message: new AIMessage({ content: parsedContent, additional_kwargs: { id }, tool_calls: toolCalls, }), text: typeof parsedContent === "string" ? parsedContent : "", generationInfo, }; } } function parseMessageCohere( // eslint-disable-next-line @typescript-eslint/no-explicit-any responseBody: any, asChunk?: boolean ): ChatGeneration { const { text, ...generationInfo } = responseBody; let parsedContent = text; if (typeof text !== "string") { parsedContent = ""; } if (asChunk) { return new ChatGenerationChunk({ message: new AIMessageChunk({ content: parsedContent, }), text: parsedContent, generationInfo, }); } else { return { message: new AIMessage({ content: parsedContent, }), text: parsedContent, generationInfo, }; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils/@furkantoprak
lc_public_repos/langchainjs/libs/langchain-community/src/utils/@furkantoprak/bm25/LICENSE.md
# MIT License ## Copyright (c) 2020 Furkan Toprak Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils/@furkantoprak
lc_public_repos/langchainjs/libs/langchain-community/src/utils/@furkantoprak/bm25/BM25.ts
/** * Adapted from * https://github.com/FurkanToprak/OkapiBM25 * * Inlined due to CJS import issues. */ /** Gets word count. */ export const getWordCount = (corpus: string) => { return ((corpus || "").match(/\w+/g) || []).length; }; /** Number of occurences of a word in a string. */ export const getTermFrequency = (term: string, corpus: string) => { return ((corpus || "").match(new RegExp(term, "g")) || []).length; }; /** Inverse document frequency. */ export const getIDF = (term: string, documents: string[]) => { // Number of relevant documents. const relevantDocuments = documents.filter((document: string) => document.includes(term) ).length; return Math.log( (documents.length - relevantDocuments + 0.5) / (relevantDocuments + 0.5) + 1 ); }; /** Represents a document; useful when sorting results. */ export interface BMDocument { /** The document is originally scoreed. */ document: string; /** The score that the document recieves. */ score: number; } /** Constants that are free parameters used in BM25, specifically when generating inverse document frequency. */ export interface BMConstants { /** Free parameter. Is 0.75 by default. */ b?: number; /** Free parameter. Is 1.2 by default. Generally in range [1.2, 2.0] */ k1?: number; } /** If returns positive, the sorting results in secondEl coming before firstEl, else, firstEl comes before secondEL */ export type BMSorter = (firstEl: BMDocument, secondEl: BMDocument) => number; /** Implementation of Okapi BM25 algorithm. * @param documents: Collection of documents. * @param keywords: query terms. * @param constants: Contains free parameters k1 and b. b=0.75 and k1=1.2 by default. * @param sort: A function that allows you to sort queries by a given rule. If not provided, returns results corresponding to the original order. * If this option is provided, the return type will not be an array of scores but an array of documents with their scores. 
*/ export function BM25( documents: string[], keywords: string[], constants?: BMConstants, sorter?: BMSorter ): number[] | BMDocument[] { const b = constants && constants.b ? constants.b : 0.75; const k1 = constants && constants.k1 ? constants.k1 : 1.2; const documentLengths = documents.map((document: string) => getWordCount(document) ); const averageDocumentLength = documentLengths.reduce((a, b) => a + b, 0) / documents.length; const idfByKeyword = keywords.reduce((obj, keyword) => { obj.set(keyword, getIDF(keyword, documents)); return obj; }, new Map<string, number>()); const scores = documents.map((document: string, index: number) => { const score = keywords .map((keyword: string) => { const inverseDocumentFrequency = idfByKeyword.get(keyword); if (inverseDocumentFrequency === undefined) { throw new Error("Missing keyword."); } const termFrequency = getTermFrequency(keyword, document); const documentLength = documentLengths[index]; return ( (inverseDocumentFrequency * (termFrequency * (k1 + 1))) / (termFrequency + k1 * (1 - b + (b * documentLength) / averageDocumentLength)) ); }) .reduce((a: number, b: number) => a + b, 0); if (sorter) { return { score, document } as BMDocument; } return score; }); // sort the results if (sorter) { return (scores as BMDocument[]).sort(sorter); } return scores as number[]; }
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/tests/googlevertexai-connection.test.ts
import { describe, expect, it, jest } from "@jest/globals";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { GoogleVertexAILLMConnection } from "../googlevertexai-connection.js";

// Unit tests for request-URL construction in GoogleVertexAILLMConnection.
// The connection's last constructor argument toggles streaming mode, which
// changes the trailing RPC verb on the endpoint URL.
describe("GoogleVertexAILLMConnection", () => {
  it("should correctly build the url when useGooglePublishedModel param is not provided", async () => {
    // Non-streaming connection: expected to target the ":predict" RPC.
    const connection = new GoogleVertexAILLMConnection(
      {
        model: "text-bison",
      },
      new AsyncCaller({}),
      {
        request: jest.fn(),
        getProjectId: async () => "fake_project_id",
      },
      false
    );

    // Streaming connection: expected to target ":serverStreamingPredict".
    const streamingConnection = new GoogleVertexAILLMConnection(
      {
        model: "text-bison",
      },
      new AsyncCaller({}),
      {
        request: jest.fn(),
        getProjectId: async () => "fake_project_id",
      },
      true
    );

    const url = await connection.buildUrl();
    const streamedUrl = await streamingConnection.buildUrl();

    expect(url).toBe(
      "https://us-central1-aiplatform.googleapis.com/v1/projects/fake_project_id/locations/us-central1/publishers/google/models/text-bison:predict"
    );
    expect(streamedUrl).toBe(
      "https://us-central1-aiplatform.googleapis.com/v1/projects/fake_project_id/locations/us-central1/publishers/google/models/text-bison:serverStreamingPredict"
    );
  });

  it("should use the customModelURL when provided", async () => {
    const fakeClient = {
      request: jest.fn(),
      getProjectId: async () => "fake_project_id",
    };
    const asyncCaller = new AsyncCaller({});
    // A custom endpoint URL should be used verbatim as the base, with only
    // the RPC verb appended.
    const customModelURL =
      "https://us-central1-aiplatform.googleapis.com/v1/projects/fake_project_id/locations/us-central1/endpoints/99999999";
    const connection = new GoogleVertexAILLMConnection(
      {
        customModelURL,
      },
      asyncCaller,
      fakeClient,
      false
    );

    const streamingConnection = new GoogleVertexAILLMConnection(
      {
        customModelURL,
      },
      asyncCaller,
      fakeClient,
      true
    );

    const url = await connection.buildUrl();
    const streamedUrl = await streamingConnection.buildUrl();

    expect(url).toBe(
      "https://us-central1-aiplatform.googleapis.com/v1/projects/fake_project_id/locations/us-central1/endpoints/99999999:predict"
    );
    expect(streamedUrl).toBe(
      "https://us-central1-aiplatform.googleapis.com/v1/projects/fake_project_id/locations/us-central1/endpoints/99999999:serverStreamingPredict"
    );
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/tencent_hunyuan/web.ts
import sha256 from "crypto-js/sha256.js"; import hmacSha256 from "crypto-js/hmac-sha256.js"; import { getDate, service, signedHeaders } from "./common.js"; /** * Method that calculate Tencent Cloud API v3 signature * for making requests to the Tencent Cloud API. * See https://cloud.tencent.com/document/api/1729/101843. * @param host Tencent Cloud API host. * @param payload HTTP request body. * @param timestamp Sign timestamp in seconds. * @param secretId Tencent Cloud Secret ID, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param secretKey Tencent Cloud Secret Key, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param headers HTTP request headers. * @returns The signature for making requests to the Tencent API. */ export const sign = ( host: string, payload: object, timestamp: number, secretId: string, secretKey: string, headers: Record<string, string> ): string => { const contentType = headers["Content-Type"]; const payloadHash = sha256(JSON.stringify(payload)); const canonicalRequest = `POST\n/\n\ncontent-type:${contentType}\nhost:${host}\n\n${signedHeaders}\n${payloadHash}`; const date = getDate(timestamp); const signature = hmacSha256( `TC3-HMAC-SHA256\n${timestamp}\n${date}/${service}/tc3_request\n${sha256( canonicalRequest ).toString()}`, hmacSha256( "tc3_request", hmacSha256(service, hmacSha256(date, `TC3${secretKey}`)) ) ).toString(); return `TC3-HMAC-SHA256 Credential=${secretId}/${date}/${service}/tc3_request, SignedHeaders=${signedHeaders}, Signature=${signature}`; };
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/tencent_hunyuan/index.ts
import { createHash, createHmac, BinaryLike } from "node:crypto"; import { getDate, service, signedHeaders } from "./common.js"; const sha256 = (data: string) => createHash("sha256").update(data).digest("hex"); const hmacSha256 = (data: string, key: BinaryLike) => createHmac("sha256", key).update(data).digest(); const hmacSha256Hex = (data: string, key: BinaryLike) => createHmac("sha256", key).update(data).digest("hex"); /** * Method that calculate Tencent Cloud API v3 signature * for making requests to the Tencent Cloud API. * See https://cloud.tencent.com/document/api/1729/101843. * @param host Tencent Cloud API host. * @param payload HTTP request body. * @param timestamp Sign timestamp in seconds. * @param secretId Tencent Cloud Secret ID, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param secretKey Tencent Cloud Secret Key, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param headers HTTP request headers. * @returns The signature for making requests to the Tencent API. */ export const sign = ( host: string, payload: object, timestamp: number, secretId: string, secretKey: string, headers: Record<string, string> ): string => { const contentType = headers["Content-Type"]; const payloadHash = sha256(JSON.stringify(payload)); const canonicalRequest = `POST\n/\n\ncontent-type:${contentType}\nhost:${host}\n\n${signedHeaders}\n${payloadHash}`; const date = getDate(timestamp); const signature = hmacSha256Hex( `TC3-HMAC-SHA256\n${timestamp}\n${date}/${service}/tc3_request\n${sha256( canonicalRequest )}`, hmacSha256( "tc3_request", hmacSha256(service, hmacSha256(date, `TC3${secretKey}`)) ) ); return `TC3-HMAC-SHA256 Credential=${secretId}/${date}/${service}/tc3_request, SignedHeaders=${signedHeaders}, Signature=${signature}`; };
0
lc_public_repos/langchainjs/libs/langchain-community/src/utils
lc_public_repos/langchainjs/libs/langchain-community/src/utils/tencent_hunyuan/common.ts
export const service = "hunyuan"; export const signedHeaders = `content-type;host`; export const getDate = (timestamp: number) => { const date = new Date(timestamp * 1000); const year = date.getUTCFullYear(); const month = `0${(date.getUTCMonth() + 1).toString()}`.slice(-2); const day = `0${date.getUTCDate()}`.slice(-2); return `${year}-${month}-${day}`; }; /** * Method that calculate Tencent Cloud API v3 signature * for making requests to the Tencent Cloud API. * See https://cloud.tencent.com/document/api/1729/101843. * @param host Tencent Cloud API host. * @param payload HTTP request body. * @param timestamp Sign timestamp in seconds. * @param secretId Tencent Cloud Secret ID, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param secretKey Tencent Cloud Secret Key, which can be obtained from https://console.cloud.tencent.com/cam/capi. * @param headers HTTP request headers. * @returns The signature for making requests to the Tencent API. */ export type sign = ( host: string, payload: object, timestamp: number, secretId: string, secretKey: string, headers: Record<string, string> ) => string;
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/obsidian.ts
import type { basename as BasenameT } from "node:path";
import type { readFile as ReadFileT, stat as StatT } from "node:fs/promises";
import yaml from "js-yaml";
import { Document } from "@langchain/core/documents";
import { getEnv } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import {
  DirectoryLoader,
  UnknownHandling,
} from "langchain/document_loaders/fs/directory";

// Shape of the YAML front matter found at the top of Obsidian notes.
// Arbitrary extra keys are allowed and passed through to document metadata.
export type FrontMatter = {
  title?: string;
  description?: string;
  tags?: string[] | string;
  [key: string]: unknown;
};

export interface ObsidianFileLoaderOptions {
  // Character encoding used when reading the file. Defaults to "utf-8".
  encoding?: BufferEncoding;
  // When false, front matter, tags and dataview fields are not parsed.
  collectMetadata?: boolean;
}

/**
 * Represents a loader for Obsidian markdown files. This loader extends the BaseDocumentLoader
 * and provides functionality to parse and extract metadata, tags, and dataview fields from
 * Obsidian markdown files.
 */
class ObsidianFileLoader extends BaseDocumentLoader {
  private filePath: string;

  private encoding: BufferEncoding;

  private collectMetadata: boolean;

  /**
   * Initializes a new instance of the ObsidianFileLoader class.
   * @param filePath The path to the Obsidian markdown file.
   * @param encoding The character encoding to use when reading the file. Defaults to 'utf-8'.
   * @param collectMetadata Determines whether metadata should be collected from the file. Defaults to true.
   */
  constructor(
    filePath: string,
    {
      encoding = "utf-8",
      collectMetadata = true,
    }: ObsidianFileLoaderOptions = {}
  ) {
    super();
    this.filePath = filePath;
    this.encoding = encoding;
    this.collectMetadata = collectMetadata;
  }

  // Matches a leading "---\n...\n---\n" YAML front matter block; the "s" flag
  // lets "." span multiple lines, and the lazy "*?" stops at the first "---".
  private static FRONT_MATTER_REGEX = /^---\n(.*?)\n---\n/s;

  /**
   * Parses the YAML front matter from the given content string.
   * @param content The string content of the markdown file.
   * @returns An object representing the parsed front matter.
   */
  private parseFrontMatter(content: string): FrontMatter {
    if (!this.collectMetadata) {
      return {};
    }
    const match = content.match(ObsidianFileLoader.FRONT_MATTER_REGEX);
    if (!match) {
      return {};
    }
    try {
      const frontMatter = yaml.load(match[1]) as FrontMatter;
      // Normalize "tags: a, b" (a single comma-separated string) to an array.
      if (frontMatter.tags && typeof frontMatter.tags === "string") {
        frontMatter.tags = frontMatter.tags.split(", ");
      }
      return frontMatter;
    } catch (e) {
      // Malformed YAML is tolerated: the note is still loaded, just without
      // front matter metadata.
      console.warn("Encountered non-yaml frontmatter");
      return {};
    }
  }

  /**
   * Removes YAML front matter from the given content string.
   * @param content The string content of the markdown file.
   * @returns The content string with the front matter removed.
   */
  private removeFrontMatter(content: string): string {
    if (!this.collectMetadata) {
      return content;
    }
    return content.replace(ObsidianFileLoader.FRONT_MATTER_REGEX, "");
  }

  // Matches Obsidian "#tag" tokens (letters/underscore start, then word
  // characters, "/" or "-") preceded by whitespace or start of string.
  private static TAG_REGEX = /(?:\s|^)#([a-zA-Z_][\w/-]*)/g;

  /**
   * Parses Obsidian-style tags from the given content string.
   * @param content The string content of the markdown file.
   * @returns A set of parsed tags.
   */
  private parseObsidianTags(content: string): Set<string> {
    if (!this.collectMetadata) {
      return new Set();
    }
    const matches = content.matchAll(ObsidianFileLoader.TAG_REGEX);
    const tags = new Set<string>();
    for (const match of matches) {
      tags.add(match[1]);
    }
    return tags;
  }

  // Dataview syntax: "key:: value" on its own line, "[key:: value]" inline,
  // or "(key:: value)" inline. See https://blacksmithgu.github.io/obsidian-dataview/
  // — NOTE(review): presumed source of these formats; confirm against plugin docs.
  private static DATAVIEW_LINE_REGEX = /^\s*(\w+)::\s*(.*)$/gm;

  private static DATAVIEW_INLINE_BRACKET_REGEX = /\[(\w+)::\s*(.*)\]/gm;

  private static DATAVIEW_INLINE_PAREN_REGEX = /\((\w+)::\s*(.*)\)/gm;

  /**
   * Parses dataview fields from the given content string.
   * Later matches overwrite earlier ones when the same key appears in
   * multiple forms.
   * @param content The string content of the markdown file.
   * @returns A record object containing key-value pairs of dataview fields.
   */
  private parseObsidianDataviewFields(content: string): Record<string, string> {
    if (!this.collectMetadata) {
      return {};
    }
    const fields: Record<string, string> = {};
    const lineMatches = content.matchAll(
      ObsidianFileLoader.DATAVIEW_LINE_REGEX
    );
    for (const [, key, value] of lineMatches) {
      fields[key] = value;
    }
    const bracketMatches = content.matchAll(
      ObsidianFileLoader.DATAVIEW_INLINE_BRACKET_REGEX
    );
    for (const [, key, value] of bracketMatches) {
      fields[key] = value;
    }
    const parenMatches = content.matchAll(
      ObsidianFileLoader.DATAVIEW_INLINE_PAREN_REGEX
    );
    for (const [, key, value] of parenMatches) {
      fields[key] = value;
    }
    return fields;
  }

  /**
   * Converts metadata to a format compatible with Langchain: strings and
   * numbers pass through unchanged; all other values are JSON-serialized.
   * @param metadata The metadata object to convert.
   * @returns A record object containing key-value pairs of Langchain-compatible metadata.
   */
  private toLangchainCompatibleMetadata(metadata: Record<string, unknown>) {
    const result: Record<string, unknown> = {};
    for (const [key, value] of Object.entries(metadata)) {
      if (typeof value === "string" || typeof value === "number") {
        result[key] = value;
      } else {
        result[key] = JSON.stringify(value);
      }
    }
    return result;
  }

  /**
   * It loads the Obsidian file, parses it, and returns a `Document` instance.
   * @returns An array of `Document` instances to comply with the BaseDocumentLoader interface.
   */
  public async load(): Promise<Document[]> {
    const documents: Document[] = [];
    const { basename, readFile, stat } = await ObsidianFileLoader.imports();
    const fileName = basename(this.filePath);
    const stats = await stat(this.filePath);
    let content = await readFile(this.filePath, this.encoding);

    // Metadata must be extracted before the front matter is stripped,
    // since parseFrontMatter reads it from the raw content.
    const frontMatter = this.parseFrontMatter(content);
    const tags = this.parseObsidianTags(content);
    const dataviewFields = this.parseObsidianDataviewFields(content);
    content = this.removeFrontMatter(content);

    const metadata: Document["metadata"] = {
      source: fileName,
      path: this.filePath,
      created: stats.birthtimeMs,
      lastModified: stats.mtimeMs,
      lastAccessed: stats.atimeMs,
      ...this.toLangchainCompatibleMetadata(frontMatter),
      ...dataviewFields,
    };
    // Merge inline #tags with front-matter tags, deduplicated, into a single
    // comma-separated string.
    if (tags.size || frontMatter.tags) {
      metadata.tags = Array.from(
        new Set([...tags, ...(frontMatter.tags ?? [])])
      ).join(",");
    }

    documents.push(
      new Document({
        pageContent: content,
        metadata,
      })
    );

    return documents;
  }

  /**
   * Imports the necessary functions from the `node:path` and
   * `node:fs/promises` modules. It is used to dynamically import the
   * functions when needed. If the import fails, it throws an error
   * indicating that the modules failed to load.
   * @returns A promise that resolves to an object containing the imported functions.
   */
  static async imports(): Promise<{
    basename: typeof BasenameT;
    readFile: typeof ReadFileT;
    stat: typeof StatT;
  }> {
    try {
      const { basename } = await import("node:path");
      const { readFile, stat } = await import("node:fs/promises");
      return { basename, readFile, stat };
    } catch (e) {
      console.error(e);
      throw new Error(
        `Failed to load fs/promises. ObsidianFileLoader available only on environment 'node'. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
      );
    }
  }
}

/**
 * Represents a loader for directories containing Obsidian markdown files. This loader extends
 * the DirectoryLoader and provides functionality to load and parse '.md' files with YAML frontmatter,
 * Obsidian tags, and Dataview fields.
 */
export class ObsidianLoader extends DirectoryLoader {
  /**
   * Initializes a new instance of the ObsidianLoader class.
   * @param directoryPath The path to the directory containing Obsidian markdown files.
   * @param options Options forwarded to each ObsidianFileLoader (encoding, collectMetadata).
   */
  constructor(directoryPath: string, options?: ObsidianFileLoaderOptions) {
    super(
      directoryPath,
      {
        ".md": (filePath) => new ObsidianFileLoader(filePath, options),
      },
      true,
      UnknownHandling.Ignore
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/openai_whisper_audio.ts
import { type ClientOptions, OpenAIClient, toFile } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { BufferLoader } from "langchain/document_loaders/fs/buffer"; const MODEL_NAME = "whisper-1"; /** * @example * ```typescript * const loader = new OpenAIWhisperAudio( * "./src/document_loaders/example_data/test.mp3", * ); * const docs = await loader.load(); * console.log(docs); * ``` */ export class OpenAIWhisperAudio extends BufferLoader { private readonly openAIClient: OpenAIClient; constructor( filePathOrBlob: string | Blob, fields?: { clientOptions?: ClientOptions; } ) { super(filePathOrBlob); this.openAIClient = new OpenAIClient(fields?.clientOptions); } protected async parse( raw: Buffer, metadata: Record<string, string> ): Promise<Document[]> { const fileName = metadata.source === "blob" ? metadata.blobType : metadata.source; const transcriptionResponse = await this.openAIClient.audio.transcriptions.create({ file: await toFile(raw, fileName), model: MODEL_NAME, }); const document = new Document({ pageContent: transcriptionResponse.text, metadata, }); return [document]; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/srt.ts
import srtParser2 from "srt-parser-2"; import { TextLoader } from "langchain/document_loaders/fs/text"; /** * A class that extends the `TextLoader` class. It represents a document * loader that loads documents from SRT (SubRip) subtitle files. It has a * constructor that takes a `filePathOrBlob` parameter representing the * path to the SRT file or a `Blob` object. The `parse()` method is * implemented to parse the SRT file and extract the text content of each * subtitle. * @example * ```typescript * const loader = new SRTLoader("path/to/file.srt"); * const docs = await loader.load(); * console.log({ docs }); * ``` */ export class SRTLoader extends TextLoader { constructor(filePathOrBlob: string | Blob) { super(filePathOrBlob); } /** * A protected method that takes a `raw` string as a parameter and returns * a promise that resolves to an array of strings. It parses the raw SRT * string using the `SRTParser2` class from the `srt-parser-2` module. It * retrieves the subtitle objects from the parsed SRT data and extracts * the text content from each subtitle object. It filters out any empty * text content and joins the non-empty text content with a space * separator. * @param raw The raw SRT string to be parsed. * @returns A promise that resolves to an array of strings representing the text content of each subtitle. */ protected async parse(raw: string): Promise<string[]> { // eslint-disable-next-line new-cap const parser = new srtParser2(); const srts = parser.fromSrt(raw); return [ srts .map((srt) => srt.text) .filter(Boolean) .join(" "), ]; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/csv.ts
import { TextLoader } from "langchain/document_loaders/fs/text"; /** * Loads a CSV file into a list of documents. * Each document represents one row of the CSV file. * * When `column` is not specified, each row is converted into a key/value pair * with each key/value pair outputted to a new line in the document's pageContent. * * @example * // CSV file: * // id,html * // 1,<i>Corruption discovered at the core of the Banking Clan!</i> * // 2,<i>Corruption discovered at the core of the Banking Clan!</i> * * const loader = new CSVLoader("path/to/file.csv"); * const docs = await loader.load(); * * // docs[0].pageContent: * // id: 1 * // html: <i>Corruption discovered at the core of the Banking Clan!</i> * * When `column` is specified, one document is created for each row, and the * value of the specified column is used as the document's pageContent. * * @example * // CSV file: * // id,html * // 1,<i>Corruption discovered at the core of the Banking Clan!</i> * // 2,<i>Corruption discovered at the core of the Banking Clan!</i> * * const loader = new CSVLoader("path/to/file.csv", "html"); * const docs = await loader.load(); * * // docs[0].pageContent: * // <i>Corruption discovered at the core of the Banking Clan!</i> */ type CSVLoaderOptions = { column?: string; separator?: string; }; /** * A class that extends the TextLoader class. It represents a document * loader that loads documents from a CSV file. It has a constructor that * takes a `filePathOrBlob` parameter representing the path to the CSV * file or a Blob object, and an optional `options` parameter of type * `CSVLoaderOptions` or a string representing the column to use as the * document's pageContent. */ export class CSVLoader extends TextLoader { protected options: CSVLoaderOptions = {}; constructor( filePathOrBlob: string | Blob, options?: CSVLoaderOptions | string ) { super(filePathOrBlob); if (typeof options === "string") { this.options = { column: options }; } else { this.options = options ?? 
this.options; } } /** * A protected method that parses the raw CSV data and returns an array of * strings representing the pageContent of each document. It uses the * `dsvFormat` function from the `d3-dsv` module to parse the CSV data. If * the `column` option is specified, it checks if the column exists in the * CSV file and returns the values of that column as the pageContent. If * the `column` option is not specified, it converts each row of the CSV * data into key/value pairs and joins them with newline characters. * @param raw The raw CSV data to be parsed. * @returns An array of strings representing the pageContent of each document. */ protected async parse(raw: string): Promise<string[]> { const { column, separator = "," } = this.options; const { dsvFormat } = await CSVLoaderImports(); const psv = dsvFormat(separator); const parsed = psv.parse(raw.trim()); if (column !== undefined) { if (!parsed.columns.includes(column)) { throw new Error(`Column ${column} not found in CSV file.`); } // Note TextLoader will raise an exception if the value is null. // eslint-disable-next-line @typescript-eslint/no-non-null-assertion return parsed.map((row) => row[column]!); } return parsed.map((row) => Object.keys(row) .map((key) => `${key.trim()}: ${row[key]?.trim()}`) .join("\n") ); } } async function CSVLoaderImports() { try { const { dsvFormat } = await import("d3-dsv"); return { dsvFormat }; } catch (e) { console.error(e); throw new Error( "Please install d3-dsv as a dependency with, e.g. `yarn add d3-dsv@2`" ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/epub.ts
import type { EPub } from "epub2";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Document loader for EPUB files. Each chapter is converted from HTML to
 * plain text; depending on `splitChapters`, chapters become individual
 * Documents or are concatenated into one.
 */
export class EPubLoader extends BaseDocumentLoader {
  private splitChapters: boolean;

  constructor(public filePath: string, { splitChapters = true } = {}) {
    super();
    this.splitChapters = splitChapters;
  }

  /**
   * Extracts the text content and title of every chapter in the EPUB.
   * Chapters without an id or without retrievable HTML are skipped.
   * @param epub The EPUB object to parse.
   * @returns A promise that resolves to one entry per readable chapter.
   */
  protected async parse(
    epub: EPub
  ): Promise<{ pageContent: string; metadata?: object }[]> {
    const { htmlToText } = await HtmlToTextImport();
    // Fetch all chapters' raw HTML concurrently.
    const rawChapters = await Promise.all(
      epub.flow.map(async (entry) => {
        if (!entry.id) return null as never;
        const html: string = await epub.getChapterRawAsync(entry.id);
        if (!html) return null as never;
        return { html, title: entry.title };
      })
    );
    const results: { pageContent: string; metadata?: object }[] = [];
    for (const chapter of rawChapters.filter(Boolean)) {
      results.push({
        pageContent: htmlToText(chapter.html),
        metadata: chapter.title ? { chapter: chapter.title } : {},
      });
    }
    return results;
  }

  /**
   * Loads the EPUB file and converts its chapters into Documents.
   * @returns A promise that resolves to an array of `Document` instances.
   */
  public async load(): Promise<Document[]> {
    const { EPub } = await EpubImport();
    const epub = await EPub.createAsync(this.filePath);
    const parsed = await this.parse(epub);
    const metadata = { source: this.filePath };

    if (parsed.length === 0) return [];

    if (this.splitChapters) {
      // One Document per chapter, carrying the chapter title when present.
      return parsed.map(
        (chapter) =>
          new Document({
            pageContent: chapter.pageContent,
            metadata: {
              ...metadata,
              ...chapter.metadata,
            },
          })
      );
    }

    // Single Document: all chapters joined by blank lines.
    const combined = parsed.map((chapter) => chapter.pageContent).join("\n\n");
    return [
      new Document({
        pageContent: combined,
        metadata,
      }),
    ];
  }
}

// Lazily imports epub2 so the dependency stays optional.
async function EpubImport() {
  const { EPub } = await import("epub2").catch(() => {
    throw new Error(
      "Failed to load epub2. Please install it with eg. `npm install epub2`."
    );
  });
  return { EPub };
}

// Lazily imports html-to-text so the dependency stays optional.
async function HtmlToTextImport() {
  const { htmlToText } = await import("html-to-text").catch(() => {
    throw new Error(
      "Failed to load html-to-text. Please install it with eg. `npm install html-to-text`."
    );
  });
  return { htmlToText };
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/chatgpt.ts
import { Document } from "@langchain/core/documents";
import { TextLoader } from "langchain/document_loaders/fs/text";

// One message inside an exported ChatGPT conversation.
interface ChatGPTMessage {
  author: {
    role: string;
  };
  content: {
    parts: string[];
  };
  create_time: number;
}

// One exported conversation: a title plus a map of message nodes.
interface ChatGPTLog {
  title: string;
  mapping: Record<string, { message: ChatGPTMessage }>;
}

/**
 * Combine message information in a readable format ready to be used.
 * @param {ChatGPTMessage} message - Message to be concatenated
 * @param {string} title - Title of the conversation
 * @returns {string} Concatenated message
 */
function concatenateRows(message: ChatGPTMessage, title: string): string {
  if (!message) {
    return "";
  }

  let sender = "unknown";
  if (message.author) {
    sender = message.author.role;
  }
  const [text] = message.content.parts;
  // Render the UNIX timestamp (seconds) as "YYYY-MM-DD HH:MM:SS" UTC.
  const date = new Date(message.create_time * 1000)
    .toISOString()
    .slice(0, 19)
    .replace("T", " ");
  return `${title} - ${sender} on ${date}: ${text}\n\n`;
}

/**
 * Loads a ChatGPT conversation export (JSON) into one Document per
 * conversation log. `numLogs` limits how many logs are loaded (0 = all).
 */
export class ChatGPTLoader extends TextLoader {
  public numLogs: number;

  constructor(filePathOrBlob: string | Blob, numLogs = 0) {
    super(filePathOrBlob);
    this.numLogs = numLogs;
  }

  protected async parse(raw: string): Promise<string[]> {
    let logs;
    try {
      logs = JSON.parse(raw);
    } catch (e) {
      console.error(e);
      throw new Error("Failed to parse JSON");
    }

    let selectedLogs = logs;
    if (this.numLogs > 0) {
      selectedLogs = logs.slice(0, this.numLogs);
    }

    return selectedLogs.map((log: ChatGPTLog) => {
      const parts: string[] = [];
      Object.values(log.mapping).forEach((entry, idx) => {
        // The leading system prompt, if present, is excluded from the output.
        if (idx === 0 && entry.message.author.role === "system") {
          return;
        }
        parts.push(concatenateRows(entry.message, log.title));
      });
      return parts.join("");
    });
  }

  public async load(): Promise<Document[]> {
    let rawText: string;
    let metadata: Record<string, string>;
    if (typeof this.filePathOrBlob === "string") {
      const { readFile } = await TextLoader.imports();
      try {
        rawText = await readFile(this.filePathOrBlob, "utf8");
      } catch (e) {
        console.error(e);
        throw new Error("Failed to read file");
      }
      metadata = { source: this.filePathOrBlob };
    } else {
      try {
        rawText = await this.filePathOrBlob.text();
      } catch (e) {
        console.error(e);
        throw new Error("Failed to read blob");
      }
      metadata = { source: "blob", blobType: this.filePathOrBlob.type };
    }

    const parsedLogs = await this.parse(rawText);
    return parsedLogs.map((pageContent, idx) => {
      // logIndex is 1-based to match the order of logs in the export.
      return new Document({
        pageContent,
        metadata: {
          ...metadata,
          logIndex: idx + 1,
        },
      });
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/pdf.ts
import { Document } from "@langchain/core/documents";
import { BufferLoader } from "langchain/document_loaders/fs/buffer";

/**
 * A class that extends the `BufferLoader` class. It represents a document
 * loader that loads documents from PDF files.
 * @example
 * ```typescript
 * const loader = new PDFLoader("path/to/bitcoin.pdf");
 * const docs = await loader.load();
 * console.log({ docs });
 * ```
 */
export class PDFLoader extends BufferLoader {
  private splitPages: boolean;

  private pdfjs: typeof PDFLoaderImports;

  protected parsedItemSeparator: string;

  /**
   * @param filePathOrBlob Path to the PDF file, or a Blob containing it.
   * @param splitPages When true (default), one Document per page; otherwise all pages are joined.
   * @param pdfjs Factory returning the pdf.js bindings — injectable for custom builds.
   * @param parsedItemSeparator String inserted between adjacent text items on a line.
   */
  constructor(
    filePathOrBlob: string | Blob,
    {
      splitPages = true,
      pdfjs = PDFLoaderImports,
      parsedItemSeparator = "",
    } = {}
  ) {
    super(filePathOrBlob);
    this.splitPages = splitPages;
    this.pdfjs = pdfjs;
    this.parsedItemSeparator = parsedItemSeparator;
  }

  /**
   * A method that takes a `raw` buffer and `metadata` as parameters and
   * returns a promise that resolves to an array of `Document` instances. It
   * uses the `getDocument` function from the PDF.js library to load the PDF
   * from the buffer. It then iterates over each page of the PDF, retrieves
   * the text content using the `getTextContent` method, and joins the text
   * items to form the page content. It creates a new `Document` instance
   * for each page with the extracted text content and metadata, and adds it
   * to the `documents` array. If `splitPages` is `true`, it returns the
   * array of `Document` instances. Otherwise, if there are no documents, it
   * returns an empty array. Otherwise, it concatenates the page content of
   * all documents and creates a single `Document` instance with the
   * concatenated content.
   * @param raw The buffer to be parsed.
   * @param metadata The metadata of the document.
   * @returns A promise that resolves to an array of `Document` instances.
   */
  public async parse(
    raw: Buffer,
    metadata: Document["metadata"]
  ): Promise<Document[]> {
    const { getDocument, version } = await this.pdfjs();
    // pdf.js wants a typed array view over the buffer's bytes.
    const pdf = await getDocument({
      data: new Uint8Array(raw.buffer),
      useWorkerFetch: false,
      isEvalSupported: false,
      useSystemFonts: true,
    }).promise;
    // Document-level metadata is optional; failures are tolerated.
    const meta = await pdf.getMetadata().catch(() => null);

    const documents: Document[] = [];

    // pdf.js page numbers are 1-based.
    for (let i = 1; i <= pdf.numPages; i += 1) {
      const page = await pdf.getPage(i);
      const content = await page.getTextContent();

      // Pages with no text items (e.g. scanned images) are skipped entirely.
      if (content.items.length === 0) {
        continue;
      }

      // Eliminate excessive newlines
      // Source: https://github.com/albertcui/pdf-parse/blob/7086fc1cc9058545cdf41dd0646d6ae5832c7107/lib/pdf-parse.js#L16
      // transform[5] is the item's y coordinate: a change in y means the
      // text moved to a new line, so a newline is inserted.
      let lastY;
      const textItems = [];
      for (const item of content.items) {
        if ("str" in item) {
          if (lastY === item.transform[5] || !lastY) {
            textItems.push(item.str);
          } else {
            textItems.push(`\n${item.str}`);
          }
          // eslint-disable-next-line prefer-destructuring
          lastY = item.transform[5];
        }
      }

      const text = textItems.join(this.parsedItemSeparator);

      documents.push(
        new Document({
          pageContent: text,
          metadata: {
            ...metadata,
            pdf: {
              version,
              info: meta?.info,
              metadata: meta?.metadata,
              totalPages: pdf.numPages,
            },
            loc: {
              pageNumber: i,
            },
          },
        })
      );
    }

    if (this.splitPages) {
      return documents;
    }

    if (documents.length === 0) {
      return [];
    }

    // Single-document mode: join all page texts, dropping per-page loc info.
    return [
      new Document({
        pageContent: documents.map((doc) => doc.pageContent).join("\n\n"),
        metadata: {
          ...metadata,
          pdf: {
            version,
            info: meta?.info,
            metadata: meta?.metadata,
            totalPages: pdf.numPages,
          },
        },
      }),
    ];
  }
}

// Lazily imports the pdf.js build bundled with pdf-parse so the dependency
// stays optional.
async function PDFLoaderImports() {
  try {
    const { default: mod } = await import(
      "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js"
    );
    const { getDocument, version } = mod;
    return { getDocument, version };
  } catch (e) {
    console.error(e);
    throw new Error(
      "Failed to load pdf-parse. Please install it with eg. `npm install pdf-parse`."
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/unstructured.ts
import type { basename as BasenameT } from "node:path";
import type { readFile as ReadFileT } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { getEnv, getEnvironmentVariable } from "@langchain/core/utils/env";
import { StringWithAutocomplete } from "@langchain/core/utils/types";
import {
  DirectoryLoader,
  UnknownHandling,
  LoadersMapping,
} from "langchain/document_loaders/fs/directory";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

// File extensions that the Unstructured partitioning API accepts; used by
// UnstructuredDirectoryLoader to decide which files get a loader.
export const UNSTRUCTURED_API_FILETYPES = [
  ".txt",
  ".text",
  ".pdf",
  ".docx",
  ".doc",
  ".jpg",
  ".jpeg",
  ".eml",
  ".html",
  ".htm",
  ".md",
  ".pptx",
  ".ppt",
  ".msg",
  ".rtf",
  ".xlsx",
  ".xls",
  ".odt",
  ".epub",
];

/**
 * Represents an element returned by the Unstructured API. It has
 * properties for the element type, text content, and metadata.
 */
type Element = {
  type: string;
  text: string;
  // this is purposefully loosely typed
  metadata: {
    [key: string]: unknown;
  };
};

/**
 * Represents the available strategies for the UnstructuredLoader. It can
 * be one of "hi_res", "fast", "ocr_only", or "auto".
 */
export type UnstructuredLoaderStrategy =
  | "hi_res"
  | "fast"
  | "ocr_only"
  | "auto";

/**
 * Represents the available hi-res models for the UnstructuredLoader. It can
 * be one of "chipper".
 */
export type HiResModelName = "chipper";

/**
 * To enable or disable table extraction for file types other than PDF, set
 * the skipInferTableTypes property in the UnstructuredLoaderOptions object.
 * The skipInferTableTypes property is an array of file types for which table
 * extraction is disabled. For example, to disable table extraction for .docx
 * and .doc files, set the skipInferTableTypes property to ["docx", "doc"].
 * You can also disable table extraction for all file types other than PDF by
 * setting the skipInferTableTypes property to [].
 */
export type SkipInferTableTypes =
  | "txt"
  | "text"
  | "pdf"
  | "docx"
  | "doc"
  | "jpg"
  | "jpeg"
  | "eml"
  | "html"
  | "htm"
  | "md"
  | "pptx"
  | "ppt"
  | "msg"
  | "rtf"
  | "xlsx"
  | "xls"
  | "odt"
  | "epub";

/**
 * Set the chunking_strategy to chunk text into larger or smaller elements. Defaults to None with optional arg of by_title
 */
export type ChunkingStrategy = "None" | "by_title";

// Options forwarded to the Unstructured partition endpoint; each field maps
// onto one of the API's multipart form parameters (see _partition below).
export type UnstructuredLoaderOptions = {
  apiKey?: string;
  apiUrl?: string;
  strategy?: StringWithAutocomplete<UnstructuredLoaderStrategy>;
  encoding?: string;
  ocrLanguages?: Array<string>;
  coordinates?: boolean;
  pdfInferTableStructure?: boolean;
  xmlKeepTags?: boolean;
  skipInferTableTypes?: Array<StringWithAutocomplete<SkipInferTableTypes>>;
  hiResModelName?: StringWithAutocomplete<HiResModelName>;
  includePageBreaks?: boolean;
  chunkingStrategy?: StringWithAutocomplete<ChunkingStrategy>;
  multiPageSections?: boolean;
  combineUnderNChars?: number;
  newAfterNChars?: number;
  maxCharacters?: number;
  extractImageBlockTypes?: string[];
  overlap?: number;
  overlapAll?: boolean;
};

// Directory variant adds the DirectoryLoader traversal knobs.
export type UnstructuredDirectoryLoaderOptions = UnstructuredLoaderOptions & {
  recursive?: boolean;
  unknown?: UnknownHandling;
};

// In-memory variant: content is supplied directly instead of via a file path.
export type UnstructuredMemoryLoaderOptions = {
  buffer: Buffer;
  fileName: string;
};

/**
 * A document loader that uses the Unstructured API to load unstructured
 * documents. It supports both the new syntax with options object and the
 * legacy syntax for backward compatibility. The load() method sends a
 * partitioning request to the Unstructured API and retrieves the
 * partitioned elements. It creates a Document instance for each element
 * and returns an array of Document instances.
 *
 * It accepts either a filepath or an object containing a buffer and a filename
 * as input.
 */
export class UnstructuredLoader extends BaseDocumentLoader {
  public filePath: string;

  // Set only for the in-memory (buffer) construction syntax.
  private buffer?: Buffer;

  private fileName?: string;

  // Default hosted endpoint; overridable via options or UNSTRUCTURED_API_URL.
  private apiUrl = "https://api.unstructured.io/general/v0/general";

  private apiKey?: string;

  private strategy: StringWithAutocomplete<UnstructuredLoaderStrategy> =
    "hi_res";

  private encoding?: string;

  private ocrLanguages: Array<string> = [];

  private coordinates?: boolean;

  private pdfInferTableStructure?: boolean;

  private xmlKeepTags?: boolean;

  private skipInferTableTypes?: Array<
    StringWithAutocomplete<SkipInferTableTypes>
  >;

  private hiResModelName?: StringWithAutocomplete<HiResModelName>;

  private includePageBreaks?: boolean;

  private chunkingStrategy?: StringWithAutocomplete<ChunkingStrategy>;

  private multiPageSections?: boolean;

  private combineUnderNChars?: number;

  private newAfterNChars?: number;

  private maxCharacters?: number;

  private extractImageBlockTypes?: string[];

  private overlap?: number;

  private overlapAll?: boolean;

  /**
   * Supports three call shapes:
   *  - new syntax: `(filePath, options)`
   *  - in-memory:  `({ buffer, fileName }, options)`
   *  - legacy:     `(apiUrl, filePath)` — both arguments are strings, with
   *    the API URL first (detected by the second argument being a string).
   */
  constructor(
    filepathOrBufferOptions: string | UnstructuredMemoryLoaderOptions,
    unstructuredOptions: UnstructuredLoaderOptions | string = {}
  ) {
    super();
    // Temporary shim to avoid breaking existing users
    // Remove when API keys are enforced by Unstructured and existing code will break anyway
    const isLegacySyntax = typeof unstructuredOptions === "string";
    const isMemorySyntax = typeof filepathOrBufferOptions === "object";

    if (isMemorySyntax) {
      this.buffer = filepathOrBufferOptions.buffer;
      this.fileName = filepathOrBufferOptions.fileName;
    } else if (isLegacySyntax) {
      // Legacy argument order: (apiUrl, filePath).
      this.filePath = unstructuredOptions;
      this.apiUrl = filepathOrBufferOptions;
    } else {
      this.filePath = filepathOrBufferOptions;
    }

    if (!isLegacySyntax) {
      const options = unstructuredOptions;
      // Explicit options win over environment variables, which win over
      // the class defaults.
      this.apiKey =
        options.apiKey ?? getEnvironmentVariable("UNSTRUCTURED_API_KEY");
      this.apiUrl =
        options.apiUrl ??
        getEnvironmentVariable("UNSTRUCTURED_API_URL") ??
        this.apiUrl;
      this.strategy = options.strategy ?? this.strategy;
      this.encoding = options.encoding;
      this.ocrLanguages = options.ocrLanguages ?? this.ocrLanguages;
      this.coordinates = options.coordinates;
      this.pdfInferTableStructure = options.pdfInferTableStructure;
      this.xmlKeepTags = options.xmlKeepTags;
      this.skipInferTableTypes = options.skipInferTableTypes;
      this.hiResModelName = options.hiResModelName;
      this.includePageBreaks = options.includePageBreaks;
      this.chunkingStrategy = options.chunkingStrategy;
      this.multiPageSections = options.multiPageSections;
      this.combineUnderNChars = options.combineUnderNChars;
      this.newAfterNChars = options.newAfterNChars;
      this.maxCharacters = options.maxCharacters;
      this.extractImageBlockTypes = options.extractImageBlockTypes;
      this.overlap = options.overlap;
      this.overlapAll = options.overlapAll ?? false;
    }
  }

  /**
   * Sends the file contents to the Unstructured partition endpoint as a
   * multipart form and returns the elements that carry text. Options set on
   * the instance are mapped 1:1 onto the API's form parameters; unset
   * options are simply omitted from the request.
   */
  async _partition() {
    let buffer = this.buffer;
    let fileName = this.fileName;

    // File-path construction: read the file from disk.
    if (!buffer) {
      const { readFile, basename } = await this.imports();

      buffer = await readFile(this.filePath);
      fileName = basename(this.filePath);

      // I'm aware this reads the file into memory first, but we have lots of work
      // to do on then consuming Documents in a streaming fashion anyway, so not
      // worried about this for now.
    }

    const formData = new FormData();
    formData.append("files", new Blob([buffer]), fileName);
    formData.append("strategy", this.strategy);
    this.ocrLanguages.forEach((language) => {
      formData.append("ocr_languages", language);
    });
    if (this.encoding) {
      formData.append("encoding", this.encoding);
    }
    if (this.coordinates === true) {
      formData.append("coordinates", "true");
    }
    if (this.pdfInferTableStructure === true) {
      formData.append("pdf_infer_table_structure", "true");
    }
    if (this.xmlKeepTags === true) {
      formData.append("xml_keep_tags", "true");
    }
    if (this.skipInferTableTypes) {
      formData.append(
        "skip_infer_table_types",
        JSON.stringify(this.skipInferTableTypes)
      );
    }
    if (this.hiResModelName) {
      formData.append("hi_res_model_name", this.hiResModelName);
    }
    if (this.includePageBreaks) {
      formData.append("include_page_breaks", "true");
    }
    if (this.chunkingStrategy) {
      formData.append("chunking_strategy", this.chunkingStrategy);
    }
    if (this.multiPageSections !== undefined) {
      formData.append(
        "multipage_sections",
        this.multiPageSections ? "true" : "false"
      );
    }
    if (this.combineUnderNChars !== undefined) {
      formData.append("combine_under_n_chars", String(this.combineUnderNChars));
    }
    if (this.newAfterNChars !== undefined) {
      formData.append("new_after_n_chars", String(this.newAfterNChars));
    }
    if (this.maxCharacters !== undefined) {
      formData.append("max_characters", String(this.maxCharacters));
    }
    if (this.extractImageBlockTypes !== undefined) {
      formData.append(
        "extract_image_block_types",
        JSON.stringify(this.extractImageBlockTypes)
      );
    }
    if (this.overlap !== undefined) {
      formData.append("overlap", String(this.overlap));
    }
    if (this.overlapAll === true) {
      formData.append("overlap_all", "true");
    }

    const headers = {
      "UNSTRUCTURED-API-KEY": this.apiKey ?? "",
    };

    const response = await fetch(this.apiUrl, {
      method: "POST",
      body: formData,
      headers,
    });

    if (!response.ok) {
      throw new Error(
        `Failed to partition file ${this.filePath} with error ${
          response.status
        } and message ${await response.text()}`
      );
    }

    const elements = await response.json();
    if (!Array.isArray(elements)) {
      throw new Error(
        `Expected partitioning request to return an array, but got ${elements}`
      );
    }
    // Keep only elements whose `text` is a string; everything else is dropped.
    return elements.filter((el) => typeof el.text === "string") as Element[];
  }

  /**
   * Partitions the input and converts each non-empty text element into a
   * Document, copying the element metadata and recording the element type
   * under `metadata.category`.
   */
  async load(): Promise<Document[]> {
    const elements = await this._partition();

    const documents: Document[] = [];
    for (const element of elements) {
      const { metadata, text } = element;
      if (typeof text === "string" && text !== "") {
        documents.push(
          new Document({
            pageContent: text,
            metadata: {
              ...metadata,
              category: element.type,
            },
          })
        );
      }
    }

    return documents;
  }

  /**
   * Lazily imports the Node fs/path helpers so the loader fails with a
   * helpful message on non-Node runtimes instead of at module load time.
   */
  async imports(): Promise<{
    readFile: typeof ReadFileT;
    basename: typeof BasenameT;
  }> {
    try {
      const { readFile } = await import("node:fs/promises");
      const { basename } = await import("node:path");
      return { readFile, basename };
    } catch (e) {
      console.error(e);
      throw new Error(
        `Failed to load fs/promises. TextLoader available only on environment 'node'. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
      );
    }
  }
}

/**
 * A document loader that loads unstructured documents from a directory
 * using the UnstructuredLoader. It creates a UnstructuredLoader instance
 * for each supported file type and passes it to the DirectoryLoader
 * constructor.
* @example * ```typescript * const loader = new UnstructuredDirectoryLoader("path/to/directory", { * apiKey: "MY_API_KEY", * }); * const docs = await loader.load(); * ``` */ export class UnstructuredDirectoryLoader extends DirectoryLoader { constructor( directoryPathOrLegacyApiUrl: string, optionsOrLegacyDirectoryPath: UnstructuredDirectoryLoaderOptions | string, legacyOptionRecursive = true, legacyOptionUnknown: UnknownHandling = UnknownHandling.Warn ) { let directoryPath; let options: UnstructuredDirectoryLoaderOptions; // Temporary shim to avoid breaking existing users // Remove when API keys are enforced by Unstructured and existing code will break anyway const isLegacySyntax = typeof optionsOrLegacyDirectoryPath === "string"; if (isLegacySyntax) { directoryPath = optionsOrLegacyDirectoryPath; options = { apiUrl: directoryPathOrLegacyApiUrl, recursive: legacyOptionRecursive, unknown: legacyOptionUnknown, }; } else { directoryPath = directoryPathOrLegacyApiUrl; options = optionsOrLegacyDirectoryPath; } const loader = (p: string) => new UnstructuredLoader(p, options); const loaders = UNSTRUCTURED_API_FILETYPES.reduce( (loadersObject: LoadersMapping, filetype: string) => { // eslint-disable-next-line no-param-reassign loadersObject[filetype] = loader; return loadersObject; }, {} ); super(directoryPath, loaders, options.recursive, options.unknown); } } export { UnknownHandling };
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/docx.ts
import { Document } from "@langchain/core/documents";
import { BufferLoader } from "langchain/document_loaders/fs/buffer";

/**
 * Document loader for Microsoft Word (`.docx`) files, built on
 * `BufferLoader`. Text extraction is delegated to the `mammoth` package,
 * which is imported lazily so it stays an optional dependency.
 */
export class DocxLoader extends BufferLoader {
  constructor(filePathOrBlob: string | Blob) {
    super(filePathOrBlob);
  }

  /**
   * Extracts the raw text of a DOCX document from a buffer using
   * mammoth's `extractRawText`.
   *
   * @param raw Buffer containing the DOCX file contents.
   * @param metadata Metadata attached to the resulting document.
   * @returns A single-element array holding the extracted text, or an
   *   empty array when the document yields no text.
   */
  public async parse(
    raw: Buffer,
    metadata: Document["metadata"]
  ): Promise<Document[]> {
    const { extractRawText } = await DocxLoaderImports();
    const result = await extractRawText({ buffer: raw });
    const text = result.value;
    if (!text) {
      return [];
    }
    return [new Document({ pageContent: text, metadata })];
  }
}

/**
 * Lazily imports `mammoth`, surfacing an actionable installation hint
 * when the optional dependency is missing.
 */
async function DocxLoaderImports() {
  try {
    const mammoth = await import("mammoth");
    return { extractRawText: mammoth.extractRawText };
  } catch (e) {
    console.error(e);
    throw new Error(
      "Failed to load mammoth. Please install it with eg. `npm install mammoth`."
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/pptx.ts
import { parseOfficeAsync } from "officeparser";
import { Document } from "@langchain/core/documents";
import { BufferLoader } from "langchain/document_loaders/fs/buffer";

/**
 * Document loader for PowerPoint (`.pptx`) files, built on `BufferLoader`.
 * Text extraction is delegated to the `officeparser` package.
 */
export class PPTXLoader extends BufferLoader {
  constructor(filePathOrBlob: string | Blob) {
    super(filePathOrBlob);
  }

  /**
   * Extracts the text of a PowerPoint document from a buffer using
   * `parseOfficeAsync` (parser errors are logged to the console).
   *
   * @param raw Buffer containing the PPTX file contents.
   * @param metadata Metadata attached to the resulting document.
   * @returns A single-element array holding the extracted text, or an
   *   empty array when no text was produced.
   */
  public async parse(
    raw: Buffer,
    metadata: Document["metadata"]
  ): Promise<Document[]> {
    const text = await parseOfficeAsync(raw, { outputErrorToConsole: true });
    if (!text) {
      return [];
    }
    return [new Document({ pageContent: text, metadata })];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/fs/notion.ts
import {
  DirectoryLoader,
  UnknownHandling,
} from "langchain/document_loaders/fs/directory";
import { TextLoader } from "langchain/document_loaders/fs/text";

/**
 * Loads a directory of Notion-exported markdown files. Each `.md` file is
 * read with a `TextLoader`; files of any other type are silently ignored,
 * and subdirectories are traversed recursively.
 */
export class NotionLoader extends DirectoryLoader {
  constructor(directoryPath: string) {
    // Only markdown is mapped; unknown extensions fall through to
    // UnknownHandling.Ignore.
    const markdownOnly = {
      ".md": (filePath: string) => new TextLoader(filePath),
    };
    super(directoryPath, markdownOnly, true, UnknownHandling.Ignore);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/imsdb.ts
import { Document } from "@langchain/core/documents";
import { CheerioWebBaseLoader } from "./cheerio.js";

/**
 * Loader for pages on IMSDB (Internet Movie Script Database), built on
 * `CheerioWebBaseLoader`. The script text lives in the page's
 * `td[class='scrtext']` cell.
 */
export class IMSDBLoader extends CheerioWebBaseLoader {
  constructor(public webPath: string) {
    super(webPath);
  }

  /**
   * Scrapes the configured IMSDB page and returns its script text as a
   * single document, with the page URL recorded under `metadata.source`.
   *
   * @returns A single-element array containing the script Document.
   */
  public async load(): Promise<Document[]> {
    const $ = await this.scrape();
    const pageContent = $("td[class='scrtext']").text().trim();
    return [
      new Document({
        pageContent,
        metadata: { source: this.webPath },
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/html.ts
import { AsyncCaller, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; import { Document } from "@langchain/core/documents"; import type { DocumentLoader } from "@langchain/core/document_loaders/base"; /** * Represents the parameters for configuring WebBaseLoaders. It extends the * AsyncCallerParams interface and adds additional parameters specific to * web-based loaders. */ export interface WebBaseLoaderParams extends AsyncCallerParams { /** * The timeout in milliseconds for the fetch request. Defaults to 10s. */ timeout?: number; /** * The text decoder to use to decode the response. Defaults to UTF-8. */ textDecoder?: TextDecoder; /** * The headers to use in the fetch request. */ headers?: HeadersInit; /** * The selector to use to extract the text from the document. * Defaults to "body". * @deprecated Use CheerioWebBaseLoaderParams from @langchain/community/document_loaders/web/cheerio * instead. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any selector?: any; } export interface WebBaseLoader extends DocumentLoader { timeout: number; caller: AsyncCaller; textDecoder?: TextDecoder; headers?: HeadersInit; } export class HTMLWebBaseLoader extends BaseDocumentLoader implements WebBaseLoader { timeout: number; caller: AsyncCaller; textDecoder?: TextDecoder; headers?: HeadersInit; constructor(public webPath: string, fields?: WebBaseLoaderParams) { super(); const { timeout, textDecoder, headers, ...rest } = fields ?? {}; this.timeout = timeout ?? 10000; this.caller = new AsyncCaller(rest); this.textDecoder = textDecoder; this.headers = headers; } async load(): Promise<Document[]> { const response = await this.caller.call(fetch, this.webPath, { signal: this.timeout ? AbortSignal.timeout(this.timeout) : undefined, headers: this.headers, }); const html = this.textDecoder?.decode(await response.arrayBuffer()) ?? 
(await response.text()); return [new Document({ pageContent: html })]; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/azure_blob_storage_file.ts
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
import { BlobServiceClient } from "@azure/storage-blob";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import {
  UnstructuredLoader,
  UnstructuredLoaderOptions,
} from "../fs/unstructured.js";

/**
 * Interface representing the configuration for accessing a specific file
 * in Azure Blob Storage.
 */
interface AzureBlobStorageFileConfig {
  connectionString: string;
  container: string;
  blobName: string;
}

/**
 * Interface representing the configuration for the
 * AzureBlobStorageFileLoader. It contains the Azure Blob Storage file
 * configuration and the options for the UnstructuredLoader.
 */
interface AzureBlobStorageFileLoaderConfig {
  azureConfig: AzureBlobStorageFileConfig;
  unstructuredConfig?: UnstructuredLoaderOptions;
}

/**
 * Class representing a document loader that loads a specific file from
 * Azure Blob Storage. It extends the BaseDocumentLoader class and
 * implements the DocumentLoader interface.
 * @example
 * ```typescript
 * const loader = new AzureBlobStorageFileLoader({
 *   azureConfig: {
 *     connectionString: "{connectionString}",
 *     container: "{containerName}",
 *     blobName: "{blobName}",
 *   },
 * });
 * const docs = await loader.load();
 * ```
 */
export class AzureBlobStorageFileLoader extends BaseDocumentLoader {
  private readonly connectionString: string;

  private readonly container: string;

  private readonly blobName: string;

  private readonly unstructuredConfig?: UnstructuredLoaderOptions;

  constructor({
    azureConfig,
    unstructuredConfig,
  }: AzureBlobStorageFileLoaderConfig) {
    super();
    this.connectionString = azureConfig.connectionString;
    this.container = azureConfig.container;
    this.blobName = azureConfig.blobName;
    this.unstructuredConfig = unstructuredConfig;
  }

  /**
   * Downloads the configured blob into a fresh temporary directory,
   * partitions it with the UnstructuredLoader, and returns the resulting
   * documents. The temporary directory is always removed afterwards.
   *
   * @returns An array of documents loaded from the file in Azure Blob Storage.
   * @throws Error when the download fails or when partitioning fails; both
   *   messages include the underlying cause.
   */
  public async load() {
    const tempDir = fs.mkdtempSync(
      path.join(os.tmpdir(), "azureblobfileloader-")
    );

    // blobName may contain path separators, so filePath can be nested
    // below tempDir.
    const filePath = path.join(tempDir, this.blobName);

    try {
      const blobServiceClient = BlobServiceClient.fromConnectionString(
        this.connectionString,
        {
          userAgentOptions: {
            userAgentPrefix: "langchainjs-blob-storage-file",
          },
        }
      );

      const containerClient = blobServiceClient.getContainerClient(
        this.container
      );

      const blobClient = containerClient.getBlobClient(this.blobName);

      fs.mkdirSync(path.dirname(filePath), { recursive: true });

      await blobClient.downloadToFile(filePath);
    } catch (e: unknown) {
      throw new Error(
        `Failed to download file ${
          this.blobName
        } from Azure Blob Storage container ${this.container}: ${
          (e as Error).message
        }`
      );
    }

    try {
      const unstructuredLoader = new UnstructuredLoader(
        filePath,
        this.unstructuredConfig
      );

      return await unstructuredLoader.load();
    } catch (e: unknown) {
      // Include the underlying cause instead of swallowing it.
      throw new Error(
        `Failed to load file ${filePath} using unstructured loader: ${
          (e as Error).message
        }`
      );
    } finally {
      // Remove the whole mkdtemp root. Removing only dirname(filePath)
      // would leak tempDir whenever blobName contains path separators.
      fs.rmSync(tempDir, { recursive: true, force: true });
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/puppeteer.ts
import type { launch, WaitForOptions, Page, Browser, PuppeteerLaunchOptions, } from "puppeteer"; import { Document } from "@langchain/core/documents"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; import type { DocumentLoader } from "@langchain/core/document_loaders/base"; export { Page, Browser }; export type PuppeteerGotoOptions = WaitForOptions & { referer?: string; referrerPolicy?: string; }; /** * Type representing a function for evaluating JavaScript code on a web * page using Puppeteer. It takes a Page and Browser object as parameters * and returns a Promise that resolves to a string. */ export type PuppeteerEvaluate = ( page: Page, browser: Browser ) => Promise<string>; export type PuppeteerWebBaseLoaderOptions = { launchOptions?: PuppeteerLaunchOptions; gotoOptions?: PuppeteerGotoOptions; evaluate?: PuppeteerEvaluate; }; /** * Class that extends the BaseDocumentLoader class and implements the * DocumentLoader interface. It represents a document loader for scraping * web pages using Puppeteer. * @example * ```typescript * const loader = new PuppeteerWebBaseLoader("https:exampleurl.com", { * launchOptions: { * headless: true, * }, * gotoOptions: { * waitUntil: "domcontentloaded", * }, * }); * const screenshot = await loader.screenshot(); * ``` */ export class PuppeteerWebBaseLoader extends BaseDocumentLoader implements DocumentLoader { options: PuppeteerWebBaseLoaderOptions | undefined; constructor(public webPath: string, options?: PuppeteerWebBaseLoaderOptions) { super(); this.options = options ?? 
undefined; } static async _scrape( url: string, options?: PuppeteerWebBaseLoaderOptions ): Promise<string> { const { launch } = await PuppeteerWebBaseLoader.imports(); const browser = await launch({ headless: true, defaultViewport: null, ignoreDefaultArgs: ["--disable-extensions"], ...options?.launchOptions, }); const page = await browser.newPage(); await page.goto(url, { timeout: 180000, waitUntil: "domcontentloaded", ...options?.gotoOptions, }); const bodyHTML = options?.evaluate ? await options?.evaluate(page, browser) : await page.evaluate(() => document.body.innerHTML); await browser.close(); return bodyHTML; } /** * Method that calls the _scrape method to perform the scraping of the web * page specified by the webPath property. * @returns Promise that resolves to the scraped HTML content of the web page. */ async scrape(): Promise<string> { return PuppeteerWebBaseLoader._scrape(this.webPath, this.options); } /** * Method that calls the scrape method and returns the scraped HTML * content as a Document object. * @returns Promise that resolves to an array of Document objects. */ async load(): Promise<Document[]> { const text = await this.scrape(); const metadata = { source: this.webPath }; return [new Document({ pageContent: text, metadata })]; } /** * Static class method used to screenshot a web page and return * it as a {@link Document} object where the pageContent property * is the screenshot encoded in base64. * * @param {string} url * @param {PuppeteerWebBaseLoaderOptions} options * @returns {Document} A document object containing the screenshot of the page encoded in base64. 
*/ static async _screenshot( url: string, options?: PuppeteerWebBaseLoaderOptions ): Promise<Document> { const { launch } = await PuppeteerWebBaseLoader.imports(); const browser = await launch({ headless: true, defaultViewport: null, ignoreDefaultArgs: ["--disable-extensions"], ...options?.launchOptions, }); const page = await browser.newPage(); await page.goto(url, { timeout: 180000, waitUntil: "domcontentloaded", ...options?.gotoOptions, }); const screenshot = await page.screenshot(); const base64 = screenshot.toString("base64"); const metadata = { source: url }; return new Document({ pageContent: base64, metadata }); } /** * Screenshot a web page and return it as a {@link Document} object where * the pageContent property is the screenshot encoded in base64. * * @returns {Promise<Document>} A document object containing the screenshot of the page encoded in base64. */ async screenshot(): Promise<Document> { return PuppeteerWebBaseLoader._screenshot(this.webPath, this.options); } /** * Static method that imports the necessary Puppeteer modules. It returns * a Promise that resolves to an object containing the imported modules. * @returns Promise that resolves to an object containing the imported Puppeteer modules. */ static async imports(): Promise<{ launch: typeof launch; }> { try { // eslint-disable-next-line import/no-extraneous-dependencies const { launch } = await import("puppeteer"); return { launch }; } catch (e) { console.error(e); throw new Error( "Please install puppeteer as a dependency with, e.g. `yarn add puppeteer`" ); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/notionapi.ts
import {
  APIResponseError,
  Client,
  isFullBlock,
  isFullPage,
  iteratePaginatedAPI,
  APIErrorCode,
  isNotionClientError,
  isFullDatabase,
} from "@notionhq/client";
import { NotionToMarkdown } from "notion-to-md";
import { getBlockChildren } from "notion-to-md/build/utils/notion.js";
import type {
  ListBlockChildrenResponseResults,
  MdBlock,
} from "notion-to-md/build/types";
import yaml from "js-yaml";
import { Document } from "@langchain/core/documents";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

// Extracts the narrowed type `U` from a type-guard function `(x) => x is U`.
// Used below to recover the full-object response types from the SDK's guards.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type GuardType<T> = T extends (x: any, ...rest: any) => x is infer U
  ? U
  : never;

export type GetBlockResponse = Parameters<typeof isFullBlock>[0];
export type GetPageResponse = Parameters<typeof isFullPage>[0];
export type GetDatabaseResponse = Parameters<typeof isFullDatabase>[0];
export type BlockObjectResponse = GuardType<typeof isFullBlock>;
export type PageObjectResponse = GuardType<typeof isFullPage>;
export type DatabaseObjectResponse = GuardType<typeof isFullDatabase>;

// Union of every response shape `load()` may end up holding, including an
// API error (errors are carried as values, not thrown — see `load()`).
export type GetResponse =
  | GetBlockResponse
  | GetPageResponse
  | GetDatabaseResponse
  | APIResponseError;

export type PagePropertiesType = PageObjectResponse["properties"];
export type PagePropertiesValue = PagePropertiesType[keyof PagePropertiesType];

// Type guards for discriminating the GetResponse union above.
export const isPageResponse = (res: GetResponse): res is GetPageResponse =>
  !isNotionClientError(res) && res.object === "page";
export const isDatabaseResponse = (
  res: GetResponse
): res is GetDatabaseResponse =>
  !isNotionClientError(res) && res.object === "database";
export const isErrorResponse = (res: GetResponse): res is APIResponseError =>
  isNotionClientError(res);

// "Full" variants additionally require the SDK's full-object checks to pass
// (partial responses lack properties/title and cannot be parsed below).
export const isPage = (res: GetResponse): res is PageObjectResponse =>
  isPageResponse(res) && isFullPage(res);
export const isDatabase = (res: GetResponse): res is DatabaseObjectResponse =>
  isDatabaseResponse(res) && isFullDatabase(res);

/**
 * Represents the type of Notion API to load documents from. The options
 * are "database" or "page".
 */
// @deprecated `type` property is now automatically determined.
export type NotionAPIType = "database" | "page";

export type OnDocumentLoadedCallback = (
  current: number,
  total: number,
  currentTitle?: string,
  rootTitle?: string
) => void;

export type NotionAPILoaderOptions = {
  clientOptions: ConstructorParameters<typeof Client>[0];
  id: string;
  type?: NotionAPIType; // @deprecated `type` property is now automatically determined.
  callerOptions?: ConstructorParameters<typeof AsyncCaller>[0];
  onDocumentLoaded?: OnDocumentLoadedCallback;
  propertiesAsHeader?: boolean;
};

/**
 * A class that extends the BaseDocumentLoader class. It represents a
 * document loader for loading documents from Notion using the Notion API.
 * @example
 * ```typescript
 * const pageLoader = new NotionAPILoader({
 *   clientOptions: { auth: "<NOTION_INTEGRATION_TOKEN>" },
 *   id: "<PAGE_ID>",
 *   type: "page",
 * });
 * const pageDocs = await pageLoader.loadAndSplit();
 * const dbLoader = new NotionAPILoader({
 *   clientOptions: { auth: "<NOTION_INTEGRATION_TOKEN>" },
 *   id: "<DATABASE_ID>",
 *   type: "database",
 *   propertiesAsHeader: true,
 * });
 * const dbDocs = await dbLoader.load();
 * ```
 */
export class NotionAPILoader extends BaseDocumentLoader {
  // Throttles / retries all Notion API calls.
  private caller: AsyncCaller;

  private notionClient: Client;

  // notion-to-md converter sharing the same client.
  private n2mClient: NotionToMarkdown;

  // Root page or database id supplied by the caller.
  private id: string;

  // Ids waiting to be loaded (FIFO, drained by load()).
  private pageQueue: string[];

  // Ids already processed; used for de-duplication in addToQueue.
  private pageCompleted: string[];

  // Running total of everything ever enqueued (for progress reporting).
  public pageQueueTotal: number;

  private documents: Document[];

  private rootTitle: string;

  private onDocumentLoaded: OnDocumentLoadedCallback;

  private propertiesAsHeader: boolean;

  constructor(options: NotionAPILoaderOptions) {
    super();

    this.caller = new AsyncCaller({
      maxConcurrency: 64,
      ...options.callerOptions,
    });
    this.notionClient = new Client({
      logger: () => {}, // Suppress Notion SDK logger
      ...options.clientOptions,
    });
    this.n2mClient = new NotionToMarkdown({
      notionClient: this.notionClient,
      config: { parseChildPages: false, convertImagesToBase64: false },
    });
    this.id = options.id;
    this.pageQueue = [];
    this.pageCompleted = [];
    this.pageQueueTotal = 0;
    this.documents = [];
    this.rootTitle = "";
    // Default to a no-op progress callback.
    this.onDocumentLoaded = options.onDocumentLoaded ?? ((_ti, _cu) => {});
    this.propertiesAsHeader = options.propertiesAsHeader || false;
  }

  /**
   * Adds a selection of page ids to the pageQueue and removes duplicates.
   * @param items An array of string ids
   */
  private addToQueue(...items: string[]) {
    // Drop anything already queued or already completed.
    const deDuped = items.filter(
      (item) => !this.pageCompleted.concat(this.pageQueue).includes(item)
    );
    this.pageQueue.push(...deDuped);
    this.pageQueueTotal += deDuped.length;
  }

  /**
   * Parses a Notion GetResponse object (page or database) and returns a string of the title.
   * @param obj The Notion GetResponse object to parse.
   * @returns The string of the title, or null when none can be determined.
   */
  private getTitle(obj: GetResponse) {
    if (isPage(obj)) {
      // Pages keep their title inside the single property of type "title".
      const titleProp = Object.values(obj.properties).find(
        (prop) => prop.type === "title"
      );
      if (titleProp) return this.getPropValue(titleProp);
    }
    if (isDatabase(obj))
      return obj.title
        .map((v) =>
          this.n2mClient.annotatePlainText(v.plain_text, v.annotations)
        )
        .join("");
    return null;
  }

  /**
   * Parses the property type and returns a string
   * @param prop The Notion page property to parse.
   * @returns A string of parsed property.
   */
  private getPropValue(prop: PagePropertiesValue) {
    // One case per Notion property type; list-like values are rendered as
    // a bracketed, comma-separated string.
    switch (prop.type) {
      case "number": {
        const propNumber = prop[prop.type];
        return propNumber !== null ? propNumber.toString() : "";
      }
      case "url":
        return prop[prop.type] || "";
      case "select":
        return prop[prop.type]?.name ?? "";
      case "multi_select":
        return `[${prop[prop.type].map((v) => `"${v.name}"`).join(", ")}]`;
      case "status":
        return prop[prop.type]?.name ?? "";
      case "date":
        return `${prop[prop.type]?.start ?? ""}${
          prop[prop.type]?.end ? ` - ${prop[prop.type]?.end}` : ""
        }`;
      case "email":
        return prop[prop.type] || "";
      case "phone_number":
        return prop[prop.type] || "";
      case "checkbox":
        return prop[prop.type].toString();
      case "files":
        return `[${prop[prop.type].map((v) => `"${v.name}"`).join(", ")}]`;
      case "created_by":
        return `["${prop[prop.type].object}", "${prop[prop.type].id}"]`;
      case "created_time":
        return prop[prop.type];
      case "last_edited_by":
        return `["${prop[prop.type].object}", "${prop[prop.type].id}"]`;
      case "last_edited_time":
        return prop[prop.type];
      case "title":
        return prop[prop.type]
          .map((v) =>
            this.n2mClient.annotatePlainText(v.plain_text, v.annotations)
          )
          .join("");
      case "rich_text":
        return prop[prop.type]
          .map((v) =>
            this.n2mClient.annotatePlainText(v.plain_text, v.annotations)
          )
          .join("");
      case "people":
        return `[${prop[prop.type]
          .map((v) => `["${v.object}", "${v.id}"]`)
          .join(", ")}]`;
      case "unique_id":
        return `${prop[prop.type].prefix || ""}${prop[prop.type].number}`;
      case "relation":
        return `[${prop[prop.type].map((v) => `"${v.id}"`).join(", ")}]`;
      default:
        return `Unsupported type: ${prop.type}`;
    }
  }

  /**
   * Parses the properties of a Notion page and returns them as key-value
   * pairs.
   * @param page The Notion page to parse.
   * @returns An object containing the parsed properties as key-value pairs.
   */
  private parsePageProperties(page: PageObjectResponse) {
    return Object.entries(page.properties).reduce((accum, [propName, prop]) => {
      const value = this.getPropValue(prop);
      const props = { ...accum, [propName]: value };
      // The title property is additionally exposed under the reserved key
      // "_title" so consumers can find it without knowing its display name.
      return prop.type === "title" ? { ...props, _title: value } : props;
    }, {} as { [key: string]: string });
  }

  /**
   * Parses the details of a Notion page and returns them as an object.
   * @param page The Notion page to parse.
   * @returns An object containing the parsed details of the page.
   */
  private parsePageDetails(page: PageObjectResponse) {
    // Rename `id` to `notionId` to avoid clashing with other metadata keys.
    const { id, ...rest } = page;
    return {
      ...rest,
      notionId: id,
      properties: this.parsePageProperties(page),
    };
  }

  /**
   * Loads a Notion block and returns it as an MdBlock object.
   * @param block The Notion block to load.
   * @returns A Promise that resolves to an MdBlock object.
   */
  private async loadBlock(block: BlockObjectResponse): Promise<MdBlock> {
    const mdBlock: MdBlock = {
      type: block.type,
      blockId: block.id,
      parent: await this.caller.call(() =>
        this.n2mClient.blockToMarkdown(block)
      ),
      children: [],
    };

    if (block.has_children) {
      // Synced blocks mirror another block; fetch children from the source.
      const block_id =
        block.type === "synced_block" &&
        block.synced_block?.synced_from?.block_id
          ? block.synced_block.synced_from.block_id
          : block.id;

      const childBlocks = await this.loadBlocks(
        await this.caller.call(() =>
          getBlockChildren(this.notionClient, block_id, null)
        )
      );

      mdBlock.children = childBlocks;
    }

    return mdBlock;
  }

  /**
   * Loads Notion blocks and their children recursively.
   * @param blocksResponse The response from the Notion API containing the blocks to load.
   * @returns A Promise that resolves to an array containing the loaded MdBlocks.
   */
  private async loadBlocks(
    blocksResponse: ListBlockChildrenResponseResults
  ): Promise<MdBlock[]> {
    const blocks = blocksResponse.filter(isFullBlock);

    // Add child pages to queue
    const childPages = blocks
      .filter((block) => block.type.includes("child_page"))
      .map((block) => block.id);
    if (childPages.length > 0) this.addToQueue(...childPages);

    // Add child database pages to queue
    const childDatabases = blocks
      .filter((block) => block.type.includes("child_database"))
      .map((block) => this.caller.call(() => this.loadDatabase(block.id)));

    // Load this block and child blocks (child pages/databases are handled
    // via the queue above, not converted inline).
    const loadingMdBlocks = blocks
      .filter((block) => !["child_page", "child_database"].includes(block.type))
      .map((block) => this.loadBlock(block));

    const [mdBlocks] = await Promise.all([
      Promise.all(loadingMdBlocks),
      Promise.all(childDatabases),
    ]);

    return mdBlocks;
  }

  /**
   * Loads a Notion page and its child documents, then adds it to the completed documents array.
   * @param page The Notion page or page ID to load.
   */
  private async loadPage(page: string | PageObjectResponse) {
    // Check page is a page ID or a PageObjectResponse
    const [pageData, pageId] =
      typeof page === "string"
        ? [
            this.caller.call(() =>
              this.notionClient.pages.retrieve({ page_id: page })
            ),
            page,
          ]
        : [page, page.id];

    const [pageDetails, pageBlocks] = await Promise.all([
      pageData,
      this.caller.call(() => getBlockChildren(this.notionClient, pageId, null)),
    ]);

    // Partial page responses cannot be parsed; mark done and move on.
    if (!isFullPage(pageDetails)) {
      this.pageCompleted.push(pageId);
      return;
    }

    const mdBlocks = await this.loadBlocks(pageBlocks);
    const mdStringObject = this.n2mClient.toMarkdownString(mdBlocks);

    let pageContent = mdStringObject.parent;

    const metadata = this.parsePageDetails(pageDetails);

    if (this.propertiesAsHeader) {
      // Prepend page properties as a YAML front-matter block.
      pageContent =
        `---\n` +
        `${yaml.dump(metadata.properties)}` +
        `---\n\n` +
        `${pageContent ?? ""}`;
    }

    // Pages with no renderable content produce no Document.
    if (!pageContent) {
      this.pageCompleted.push(pageId);
      return;
    }

    const pageDocument = new Document({ pageContent, metadata });

    this.documents.push(pageDocument);
    this.pageCompleted.push(pageId);
    this.onDocumentLoaded(
      this.documents.length,
      this.pageQueueTotal,
      this.getTitle(pageDetails) || undefined,
      this.rootTitle
    );
  }

  /**
   * Loads a Notion database and adds it's pages to the queue.
   * @param id The ID of the Notion database to load.
   */
  private async loadDatabase(id: string) {
    try {
      for await (const page of iteratePaginatedAPI(
        this.notionClient.databases.query,
        {
          database_id: id,
          page_size: 50,
        }
      )) {
        this.addToQueue(page.id);
      }
    } catch (e) {
      // NOTE(review): errors here are logged and swallowed, so a failing
      // database query silently yields fewer documents — deliberate
      // best-effort per the TODO below; confirm before relying on
      // completeness of the result.
      console.log(e); // TODO: Catch and report api request errors
    }
  }

  /**
   * Loads the documents from Notion based on the specified options.
   * @returns A Promise that resolves to an array of Documents.
   */
  async load(): Promise<Document[]> {
    // The supplied id may be a page or a database; try both endpoints and
    // keep errors as values so we can distinguish "not found" later.
    const resPagePromise = this.notionClient.pages
      .retrieve({ page_id: this.id })
      .then((res) => {
        this.addToQueue(this.id);
        return res;
      })
      .catch((error: APIResponseError) => error);

    const resDatabasePromise = this.notionClient.databases
      .retrieve({ database_id: this.id })
      .then(async (res) => {
        await this.loadDatabase(this.id);
        return res;
      })
      .catch((error: APIResponseError) => error);

    const [resPage, resDatabase] = await Promise.all([
      resPagePromise,
      resDatabasePromise,
    ]);

    // Check if both resPage and resDatabase resulted in error responses
    const errors = [resPage, resDatabase].filter(isErrorResponse);
    if (errors.length === 2) {
      if (errors.every((e) => e.code === APIErrorCode.ObjectNotFound)) {
        throw new AggregateError([
          Error(
            `Could not find object with ID: ${this.id}. Make sure the relevant pages and databases are shared with your integration.`
          ),
          ...errors,
        ]);
      }
      throw new AggregateError(errors);
    }

    this.rootTitle =
      this.getTitle(resPage) || this.getTitle(resDatabase) || this.id;

    // Drain the queue; loadPage may enqueue further child pages as it runs.
    let pageId = this.pageQueue.shift();
    while (pageId) {
      await this.loadPage(pageId);
      pageId = this.pageQueue.shift();
    }

    return this.documents;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/sonix_audio.ts
import { SonixSpeechRecognitionService } from "sonix-speech-recognition";
import { SpeechToTextRequest } from "sonix-speech-recognition/lib/types.js";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * A class that represents a document loader for transcribing audio files
 * using the Sonix Speech Recognition service.
 * @example
 * ```typescript
 * const loader = new SonixAudioTranscriptionLoader({
 *   sonixAuthKey: "SONIX_AUTH_KEY",
 *   request: {
 *     audioFilePath: "LOCAL_AUDIO_FILE_PATH",
 *     fileName: "FILE_NAME",
 *     language: "en",
 *   },
 * });
 * const docs = await loader.load();
 * ```
 */
export class SonixAudioTranscriptionLoader extends BaseDocumentLoader {
  // Client for the Sonix speech-to-text API, bound to the supplied auth key.
  private readonly service: SonixSpeechRecognitionService;

  // Transcription request (audio path, file name, language) passed at load time.
  private readonly request: SpeechToTextRequest;

  constructor({
    sonixAuthKey,
    request: speechToTextRequest,
  }: {
    sonixAuthKey: string;
    request: SpeechToTextRequest;
  }) {
    super();
    this.service = new SonixSpeechRecognitionService(sonixAuthKey);
    this.request = speechToTextRequest;
  }

  /**
   * Performs the speech-to-text transcription using the
   * SonixSpeechRecognitionService and returns the transcribed text as a
   * Document object.
   * @returns An array containing a single Document with the transcribed text.
   * @throws Error when the Sonix service reports a failed transcription.
   */
  async load(): Promise<Document[]> {
    const result = await this.service.speechToText(this.request);

    // Surface transcription failures instead of returning empty content.
    if (result.status === "failed") {
      throw new Error(
        `Failed to transcribe audio file. Error: ${result.error}`
      );
    }

    return [
      new Document({
        pageContent: result.text,
        metadata: { fileName: this.request.fileName },
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/figma.ts
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Interface representing a Figma file. It includes properties for the
 * file name, role, last modified date, editor type, thumbnail URL,
 * version, document node, schema version, main file key, and an array of
 * branches.
 */
export interface FigmaFile {
  name: string;
  role: string;
  lastModified: string;
  editorType: string;
  thumbnailUrl: string;
  version: string;
  document: Node;
  schemaVersion: number;
  mainFileKey: string;
  branches: Array<{
    key: string;
    name: string;
    thumbnail_url: string;
    last_modified: string;
    link_access: string;
  }>;
}

/**
 * Interface representing the parameters for configuring the FigmaLoader.
 * It includes optional properties for the access token, an array of node
 * IDs, and the file key.
 */
export interface FigmaLoaderParams {
  accessToken?: string;
  nodeIds: string[];
  fileKey: string;
}

/**
 * Document loader that fetches a set of nodes from a Figma file through the
 * Figma REST API and exposes them as a single JSON Document.
 * @example
 * ```typescript
 * const loader = new FigmaFileLoader({
 *   accessToken: "FIGMA_ACCESS_TOKEN",
 *   nodeIds: ["id1", "id2", "id3"],
 *   fileKey: "key",
 * });
 * const docs = await loader.load();
 * ```
 */
export class FigmaFileLoader
  extends BaseDocumentLoader
  implements FigmaLoaderParams
{
  public accessToken?: string;

  public nodeIds: string[];

  public fileKey: string;

  // Request headers; carries the Figma token when one is available.
  private headers: Record<string, string> = {};

  constructor({
    accessToken = getEnvironmentVariable("FIGMA_ACCESS_TOKEN"),
    nodeIds,
    fileKey,
  }: FigmaLoaderParams) {
    super();

    this.accessToken = accessToken;
    this.nodeIds = nodeIds;
    this.fileKey = fileKey;

    // Only attach the auth header when a token was resolved.
    if (this.accessToken) {
      this.headers = { "x-figma-token": this.accessToken };
    }
  }

  /**
   * Constructs the URL for the Figma API call.
   * @returns The constructed URL as a string.
   */
  private constructFigmaApiURL(): string {
    const ids = this.nodeIds.join(",");
    return `https://api.figma.com/v1/files/${this.fileKey}/nodes?ids=${ids}`;
  }

  /**
   * Fetches the Figma file using the Figma API and returns it as a
   * FigmaFile object.
   * @returns A Promise that resolves to a FigmaFile object.
   * @throws Error when the API responds with a non-OK status or empty body.
   */
  private async getFigmaFile(): Promise<FigmaFile> {
    const endpoint = this.constructFigmaApiURL();
    const response = await fetch(endpoint, { headers: this.headers });
    // Parse the body first so error responses can include the API payload.
    const payload = await response.json();

    if (!response.ok) {
      throw new Error(
        `Unable to get figma file: ${response.status} ${JSON.stringify(
          payload
        )}`
      );
    }

    if (!payload) {
      throw new Error("Unable to get file");
    }

    return payload as FigmaFile;
  }

  /**
   * Fetches the Figma file using the Figma API, creates a Document instance
   * with the JSON representation of the file as the page content and the
   * API URL as the metadata, and returns it.
   * @returns A Promise that resolves to an array of Document instances.
   */
  public async load(): Promise<Document[]> {
    const figmaFile = await this.getFigmaFile();
    return [
      new Document({
        pageContent: JSON.stringify(figmaFile),
        metadata: { source: this.constructFigmaApiURL() },
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/github.ts
import ignore, { Ignore } from "ignore";
import binaryExtensions from "binary-extensions";
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
  AsyncCaller,
  AsyncCallerParams,
} from "@langchain/core/utils/async_caller";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import { UnknownHandling } from "langchain/document_loaders/fs/directory";
import { extname } from "../../utils/extname.js";

// Known binary file extensions, used to skip non-text repository files.
const extensions = /* #__PURE__ */ new Set(binaryExtensions);

/**
 * A function that checks if a file path is a binary file based on its
 * extension.
 * @param name The file path to check.
 * @returns A boolean indicating whether the file path is a binary file.
 */
function isBinaryPath(name: string) {
  // extname() returns ".ext"; drop the dot and normalize case for lookup.
  return extensions.has(extname(name).slice(1).toLowerCase());
}

/**
 * An interface that represents a file in a GitHub repository. It has
 * properties for the file name, path, SHA, size, URLs, type, and links.
 */
export interface GithubFile {
  name: string;
  path: string;
  sha: string;
  size: number;
  url: string;
  html_url: string;
  git_url: string;
  download_url: string;
  type: string;
  _links: {
    self: string;
    git: string;
    html: string;
  };
}

/**
 * An interface that represents the response from fetching the content of
 * a file. It has properties for the file contents and metadata.
 */
interface GetContentResponse {
  contents: string;
  metadata: { source: string; repository: string; branch: string };
}

/**
 * An interface describing the submodules of a Git repository.
 */
interface SubmoduleInfo {
  name: string;
  path: string;
  url: string;
  ref: string;
}

/**
 * An interface that represents the parameters for the GithubRepoLoader
 * class. It extends the AsyncCallerParams interface and adds additional
 * properties specific to the GitHub repository loader.
 */
export interface GithubRepoLoaderParams extends AsyncCallerParams {
  /**
   * The base URL of the GitHub instance.
   * To be used when you are not targeting github.com, e.g. a GitHub Enterprise instance.
   */
  baseUrl?: string;

  /**
   * The API endpoint URL of the GitHub instance.
   * To be used when you are not targeting github.com, e.g. a GitHub Enterprise instance.
   */
  apiUrl?: string;

  branch?: string;

  recursive?: boolean;

  /**
   * Set to true to recursively process submodules. Is only effective, when recursive=true.
   */
  processSubmodules?: boolean;

  unknown?: UnknownHandling;

  accessToken?: string;

  ignoreFiles?: (string | RegExp)[];

  ignorePaths?: string[];

  verbose?: boolean;

  /**
   * The maximum number of concurrent calls that can be made. Defaults to 2.
   */
  maxConcurrency?: number;

  /**
   * The maximum number of retries that can be made for a single call,
   * with an exponential backoff between each attempt. Defaults to 2.
   */
  maxRetries?: number;
}

/**
 * A class that extends the BaseDocumentLoader and implements the
 * GithubRepoLoaderParams interface. It represents a document loader for
 * loading files from a GitHub repository.
*/ export class GithubRepoLoader extends BaseDocumentLoader implements GithubRepoLoaderParams { public baseUrl: string; public apiUrl: string; private readonly owner: string; private readonly repo: string; private readonly initialPath: string; private headers: Record<string, string> = {}; public branch: string; public recursive: boolean; public processSubmodules: boolean; public unknown: UnknownHandling; public accessToken?: string; public ignoreFiles: (string | RegExp)[]; public ignore?: Ignore; public verbose?: boolean; public maxConcurrency?: number; public maxRetries?: number; protected caller: AsyncCaller; public ignorePaths?: string[]; private submoduleInfos: SubmoduleInfo[]; constructor( githubUrl: string, { accessToken = getEnvironmentVariable("GITHUB_ACCESS_TOKEN"), baseUrl = "https://github.com", apiUrl = "https://api.github.com", branch = "main", recursive = true, processSubmodules = false, unknown = UnknownHandling.Warn, ignoreFiles = [], ignorePaths, verbose = false, maxConcurrency = 2, maxRetries = 2, ...rest }: GithubRepoLoaderParams = {} ) { super(); this.baseUrl = baseUrl; this.apiUrl = apiUrl; const { owner, repo, path } = this.extractOwnerAndRepoAndPath(githubUrl); this.owner = owner; this.repo = repo; this.initialPath = path; this.branch = branch; this.recursive = recursive; // processing submodules without processing contents of other directories makes no sense if (processSubmodules && !recursive) { throw new Error( `Input property "recursive" must be true if "processSubmodules" is true.` ); } this.processSubmodules = processSubmodules; this.unknown = unknown; this.accessToken = accessToken; this.ignoreFiles = ignoreFiles; this.verbose = verbose; this.maxConcurrency = maxConcurrency; this.maxRetries = maxRetries; this.headers = { "User-Agent": "langchain", }; this.caller = new AsyncCaller({ maxConcurrency, maxRetries, ...rest, }); this.ignorePaths = ignorePaths; if (ignorePaths) { this.ignore = ignore.default().add(ignorePaths); } if 
(this.accessToken) { this.headers = { ...this.headers, Authorization: `Bearer ${this.accessToken}`, }; } } /** * Extracts the owner, repository, and path from a GitHub URL. * @param url The GitHub URL to extract information from. * @returns An object containing the owner, repository, and path extracted from the GitHub URL. */ private extractOwnerAndRepoAndPath(url: string): { owner: string; repo: string; path: string; } { const match = url.match( new RegExp(`${this.baseUrl}/([^/]+)/([^/]+)(/tree/[^/]+/(.+))?`, "i") ); if (!match) { throw new Error("Invalid GitHub URL format."); } return { owner: match[1], repo: match[2], path: match[4] || "" }; } /** * Fetches the files from the GitHub repository and creates Document * instances for each file. It also handles error handling based on the * unknown handling option. * @returns A promise that resolves to an array of Document instances. */ public async load(): Promise<Document[]> { this.log( `Loading documents from ${this.baseUrl}/${this.owner}/${this.repo}/${this.initialPath}...` ); // process repository without submodules const documents: Document[] = (await this.processRepo()).map( (fileResponse) => new Document({ pageContent: fileResponse.contents, metadata: fileResponse.metadata, }) ); if (this.processSubmodules) { // process submodules await this.getSubmoduleInfo(); for (const submoduleInfo of this.submoduleInfos) { documents.push(...(await this.loadSubmodule(submoduleInfo))); } } return documents; } /** * Asynchronously streams documents from the entire GitHub repository. * It is suitable for situations where processing large repositories in a memory-efficient manner is required. * @yields Yields a Promise that resolves to a Document object for each file or submodule content found in the repository. 
*/ public async *loadAsStream(): AsyncGenerator<Document, void, undefined> { this.log( `Loading documents from ${this.baseUrl}/${this.owner}/${this.repo}/${this.initialPath}...` ); yield* await this.processRepoAsStream(this.initialPath); if (!this.processSubmodules) { return; } await this.getSubmoduleInfo(); for (const submoduleInfo of this.submoduleInfos) { yield* await this.loadSubmoduleAsStream(submoduleInfo); } } /** * Loads the information about Git submodules from the repository, if available. */ private async getSubmoduleInfo(): Promise<void> { this.log("Loading info about submodules..."); // we have to fetch the files of the root directory to get the download url of the .gitmodules file // however, we cannot reuse the files retrieved in processRepo() as initialPath may be != "" // so it may be that we end up fetching this file list twice const repoFiles = await this.fetchRepoFiles(""); const gitmodulesFile = repoFiles.filter( ({ name }) => name === ".gitmodules" )?.[0]; if (gitmodulesFile) { const gitmodulesContent = await this.fetchFileContent({ download_url: gitmodulesFile.download_url, } as GithubFile); this.submoduleInfos = await this.parseGitmodules(gitmodulesContent); } else { this.submoduleInfos = []; } this.log(`Found ${this.submoduleInfos.length} submodules:`); for (const submoduleInfo of this.submoduleInfos) { this.log(JSON.stringify(submoduleInfo)); } } /** * Parses the given content of a .gitmodules file. Furthermore, queries the current SHA ref of all submodules. * Returns the submodule information as array. 
* @param gitmodulesContent the content of a .gitmodules file */ private async parseGitmodules( gitmodulesContent: string ): Promise<SubmoduleInfo[]> { let validGitmodulesContent = gitmodulesContent; // in case the .gitmodules file does not end with a newline, we add one to make the regex work if (!validGitmodulesContent.endsWith("\n")) { validGitmodulesContent += "\n"; } // catches the initial line of submodule entries const submodulePattern = /\[submodule "(.*?)"]\n((\s+.*?\s*=\s*.*?\n)*)/g; // catches the properties of a submodule const keyValuePattern = /\s+(.*?)\s*=\s*(.*?)\s/g; const submoduleInfos = []; for (const [, name, propertyLines] of validGitmodulesContent.matchAll( submodulePattern )) { if (!name || !propertyLines) { throw new Error("Could not parse submodule entry"); } const submodulePropertyLines = propertyLines.matchAll(keyValuePattern); let path; let url; for (const [, key, value] of submodulePropertyLines) { if (!key || !value) { throw new Error( `Could not parse key/value pairs for submodule ${name}` ); } switch (key) { case "path": path = value; break; case "url": url = value; if (url.endsWith(".git")) { url = url.substring(0, url.length - 4); } break; default: // ignoring unused keys } } if (!path || !url) { throw new Error(`Missing properties for submodule ${name}`); } // fetch the current ref of the submodule const files = await this.fetchRepoFiles(path); const submoduleInfo: SubmoduleInfo = { name, path, url, ref: files[0].sha, }; submoduleInfos.push(submoduleInfo); } return submoduleInfos; } /** * Loads the documents of the given submodule. Uses the same parameters as for the current repository. * External submodules, i.e. submodules pointing to another GitHub instance, are ignored. 
* @param submoduleInfo the info about the submodule to be loaded */ private async loadSubmodule( submoduleInfo: SubmoduleInfo ): Promise<Document[]> { if (!submoduleInfo.url.startsWith(this.baseUrl)) { this.log(`Ignoring external submodule ${submoduleInfo.url}.`); return []; } else if (!submoduleInfo.path.startsWith(this.initialPath)) { this.log( `Ignoring submodule ${submoduleInfo.url}, as it is not on initial path.` ); return []; } else { this.log( `Accessing submodule ${submoduleInfo.name} (${submoduleInfo.url})...` ); return new GithubRepoLoader(submoduleInfo.url, { accessToken: this.accessToken, apiUrl: this.apiUrl, baseUrl: this.baseUrl, branch: submoduleInfo.ref, recursive: this.recursive, processSubmodules: this.processSubmodules, unknown: this.unknown, ignoreFiles: this.ignoreFiles, ignorePaths: this.ignorePaths, verbose: this.verbose, maxConcurrency: this.maxConcurrency, maxRetries: this.maxRetries, }).load(); } } /** * Asynchronously processes and streams the contents of a specified submodule in the GitHub repository. * @param submoduleInfo the info about the submodule to be loaded * @yields Yields a Promise that resolves to a Document object for each file found in the submodule. 
*/ private async *loadSubmoduleAsStream( submoduleInfo: SubmoduleInfo ): AsyncGenerator<Document, void, undefined> { if (!submoduleInfo.url.startsWith(this.baseUrl)) { this.log(`Ignoring external submodule ${submoduleInfo.url}.`); yield* []; } if (!submoduleInfo.path.startsWith(this.initialPath)) { this.log( `Ignoring submodule ${submoduleInfo.url}, as it is not on initial path.` ); yield* []; } this.log( `Accessing submodule ${submoduleInfo.name} (${submoduleInfo.url})...` ); const submoduleLoader = new GithubRepoLoader(submoduleInfo.url, { accessToken: this.accessToken, baseUrl: this.baseUrl, apiUrl: this.apiUrl, branch: submoduleInfo.ref, recursive: this.recursive, processSubmodules: this.processSubmodules, unknown: this.unknown, ignoreFiles: this.ignoreFiles, ignorePaths: this.ignorePaths, verbose: this.verbose, maxConcurrency: this.maxConcurrency, maxRetries: this.maxRetries, }); yield* await submoduleLoader.processRepoAsStream(submoduleInfo.path); } /** * Determines whether a file or directory should be ignored based on its * path and type. * @param path The path of the file or directory. * @param fileType The type of the file or directory. * @returns A boolean indicating whether the file or directory should be ignored. 
*/ protected shouldIgnore(path: string, fileType: string): boolean { if (fileType !== "dir" && isBinaryPath(path)) { return true; } if (this.ignore !== undefined) { return this.ignore.ignores(path); } return ( fileType !== "dir" && this.ignoreFiles.some((pattern) => { if (typeof pattern === "string") { return path === pattern; } try { return pattern.test(path); } catch { throw new Error(`Unknown ignore file pattern: ${pattern}`); } }) ); } /** * Takes the file info and wrap it in a promise that will resolve to the file content and metadata * @param file * @returns */ private async fetchFileContentWrapper( file: GithubFile ): Promise<GetContentResponse> { const fileContent = await this.fetchFileContent(file).catch((error) => { this.handleError(`Failed wrap file content: ${file}, ${error}`); }); return { contents: fileContent || "", metadata: { source: file.path, repository: `${this.baseUrl}/${this.owner}/${this.repo}`, branch: this.branch, }, }; } /** * Maps a list of files / directories to a list of promises that will fetch the file / directory contents */ private async getCurrentDirectoryFilePromises( files: GithubFile[] ): Promise<Promise<GetContentResponse>[]> { const currentDirectoryFilePromises: Promise<GetContentResponse>[] = []; // Directories have nested files / directories, which is why this is a list of promises of promises const currentDirectoryDirectoryPromises: Promise< Promise<GetContentResponse>[] >[] = []; for (const file of files) { if (file.type !== "dir" && this.shouldIgnore(file.path, file.type)) { continue; } if (file.type === "file" && file.size === 0) { // this is a submodule. ignoring for the moment. 
submodule processing is done separately continue; } if (file.type !== "dir") { try { currentDirectoryFilePromises.push(this.fetchFileContentWrapper(file)); } catch (e) { this.handleError(`Failed to fetch file content: ${file.path}, ${e}`); } } else if (this.recursive) { currentDirectoryDirectoryPromises.push( this.processDirectory(file.path) ); } } const curDirDirectories: Promise<GetContentResponse>[][] = await Promise.all(currentDirectoryDirectoryPromises); return [...currentDirectoryFilePromises, ...curDirDirectories.flat()]; } /** * Begins the process of fetching the contents of the repository */ private async processRepo(): Promise<GetContentResponse[]> { try { // Get the list of file / directory names in the root directory const files = await this.fetchRepoFiles(this.initialPath); // Map the file / directory paths to promises that will fetch the file / directory contents const currentDirectoryFilePromises = await this.getCurrentDirectoryFilePromises(files); return Promise.all(currentDirectoryFilePromises); } catch (error) { this.handleError( `Failed to process directory: ${this.initialPath}, ${error}` ); return Promise.reject(error); } } /** * Asynchronously processes the contents of the entire GitHub repository, * streaming each file as a Document object. * @param path The path of the directory to process. * @yields Yields a Promise that resolves to a Document object for each file found in the repository. 
*/ private async *processRepoAsStream( path: string ): AsyncGenerator<Document, void, undefined> { const files = await this.fetchRepoFiles(path); for (const file of files) { if (file.type !== "dir" && this.shouldIgnore(file.path, file.type)) { continue; } if (file.type === "file") { try { const fileResponse = await this.fetchFileContentWrapper(file); yield new Document({ pageContent: fileResponse.contents, metadata: fileResponse.metadata, }); } catch (error) { this.handleError( `Failed to fetch file content: ${file.path}, ${error}` ); } } else if (this.recursive) { yield* await this.processDirectoryAsStream(file.path); } } } /** * Fetches the contents of a directory and maps the file / directory paths * to promises that will fetch the file / directory contents. * @param path The path of the directory to process. * @returns A promise that resolves to an array of promises that will fetch the file / directory contents. */ private async processDirectory( path: string ): Promise<Promise<GetContentResponse>[]> { try { const files = await this.fetchRepoFiles(path); return this.getCurrentDirectoryFilePromises(files); } catch (error) { this.handleError(`Failed to process directory: ${path}, ${error}`); return Promise.reject(error); } } /** * Asynchronously processes the contents of a given directory in the GitHub repository, * streaming each file as a Document object. * @param path The path of the directory to process. * @yields Yields a Promise that resolves to a Document object for each file in the directory. 
*/ private async *processDirectoryAsStream( path: string ): AsyncGenerator<Document, void, undefined> { const files = await this.fetchRepoFiles(path); for (const file of files) { if (file.type !== "dir" && this.shouldIgnore(file.path, file.type)) { continue; } if (file.type === "file") { try { const fileResponse = await this.fetchFileContentWrapper(file); yield new Document({ pageContent: fileResponse.contents, metadata: fileResponse.metadata, }); } catch { this.handleError(`Failed to fetch file content: ${file.path}`); } } else if (this.recursive) { yield* await this.processDirectoryAsStream(file.path); } } } /** * Fetches the files from a GitHub repository. * If the path denotes a single file, the resulting array contains only one element. * @param path The path of the repository to fetch the files from. * @returns A promise that resolves to an array of GithubFile instances. */ private async fetchRepoFiles(path: string): Promise<GithubFile[]> { const url = `${this.apiUrl}/repos/${this.owner}/${this.repo}/contents/${path}?ref=${this.branch}`; return this.caller.call(async () => { this.log(`Fetching ${url}`); const response = await fetch(url, { headers: this.headers }); const data = await response.json(); if (!response.ok) { throw new Error( `Unable to fetch repository files: ${ response.status } ${JSON.stringify(data)}` ); } if (Array.isArray(data)) { return data as GithubFile[]; } else { return [data as GithubFile]; } }); } /** * Fetches the content of a file from a GitHub repository. * @param file The file to fetch the content from. * @returns A promise that resolves to the content of the file. */ private async fetchFileContent(file: GithubFile): Promise<string> { return this.caller.call(async () => { this.log(`Fetching ${file.download_url}`); const response = await fetch(file.download_url, { headers: this.headers, }); return response.text(); }); } /** * Handles errors based on the unknown handling option. * @param message The error message. 
* @returns void */ private handleError(message: string): void { switch (this.unknown) { case UnknownHandling.Ignore: break; case UnknownHandling.Warn: console.warn(message); break; case UnknownHandling.Error: throw new Error(message); default: throw new Error(`Unknown unknown handling: ${this.unknown}`); } } /** * Logs the given message to the console, if parameter 'verbose' is set to true. * @param message the message to be logged. */ private log(message: string): void { if (this.verbose) { console.log(message); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/browserbase.ts
import { Document, type DocumentInterface } from "@langchain/core/documents";
import {
  BaseDocumentLoader,
  type DocumentLoader,
} from "@langchain/core/document_loaders/base";
import Browserbase, { LoadOptions, ClientOptions } from "@browserbasehq/sdk";

type BrowserbaseLoaderOptions = ClientOptions & LoadOptions;

/**
 * Load pre-rendered web pages using a headless browser hosted on Browserbase.
 *
 * Depends on `@browserbasehq/sdk` package.
 * Get your API key from https://browserbase.com
 *
 * @example
 * ```typescript
 * import { BrowserbaseLoader } from "langchain/document_loaders/web/browserbase";
 *
 * const loader = new BrowserbaseLoader(["https://example.com"], {
 *   apiKey: process.env.BROWSERBASE_API_KEY,
 *   textContent: true,
 * });
 *
 * const docs = await loader.load();
 * ```
 *
 * @param {string[]} urls - The URLs of the web pages to load.
 * @param {BrowserbaseLoaderOptions} [options] - Browserbase client options.
 */
export class BrowserbaseLoader
  extends BaseDocumentLoader
  implements DocumentLoader
{
  urls: string[];

  options: BrowserbaseLoaderOptions;

  browserbase: Browserbase;

  constructor(urls: string[], options: BrowserbaseLoaderOptions = {}) {
    super();
    this.urls = urls;
    this.options = options;
    this.browserbase = new Browserbase(options);
  }

  /**
   * Load pages from URLs.
   *
   * @returns {Promise<DocumentInterface[]>} - A promise which resolves to a list of documents.
   */
  async load(): Promise<DocumentInterface[]> {
    const documents: DocumentInterface[] = [];
    for await (const doc of this.lazyLoad()) {
      documents.push(doc);
    }
    return documents;
  }

  /**
   * Load pages from URLs, yielding one Document per loaded page.
   *
   * @returns {AsyncGenerator<DocumentInterface>} - A generator that yields documents.
   */
  async *lazyLoad() {
    const pages = await this.browserbase.loadURLs(this.urls, this.options);

    let index = 0;
    for await (const page of pages) {
      yield new Document({
        pageContent: page,
        // NOTE(review): assumes loadURLs yields pages in the same order as
        // this.urls — confirm against the SDK version in use.
        metadata: {
          url: this.urls[index],
        },
      });
      // FIX: was `index += index + 1;`, which advanced 0 -> 1 -> 3 -> 7 ...
      // so from the third page on, metadata.url referenced the wrong URL
      // (or undefined). Advance by one page per iteration.
      index += 1;
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/cheerio.ts
import type {
  CheerioAPI,
  CheerioOptions,
  load as LoadT,
  SelectorType,
} from "cheerio";
import { Document } from "@langchain/core/documents";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import type { WebBaseLoaderParams, WebBaseLoader } from "./html.js";

/**
 * @deprecated Either import the CheerioWebBaseLoaderParams from @langchain/community/document_loaders/web/cheerio
 * or use the WebBaseLoaderParams from @langchain/community/document_loaders/web/html.
 */
export { WebBaseLoaderParams };

/**
 * Represents the parameters for configuring the CheerioWebBaseLoader. It
 * extends the WebBaseLoaderParams interface and adds additional parameters
 * specific to loading with Cheerio.
 */
export interface CheerioWebBaseLoaderParams extends WebBaseLoaderParams {
  /**
   * The selector to use to extract the text from the document. Defaults to
   * "body".
   */
  selector?: SelectorType;
}

/**
 * A class that extends the BaseDocumentLoader and implements the
 * DocumentLoader interface. It represents a document loader for loading
 * web-based documents using Cheerio.
 * @example
 * ```typescript
 * const loader = new CheerioWebBaseLoader("https://exampleurl.com");
 * const docs = await loader.load();
 * console.log({ docs });
 * ```
 */
export class CheerioWebBaseLoader
  extends BaseDocumentLoader
  implements WebBaseLoader
{
  // Per-request timeout in milliseconds (default 10000).
  timeout: number;

  // Rate-limits / retries the fetch calls; configured from the remaining
  // constructor fields after loader-specific ones are destructured out.
  caller: AsyncCaller;

  // CSS selector whose text content becomes the Document body.
  selector?: SelectorType;

  // Optional decoder applied to the raw response bytes instead of the
  // default response.text() decoding.
  textDecoder?: TextDecoder;

  // Extra HTTP headers sent with every fetch.
  headers?: HeadersInit;

  constructor(public webPath: string, fields?: CheerioWebBaseLoaderParams) {
    super();
    const { timeout, selector, textDecoder, headers, ...rest } = fields ?? {};
    this.timeout = timeout ?? 10000;
    // All non-loader-specific fields are forwarded to the AsyncCaller
    // (e.g. maxConcurrency / maxRetries).
    this.caller = new AsyncCaller(rest);
    this.selector = selector ?? "body";
    this.textDecoder = textDecoder;
    this.headers = headers;
  }

  /**
   * Fetches web documents from the given array of URLs and loads them using Cheerio.
   * It returns an array of CheerioAPI instances.
   * @param urls An array of URLs to fetch and load.
   * @param caller The AsyncCaller used to rate-limit the fetches.
   * @param timeout Per-request timeout in milliseconds, or undefined for none.
   * @param textDecoder Optional decoder for the raw response bytes.
   * @param options Cheerio parsing options plus optional request headers.
   * @returns A Promise that resolves to an array of CheerioAPI instances.
   */
  static async scrapeAll(
    urls: string[],
    caller: AsyncCaller,
    timeout: number | undefined,
    textDecoder?: TextDecoder,
    options?: CheerioOptions & {
      headers?: HeadersInit;
    }
  ): Promise<CheerioAPI[]> {
    // All URLs are fetched in parallel; the caller enforces any
    // concurrency limits.
    return Promise.all(
      urls.map((url) =>
        CheerioWebBaseLoader._scrape(url, caller, timeout, textDecoder, options)
      )
    );
  }

  /**
   * Fetches a single URL and parses the response body with Cheerio.
   * @param url The URL to fetch.
   * @param caller The AsyncCaller used to rate-limit the fetch.
   * @param timeout Per-request timeout in milliseconds, or undefined for none.
   * @param textDecoder Optional decoder for the raw response bytes.
   * @param options Cheerio parsing options plus optional request headers.
   * @returns A Promise that resolves to a CheerioAPI instance.
   */
  static async _scrape(
    url: string,
    caller: AsyncCaller,
    timeout: number | undefined,
    textDecoder?: TextDecoder,
    options?: CheerioOptions & {
      headers?: HeadersInit;
    }
  ): Promise<CheerioAPI> {
    const { headers, ...cheerioOptions } = options ?? {};
    const { load } = await CheerioWebBaseLoader.imports();
    const response = await caller.call(fetch, url, {
      signal: timeout ? AbortSignal.timeout(timeout) : undefined,
      headers,
    });
    // Prefer the explicit decoder when provided; otherwise fall back to
    // the default text decoding of the response.
    const html =
      textDecoder?.decode(await response.arrayBuffer()) ??
      (await response.text());
    return load(html, cheerioOptions);
  }

  /**
   * Fetches the web document from the webPath and loads it using Cheerio.
   * It returns a CheerioAPI instance.
   * @returns A Promise that resolves to a CheerioAPI instance.
   */
  async scrape(): Promise<CheerioAPI> {
    // Only the instance headers are forwarded here; cheerio parsing
    // options are not configurable on the instance path.
    const options = { headers: this.headers };
    return CheerioWebBaseLoader._scrape(
      this.webPath,
      this.caller,
      this.timeout,
      this.textDecoder,
      options
    );
  }

  /**
   * Extracts the text content from the loaded document using the selector
   * and creates a Document instance with the extracted text and metadata.
   * It returns an array of Document instances.
   * @returns A Promise that resolves to an array of Document instances.
   */
  async load(): Promise<Document[]> {
    const $ = await this.scrape();
    const text = $(this.selector).text();
    const metadata = { source: this.webPath };
    return [new Document({ pageContent: text, metadata })];
  }

  /**
   * A static method that dynamically imports the Cheerio library and
   * returns the load function. If the import fails, it throws an error.
   * @returns A Promise that resolves to an object containing the load function from the Cheerio library.
   * @throws If the optional `cheerio` peer dependency is not installed.
   */
  static async imports(): Promise<{
    load: typeof LoadT;
  }> {
    try {
      const { load } = await import("cheerio");
      return { load };
    } catch (e) {
      console.error(e);
      throw new Error(
        "Please install cheerio as a dependency with, e.g. `yarn add cheerio`"
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/youtube.ts
import { TranscriptResponse, YoutubeTranscript } from "youtube-transcript";
import { Innertube } from "youtubei.js";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Configuration options for the YoutubeLoader class. Includes properties
 * such as the videoId, language, and addVideoInfo.
 */
interface YoutubeConfig {
  videoId: string;
  language?: string;
  addVideoInfo?: boolean;
}

/**
 * Metadata of a YouTube video. Includes properties such as the source
 * (videoId), description, title, view_count, author, and category.
 */
interface VideoMetadata {
  source: string;
  description?: string;
  title?: string;
  view_count?: number;
  author?: string;
  // NOTE(review): `category` is declared but never populated by load() —
  // confirm whether it is still needed.
  category?: string;
}

/**
 * A document loader for loading data from YouTube videos. It uses the
 * youtube-transcript and youtubei.js libraries to fetch the transcript
 * and video metadata.
 * @example
 * ```typescript
 * const loader = YoutubeLoader.createFromUrl(
 *   "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
 *   { language: "en", addVideoInfo: true }
 * );
 * const docs = await loader.load();
 * ```
 */
export class YoutubeLoader extends BaseDocumentLoader {
  private videoId: string;

  private language?: string;

  private addVideoInfo: boolean;

  constructor(config: YoutubeConfig) {
    super();
    this.videoId = config.videoId;
    this.language = config?.language;
    this.addVideoInfo = config?.addVideoInfo ?? false;
  }

  /**
   * Extracts the videoId from a YouTube video URL.
   * Supports watch, embed, short (youtu.be), /v/ and /u/ URL shapes.
   * @param url The URL of the YouTube video.
   * @returns The videoId of the YouTube video.
   * @throws If no 11-character video id can be extracted from the URL.
   */
  private static getVideoID(url: string): string {
    const match = url.match(
      /.*(?:youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=)([^#&?]*).*/
    );
    // YouTube video ids are exactly 11 characters long.
    if (match !== null && match[1].length === 11) {
      return match[1];
    } else {
      throw new Error("Failed to get youtube video id from the url");
    }
  }

  /**
   * Creates a new instance of the YoutubeLoader class from a YouTube video
   * URL.
   * @param url The URL of the YouTube video.
   * @param config Optional configuration options for the YoutubeLoader instance, excluding the videoId.
   * @returns A new instance of the YoutubeLoader class.
   */
  static createFromUrl(
    url: string,
    config?: Omit<YoutubeConfig, "videoId">
  ): YoutubeLoader {
    const videoId = YoutubeLoader.getVideoID(url);
    return new YoutubeLoader({ ...config, videoId });
  }

  /**
   * Loads the transcript and video metadata from the specified YouTube
   * video. It uses the youtube-transcript library to fetch the transcript
   * and the youtubei.js library to fetch the video metadata.
   * All transcript segments are joined into a single Document.
   * @returns An array of Documents representing the retrieved data.
   * @throws If the transcript (or, when requested, the video info) cannot
   * be fetched.
   */
  async load(): Promise<Document[]> {
    let transcript: TranscriptResponse[] | undefined;
    const metadata: VideoMetadata = {
      source: this.videoId,
    };
    try {
      transcript = await YoutubeTranscript.fetchTranscript(this.videoId, {
        lang: this.language,
      });
      if (transcript === undefined) {
        throw new Error("Transcription not found");
      }
      if (this.addVideoInfo) {
        const youtube = await Innertube.create();
        const info = (await youtube.getBasicInfo(this.videoId)).basic_info;
        metadata.description = info.short_description;
        metadata.title = info.title;
        metadata.view_count = info.view_count;
        metadata.author = info.author;
      }
    } catch (e: unknown) {
      // Both transcript and video-info failures are surfaced as a single
      // wrapped error.
      throw new Error(
        `Failed to get YouTube video transcription: ${(e as Error).message}`
      );
    }
    const document = new Document({
      pageContent: transcript.map((item) => item.text).join(" "),
      metadata,
    });

    return [document];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/spider.ts
import { Spider } from "@spider-cloud/spider-client";
import { Document, type DocumentInterface } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Interface representing the parameters for the Spider loader. It
 * includes properties such as the URL to scrape or crawl and the API key.
 */
interface SpiderLoaderParameters {
  /**
   * URL to scrape or crawl
   */
  url: string;

  /**
   * API key for Spider. If not provided, the default value is the value of the SPIDER_API_KEY environment variable.
   */
  apiKey?: string;

  /**
   * Mode of operation. Can be either "crawl" or "scrape". If not provided, the default value is "scrape".
   */
  mode?: "crawl" | "scrape";
  params?: Record<string, unknown>;
}

// Shape of a single document as returned by the Spider API.
interface SpiderDocument {
  content: string;
  metadata: Record<string, unknown>;
}

/**
 * Class representing a document loader for loading data from
 * Spider (spider.cloud). It extends the BaseDocumentLoader class.
 * @example
 * ```typescript
 * const loader = new SpiderLoader({
 *   url: "{url}",
 *   apiKey: "{apiKey}",
 *   mode: "crawl"
 * });
 * const docs = await loader.load();
 * ```
 */
export class SpiderLoader extends BaseDocumentLoader {
  private apiKey: string;

  private url: string;

  private mode: "crawl" | "scrape";

  private params?: Record<string, unknown>;

  constructor(loaderParams: SpiderLoaderParameters) {
    super();
    const {
      apiKey = getEnvironmentVariable("SPIDER_API_KEY"),
      url,
      mode = "scrape",
      params,
    } = loaderParams;
    if (!apiKey) {
      throw new Error(
        "Spider API key not set. You can set it as SPIDER_API_KEY in your .env file, or pass it to Spider."
      );
    }

    this.apiKey = apiKey;
    this.url = url;
    this.mode = mode;
    // Default to markdown output with metadata when no params are given.
    this.params = params || { metadata: true, return_format: "markdown" };
  }

  /**
   * Loads the data from the Spider.
   * Dispatches to scrapeUrl or crawlUrl based on the configured mode.
   * @returns An array of Documents representing the retrieved data.
   * @throws An error if the data could not be loaded.
   */
  public async load(): Promise<DocumentInterface[]> {
    const app = new Spider({ apiKey: this.apiKey });
    let spiderDocs: SpiderDocument[];

    if (this.mode === "scrape") {
      const response = await app.scrapeUrl(this.url, this.params);
      if (response.error) {
        throw new Error(
          `Spider: Failed to scrape URL. Error: ${response.error}`
        );
      }
      // NOTE(review): assumes the scrape response is directly castable to
      // SpiderDocument[] — confirm against the installed
      // @spider-cloud/spider-client version.
      spiderDocs = response as SpiderDocument[];
    } else if (this.mode === "crawl") {
      const response = await app.crawlUrl(this.url, this.params);
      if (response.error) {
        throw new Error(
          `Spider: Failed to crawl URL. Error: ${response.error}`
        );
      }
      spiderDocs = response as SpiderDocument[];
    } else {
      // Defensive: unreachable with the declared mode union, but guards
      // against unchecked casts from callers.
      throw new Error(
        `Unrecognized mode '${this.mode}'. Expected one of 'crawl', 'scrape'.`
      );
    }

    return spiderDocs.map(
      (doc) =>
        new Document({
          pageContent: doc.content || "",
          metadata: doc.metadata || {},
        })
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/college_confidential.ts
import { Document } from "@langchain/core/documents";
import { CheerioWebBaseLoader } from "./cheerio.js";

/**
 * Document loader for pages of the College Confidential website, built on
 * top of CheerioWebBaseLoader.
 * @example
 * ```typescript
 * const loader = new CollegeConfidentialLoader("https:exampleurl.com");
 * const docs = await loader.load();
 * console.log({ docs });
 * ```
 */
export class CollegeConfidentialLoader extends CheerioWebBaseLoader {
  constructor(webPath: string) {
    super(webPath);
  }

  /**
   * Scrapes the page and extracts the text of the site's main content
   * element, returning it as a single Document whose metadata records the
   * source URL.
   * @returns An array containing one Document with the extracted text.
   */
  public async load(): Promise<Document[]> {
    const page = await this.scrape();
    const pageContent = page("main[class='skin-handler']").text();
    return [
      new Document({
        pageContent,
        metadata: { source: this.webPath },
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/searchapi.ts
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

type JSONPrimitive = string | number | boolean | null;
type JSONValue = JSONPrimitive | JSONObject | JSONArray;
interface JSONObject {
  [key: string]: JSONValue;
}
interface JSONArray extends Array<JSONValue> {}

/**
 * SearchApiParameters Type Definition.
 *
 * For more parameters and supported search engines, refer specific engine documentation:
 * Google - https://www.searchapi.io/docs/google
 * Google News - https://www.searchapi.io/docs/google-news
 * Google Scholar - https://www.searchapi.io/docs/google-scholar
 * YouTube Transcripts - https://www.searchapi.io/docs/youtube-transcripts
 * and others.
 *
 */
type SearchApiParameters = {
  [key: string]: JSONValue;
};

/**
 * Class representing a document loader for loading search results from
 * the SearchApi. It extends the BaseDocumentLoader class.
 * @example
 * ```typescript
 * const loader = new SearchApiLoader({
 *   q: "{query}",
 *   apiKey: "{apiKey}",
 *   engine: "google",
 * });
 * const docs = await loader.load();
 * ```
 */
export class SearchApiLoader extends BaseDocumentLoader {
  private apiKey: string;

  private parameters: SearchApiParameters;

  constructor(params: SearchApiParameters) {
    super();
    const { apiKey = getEnvironmentVariable("SEARCHAPI_API_KEY") } = params;
    if (typeof apiKey !== "string") {
      throw new Error("Invalid type for apiKey. Expected string.");
    }
    if (!apiKey) {
      throw new Error(
        "SearchApi API key not set. You can set it as SEARCHAPI_API_KEY in your .env file, or pass it to SearchApi."
      );
    }
    this.apiKey = apiKey;
    // All caller params (including any apiKey) are kept here; apiKey is
    // filtered out again in buildUrl before being sent as api_key.
    this.parameters = { ...params };
  }

  /**
   * Builds the URL for the SearchApi search request.
   * Defaults the engine to "google" and injects the api_key; caller-set
   * values take precedence over these defaults.
   * @returns The URL for the search request.
   */
  public buildUrl(): string {
    this.parameters = {
      engine: "google",
      api_key: this.apiKey,
      ...this.parameters,
    };

    const preparedParams: [string, string][] = Object.entries(this.parameters)
      .filter(
        ([key, value]) =>
          value !== undefined && value !== null && key !== "apiKey"
      )
      .map(([key, value]) => [key, `${value}`]);

    const searchParams = new URLSearchParams(preparedParams);

    return `https://www.searchapi.io/api/v1/search?${searchParams}`;
  }

  /**
   * Extracts documents from the provided output.
   * Transcript results are joined into one Document; every other response
   * type yields one Document per result, JSON-stringified.
   * @param output - The output to extract documents from.
   * @param responseType - The type of the response to extract documents from.
   * @returns An array of Documents.
   */
  private extractDocuments(output: unknown, responseType: string): Document[] {
    const documents: Document[] = [];
    const results = Array.isArray(output) ? output : [output];
    if (responseType === "transcripts") {
      const pageContent = results.map((result) => result.text).join("\n");
      const metadata = {
        source: "SearchApi",
        responseType,
      };
      documents.push(new Document({ pageContent, metadata }));
    } else {
      for (const result of results) {
        const pageContent = JSON.stringify(result);
        const metadata = {
          source: "SearchApi",
          responseType,
        };
        documents.push(new Document({ pageContent, metadata }));
      }
    }
    return documents;
  }

  /**
   * Processes the response data from the SearchApi search request and converts it into an array of Documents.
   * Only the response sections listed in responseTypes are extracted.
   * @param data - The response data from the SearchApi search request.
   * @returns An array of Documents.
   */
  public processResponseData(data: Record<string, unknown>): Document[] {
    const documents: Document[] = [];
    const responseTypes = [
      "answer_box",
      "shopping_results",
      "knowledge_graph",
      "organic_results",
      "transcripts",
    ];
    for (const responseType of responseTypes) {
      if (responseType in data) {
        documents.push(
          ...this.extractDocuments(data[responseType], responseType)
        );
      }
    }
    return documents;
  }

  /**
   * Fetches the data from the provided URL and returns it as a JSON object.
   * If an error occurs during the fetch operation, an exception is thrown with the error message.
   * @param url - The URL to fetch data from.
   * @returns A promise that resolves to the fetched data as a JSON object.
   * @throws An error if the fetch operation fails.
   */
  private async fetchData(url: string): Promise<Record<string, unknown>> {
    const response = await fetch(url);
    const data = await response.json();

    if (data.error) {
      throw new Error(
        `Failed to load search results from SearchApi due to: ${data.error}`
      );
    }

    return data;
  }

  /**
   * Loads the search results from the SearchApi.
   * @returns An array of Documents representing the search results.
   * @throws An error if the search results could not be loaded.
   */
  public async load(): Promise<Document[]> {
    const url = this.buildUrl();

    const data = await this.fetchData(url);
    try {
      return this.processResponseData(data);
    } catch (error) {
      console.error(error);
      throw new Error(
        `Failed to process search results from SearchApi: ${error}`
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/azure_blob_storage_container.ts
import { BlobServiceClient } from "@azure/storage-blob";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import { AzureBlobStorageFileLoader } from "./azure_blob_storage_file.js";
import { UnstructuredLoaderOptions } from "../fs/unstructured.js";

/**
 * Interface representing the configuration for accessing an Azure Blob
 * Storage container. It includes properties for the connection string and
 * container name.
 */
interface AzureBlobStorageContainerConfig {
  connectionString: string;
  container: string;
}

/**
 * Interface representing the configuration for the
 * AzureBlobStorageContainerLoader. It includes properties for the
 * azureConfig and unstructuredConfig. The azureConfig property contains
 * the Azure Blob Storage container configuration, and the
 * unstructuredConfig property contains the options for the
 * UnstructuredLoader.
 */
interface AzureBlobStorageContainerLoaderConfig {
  azureConfig: AzureBlobStorageContainerConfig;
  unstructuredConfig?: UnstructuredLoaderOptions;
}

/**
 * Class representing a document loader that loads documents from an Azure
 * Blob Storage container. It extends the BaseDocumentLoader class.
 */
export class AzureBlobStorageContainerLoader extends BaseDocumentLoader {
  private readonly connectionString: string;

  private readonly container: string;

  private readonly unstructuredConfig?: UnstructuredLoaderOptions;

  constructor({
    azureConfig,
    unstructuredConfig,
  }: AzureBlobStorageContainerLoaderConfig) {
    super();
    this.connectionString = azureConfig.connectionString;
    this.container = azureConfig.container;
    this.unstructuredConfig = unstructuredConfig;
  }

  /**
   * Method to load documents from an Azure Blob Storage container. It
   * creates a BlobServiceClient using the connection string, gets the
   * container client using the container name, and iterates over the blobs
   * in the container. For each blob, it creates an instance of
   * AzureBlobStorageFileLoader and loads the documents using the loader.
   * The loaded documents are concatenated to the docs array and returned.
   * Blobs are processed sequentially, one loader per blob.
   * @returns An array of loaded documents.
   */
  public async load() {
    const blobServiceClient = BlobServiceClient.fromConnectionString(
      this.connectionString,
      {
        userAgentOptions: {
          userAgentPrefix: "langchainjs-blob-storage-container",
        },
      }
    );

    const containerClient = blobServiceClient.getContainerClient(
      this.container
    );

    let docs: Document[] = [];
    for await (const blob of containerClient.listBlobsFlat()) {
      // Delegate each blob to the single-file loader, which handles
      // download and Unstructured parsing.
      const loader = new AzureBlobStorageFileLoader({
        azureConfig: {
          connectionString: this.connectionString,
          container: this.container,
          blobName: blob.name,
        },
        unstructuredConfig: this.unstructuredConfig,
      });
      docs = docs.concat(await loader.load());
    }

    return docs;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/firecrawl.ts
import FirecrawlApp from "@mendable/firecrawl-js"; import { Document, type DocumentInterface } from "@langchain/core/documents"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; /** * Interface representing the parameters for the Firecrawl loader. It * includes properties such as the URL to scrape or crawl and the API key. */ interface FirecrawlLoaderParameters { /** * URL to scrape or crawl */ url: string; /** * API key for Firecrawl. If not provided, the default value is the value of the FIRECRAWL_API_KEY environment variable. */ apiKey?: string; /** * API URL for Firecrawl. */ apiUrl?: string; /** * Mode of operation. Can be "crawl", "scrape", or "map". If not provided, the default value is "crawl". */ mode?: "crawl" | "scrape" | "map"; params?: Record<string, unknown>; } interface FirecrawlDocument { markdown?: string; html?: string; rawHtml?: string; metadata?: Record<string, unknown>; } /** * Class representing a document loader for loading data from * Firecrawl (firecrawl.dev). It extends the BaseDocumentLoader class. * @example * ```typescript * const loader = new FireCrawlLoader({ * url: "{url}", * apiKey: "{apiKey}", * mode: "crawl" * }); * const docs = await loader.load(); * ``` */ export class FireCrawlLoader extends BaseDocumentLoader { private apiKey: string; private apiUrl?: string; private url: string; private mode: "crawl" | "scrape" | "map"; private params?: Record<string, unknown>; constructor(loaderParams: FirecrawlLoaderParameters) { super(); const { apiKey = getEnvironmentVariable("FIRECRAWL_API_KEY"), apiUrl, url, mode = "crawl", params, } = loaderParams; if (!apiKey) { throw new Error( "Firecrawl API key not set. You can set it as FIRECRAWL_API_KEY in your .env file, or pass it to Firecrawl." ); } this.apiKey = apiKey; this.apiUrl = apiUrl; this.url = url; this.mode = mode; this.params = params; } /** * Loads data from Firecrawl. 
* @returns An array of Documents representing the retrieved data. * @throws An error if the data could not be loaded. */ public async load(): Promise<DocumentInterface[]> { const params: ConstructorParameters<typeof FirecrawlApp>[0] = { apiKey: this.apiKey, }; if (this.apiUrl !== undefined) { params.apiUrl = this.apiUrl; } const app = new FirecrawlApp(params); let firecrawlDocs: FirecrawlDocument[]; if (this.mode === "scrape") { // eslint-disable-next-line @typescript-eslint/no-explicit-any const response = await app.scrapeUrl(this.url, this.params as any); if (!response.success) { throw new Error( `Firecrawl: Failed to scrape URL. Error: ${response.error}` ); } firecrawlDocs = [response] as FirecrawlDocument[]; } else if (this.mode === "crawl") { const response = await app.crawlUrl(this.url, this.params); if (!response.success) { throw new Error( `Firecrawl: Failed to crawl URL. Error: ${response.error}` ); } firecrawlDocs = response.data as FirecrawlDocument[]; } else if (this.mode === "map") { const response = await app.mapUrl(this.url, this.params); if (!response.success) { throw new Error( `Firecrawl: Failed to map URL. Error: ${response.error}` ); } firecrawlDocs = response.links as FirecrawlDocument[]; return firecrawlDocs.map( (doc) => new Document({ pageContent: JSON.stringify(doc), }) ); } else { throw new Error( `Unrecognized mode '${this.mode}'. Expected one of 'crawl', 'scrape'.` ); } return firecrawlDocs.map( (doc) => new Document({ pageContent: doc.markdown || doc.html || doc.rawHtml || "", metadata: doc.metadata || {}, }) ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/s3.ts
import * as fsDefault from "node:fs"; import * as path from "node:path"; import * as os from "node:os"; import { Readable } from "node:stream"; import { S3Client, GetObjectCommand, S3ClientConfig } from "@aws-sdk/client-s3"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; import { UnstructuredLoader as UnstructuredLoaderDefault } from "../fs/unstructured.js"; /** * Represents the configuration options for the S3 client. It extends the * S3ClientConfig interface from the "@aws-sdk/client-s3" package and * includes additional deprecated properties for access key ID and secret * access key. */ export type S3Config = S3ClientConfig & { /** @deprecated Use the credentials object instead */ accessKeyId?: string; /** @deprecated Use the credentials object instead */ secretAccessKey?: string; }; /** * Represents the parameters for the S3Loader class. It includes * properties such as the S3 bucket, key, unstructured API URL, * unstructured API key, S3 configuration, file system module, and * UnstructuredLoader module. */ export interface S3LoaderParams { bucket: string; key: string; unstructuredAPIURL: string; unstructuredAPIKey: string; s3Config?: S3Config & { /** @deprecated Use the credentials object instead */ accessKeyId?: string; /** @deprecated Use the credentials object instead */ secretAccessKey?: string; }; fs?: typeof fsDefault; UnstructuredLoader?: typeof UnstructuredLoaderDefault; } /** * A class that extends the BaseDocumentLoader class. It represents a * document loader for loading files from an S3 bucket. 
* @example * ```typescript * const loader = new S3Loader({ * bucket: "my-document-bucket-123", * key: "AccountingOverview.pdf", * s3Config: { * region: "us-east-1", * credentials: { * accessKeyId: "<YourAccessKeyId>", * secretAccessKey: "<YourSecretAccessKey>", * }, * }, * unstructuredAPIURL: "<YourUnstructuredAPIURL>", * unstructuredAPIKey: "<YourUnstructuredAPIKey>", * }); * const docs = await loader.load(); * ``` */ export class S3Loader extends BaseDocumentLoader { private bucket: string; private key: string; private unstructuredAPIURL: string; private unstructuredAPIKey: string; private s3Config: S3Config & { /** @deprecated Use the credentials object instead */ accessKeyId?: string; /** @deprecated Use the credentials object instead */ secretAccessKey?: string; }; private _fs: typeof fsDefault; private _UnstructuredLoader: typeof UnstructuredLoaderDefault; constructor({ bucket, key, unstructuredAPIURL, unstructuredAPIKey, s3Config = {}, fs = fsDefault, UnstructuredLoader = UnstructuredLoaderDefault, }: S3LoaderParams) { super(); this.bucket = bucket; this.key = key; this.unstructuredAPIURL = unstructuredAPIURL; this.unstructuredAPIKey = unstructuredAPIKey; this.s3Config = s3Config; this._fs = fs; this._UnstructuredLoader = UnstructuredLoader; } /** * Loads the file from the S3 bucket, saves it to a temporary directory, * and then uses the UnstructuredLoader to load the file as a document. * @returns An array of Document objects representing the loaded documents. 
*/ public async load() { const tempDir = this._fs.mkdtempSync( path.join(os.tmpdir(), "s3fileloader-") ); const filePath = path.join(tempDir, this.key); try { const s3Client = new S3Client(this.s3Config); const getObjectCommand = new GetObjectCommand({ Bucket: this.bucket, Key: this.key, }); const response = await s3Client.send(getObjectCommand); const objectData = await new Promise<Buffer>((resolve, reject) => { const chunks: Buffer[] = []; // eslint-disable-next-line no-instanceof/no-instanceof if (response.Body instanceof Readable) { response.Body.on("data", (chunk: Buffer) => chunks.push(chunk)); response.Body.on("end", () => resolve(Buffer.concat(chunks))); response.Body.on("error", reject); } else { reject(new Error("Response body is not a readable stream.")); } }); this._fs.mkdirSync(path.dirname(filePath), { recursive: true }); this._fs.writeFileSync(filePath, objectData); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { throw new Error( `Failed to download file ${this.key} from S3 bucket ${this.bucket}: ${e.message}` ); } try { const options = { apiUrl: this.unstructuredAPIURL, apiKey: this.unstructuredAPIKey, }; const unstructuredLoader = new this._UnstructuredLoader( filePath, options ); const docs = await unstructuredLoader.load(); return docs; } catch { throw new Error( `Failed to load file ${filePath} using unstructured loader.` ); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/sitemap.ts
import { Document, DocumentInterface } from "@langchain/core/documents"; import { chunkArray } from "@langchain/core/utils/chunk_array"; import { CheerioWebBaseLoader, CheerioWebBaseLoaderParams } from "./cheerio.js"; /** * Interface representing the parameters for initializing a SitemapLoader. * @interface SitemapLoaderParams * @extends CheerioWebBaseLoaderParams */ export interface SitemapLoaderParams extends CheerioWebBaseLoaderParams { /** * @property {(string | RegExp)[] | undefined} filterUrls - A list of regexes. Only URLs that match one of the filter URLs will be loaded. * WARNING: The filter URLs are interpreted as regular expressions. Escape special characters if needed. */ filterUrls?: (string | RegExp)[]; /** * The size to chunk the sitemap URLs into for scraping. * @default {300} */ chunkSize?: number; } const DEFAULT_CHUNK_SIZE = 300; type SiteMapElement = { loc: string; changefreq?: string; lastmod?: string; priority?: string; }; export class SitemapLoader extends CheerioWebBaseLoader implements SitemapLoaderParams { allowUrlPatterns: (string | RegExp)[] | undefined; chunkSize: number; constructor(public webPath: string, params: SitemapLoaderParams = {}) { const paramsWithDefaults = { chunkSize: DEFAULT_CHUNK_SIZE, ...params }; let path = webPath.endsWith("/") ? webPath.slice(0, -1) : webPath; // Allow for custom sitemap paths to be passed in with the url. path = path.endsWith(".xml") ? 
path : `${path}/sitemap.xml`; super(path, paramsWithDefaults); this.webPath = path; this.allowUrlPatterns = paramsWithDefaults.filterUrls; this.chunkSize = paramsWithDefaults.chunkSize; } _checkUrlPatterns(url: string): boolean { if (!this.allowUrlPatterns) { return false; } return !this.allowUrlPatterns.some( (pattern) => !new RegExp(pattern).test(url) ); } async parseSitemap() { const $ = await CheerioWebBaseLoader._scrape( this.webPath, this.caller, this.timeout, this.textDecoder, { xmlMode: true, xml: true, } ); const elements: Array<SiteMapElement> = []; $("url").each((_, element) => { const loc = $(element).find("loc").text(); if (!loc) { return; } if (this._checkUrlPatterns(loc)) { return; } const changefreq = $(element).find("changefreq").text(); const lastmod = $(element).find("lastmod").text(); const priority = $(element).find("priority").text(); elements.push({ loc, changefreq, lastmod, priority }); }); $("sitemap").each((_, element) => { const loc = $(element).find("loc").text(); if (!loc) { return; } const changefreq = $(element).find("changefreq").text(); const lastmod = $(element).find("lastmod").text(); const priority = $(element).find("priority").text(); elements.push({ loc, changefreq, lastmod, priority }); }); return elements; } async _loadSitemapUrls( elements: Array<SiteMapElement> ): Promise<DocumentInterface[]> { const all = await CheerioWebBaseLoader.scrapeAll( elements.map((ele) => ele.loc), this.caller, this.timeout, this.textDecoder ); const documents: Array<DocumentInterface> = all.map(($, i) => { if (!elements[i]) { throw new Error("Scraped docs and elements not in sync"); } const text = $(this.selector).text(); const { loc: source, ...metadata } = elements[i]; // extract page metadata const description = $("meta[name='description']").attr("content"); const title = $("meta[property='og:title']").attr("content"); const lang = $("meta[property='og:locale']").attr("content"); return new Document({ pageContent: text, metadata: { 
...metadata, description, title, lang, source: source.trim(), }, }); }); return documents; } async load(): Promise<Document[]> { const elements = await this.parseSitemap(); const chunks = chunkArray(elements, this.chunkSize); const documents: DocumentInterface[] = []; for await (const chunk of chunks) { const chunkedDocuments = await this._loadSitemapUrls(chunk); documents.push(...chunkedDocuments); } return documents; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/serpapi.ts
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Parameters for the SerpAPI loader: the search query and an optional
 * API key (falls back to the SERPAPI_API_KEY environment variable).
 */
interface SerpAPIParameters {
  /**
   * Search Query
   */
  q: string;
  apiKey?: string;
}

/**
 * Document loader that runs a Google search through SerpAPI and turns
 * selected sections of the response into Documents.
 * @example
 * ```typescript
 * const loader = new SerpAPILoader({ q: "{query}", apiKey: "{apiKey}" });
 * const docs = await loader.load();
 * ```
 */
export class SerpAPILoader extends BaseDocumentLoader {
  private apiKey: string;

  private searchQuery: string;

  constructor(params: SerpAPIParameters) {
    super();
    const { apiKey = getEnvironmentVariable("SERPAPI_API_KEY"), q } = params;
    if (!apiKey) {
      throw new Error(
        "SerpAPI API key not set. You can set it as SERPAPI_API_KEY in your .env file, or pass it to SerpAPI."
      );
    }
    this.apiKey = apiKey;
    this.searchQuery = q;
  }

  /**
   * Builds the SerpAPI search request URL for the configured query.
   * @returns The URL for the search request.
   */
  public buildUrl(): string {
    const searchParams = new URLSearchParams({
      api_key: this.apiKey,
      q: this.searchQuery,
    });
    return `https://serpapi.com/search?${searchParams.toString()}`;
  }

  /**
   * Wraps one response section in Document objects, one per entry
   * (a non-array section yields a single Document).
   * @param output - The output to extract documents from.
   * @param responseType - The type of the response section.
   * @returns An array of Documents.
   */
  private extractDocuments(output: unknown, responseType: string): Document[] {
    const entries = Array.isArray(output) ? output : [output];
    return entries.map(
      (entry) =>
        new Document({
          pageContent: JSON.stringify(entry),
          metadata: {
            source: "SerpAPI",
            responseType,
          },
        })
    );
  }

  /**
   * Converts the interesting sections of a SerpAPI response into Documents.
   * @param data - The response data from the SerpAPI search request.
   * @returns An array of Documents.
   */
  public processResponseData(data: Record<string, unknown>): Document[] {
    // Section order is significant: it determines document order.
    const responseTypes = [
      "answer_box",
      "sports_results",
      "shopping_results",
      "knowledge_graph",
      "organic_results",
    ];
    return responseTypes
      .filter((responseType) => responseType in data)
      .flatMap((responseType) =>
        this.extractDocuments(data[responseType], responseType)
      );
  }

  /**
   * Fetches the given URL and returns the parsed JSON body.
   * @param url - The URL to fetch data from.
   * @returns A promise that resolves to the fetched data as a JSON object.
   * @throws An error when SerpAPI reports an error in its response.
   */
  private async fetchData(url: string): Promise<Record<string, unknown>> {
    const response = await fetch(url);
    const payload = await response.json();
    if (payload.error) {
      throw new Error(
        `Failed to load search results from SerpAPI due to: ${payload.error}`
      );
    }
    return payload;
  }

  /**
   * Loads the search results from the SerpAPI.
   * @returns An array of Documents representing the search results.
   * @throws An error if the search results could not be loaded.
   */
  public async load(): Promise<Document[]> {
    const data = await this.fetchData(this.buildUrl());
    try {
      return this.processResponseData(data);
    } catch (error) {
      console.error(error);
      throw new Error(
        `Failed to process search results from SerpAPI: ${error}`
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/confluence.ts
import { htmlToText } from "html-to-text";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Interface representing the parameters for configuring the
 * ConfluencePagesLoader.
 */
export interface ConfluencePagesLoaderParams {
  /** Base URL of the Confluence instance, e.g. "https://example.atlassian.net/wiki". */
  baseUrl: string;
  /** Key of the Confluence space to load pages from. */
  spaceKey: string;
  /** Username for basic auth; used together with `accessToken`. */
  username?: string;
  /** API token for basic auth; used together with `username`. */
  accessToken?: string;
  /** Personal access token (Bearer auth); takes precedence over basic auth. */
  personalAccessToken?: string;
  /** Page size for each REST request. Defaults to 25. */
  limit?: number;
  /** Confluence REST `expand` parameter. Defaults to "body.storage,version". */
  expand?: string;
  /** Maximum fetch attempts per request before giving up. Defaults to 5. */
  maxRetries?: number;
}

/**
 * Interface representing a Confluence page.
 */
export interface ConfluencePage {
  id: string;
  title: string;
  type: string;
  body: {
    storage: {
      value: string;
    };
  };
  status: string;
  version?: {
    number: number;
    when: string;
    by: {
      displayName: string;
    };
  };
}

/**
 * Interface representing the response from the Confluence API.
 */
export interface ConfluenceAPIResponse {
  size: number;
  results: ConfluencePage[];
}

/**
 * Class representing a document loader for loading pages from Confluence.
 * @example
 * ```typescript
 * const loader = new ConfluencePagesLoader({
 *   baseUrl: "https://example.atlassian.net/wiki",
 *   spaceKey: "~EXAMPLE362906de5d343d49dcdbae5dEXAMPLE",
 *   username: "your-username",
 *   accessToken: "your-access-token",
 * });
 * const documents = await loader.load();
 * console.log(documents);
 * ```
 */
export class ConfluencePagesLoader extends BaseDocumentLoader {
  public readonly baseUrl: string;

  public readonly spaceKey: string;

  public readonly username?: string;

  public readonly accessToken?: string;

  public readonly limit: number;

  public readonly maxRetries: number;

  /**
   * expand parameter for confluence rest api
   * description can be found at https://developer.atlassian.com/server/confluence/expansions-in-the-rest-api/
   */
  public readonly expand?: string;

  public readonly personalAccessToken?: string;

  constructor({
    baseUrl,
    spaceKey,
    username,
    accessToken,
    limit = 25,
    expand = "body.storage,version",
    personalAccessToken,
    maxRetries = 5,
  }: ConfluencePagesLoaderParams) {
    super();
    this.baseUrl = baseUrl;
    this.spaceKey = spaceKey;
    this.username = username;
    this.accessToken = accessToken;
    this.limit = limit;
    this.expand = expand;
    this.personalAccessToken = personalAccessToken;
    this.maxRetries = maxRetries;
  }

  /**
   * Returns the authorization header for the request.
   * A personal access token (Bearer) wins over username/token basic auth;
   * with neither configured, requests are sent unauthenticated.
   * @returns The authorization header as a string, or undefined if no credentials were provided.
   */
  private get authorizationHeader(): string | undefined {
    if (this.personalAccessToken) {
      return `Bearer ${this.personalAccessToken}`;
    } else if (this.username && this.accessToken) {
      const authToken = Buffer.from(
        `${this.username}:${this.accessToken}`
      ).toString("base64");
      return `Basic ${authToken}`;
    }

    return undefined;
  }

  /**
   * Fetches all the pages in the specified space and converts each page to
   * a Document instance.
   * NOTE(review): any failure is logged and swallowed — callers receive an
   * empty array rather than an error.
   * @param options the extra options of the load function
   * @param options.limit The limit parameter to overwrite the size to fetch pages.
   * @param options.start The start parameter to set inital offset to fetch pages.
   * @returns Promise resolving to an array of Document instances.
   */
  public async load(options?: {
    start?: number;
    limit?: number;
  }): Promise<Document[]> {
    try {
      const pages = await this.fetchAllPagesInSpace(
        options?.start,
        options?.limit
      );
      return pages.map((page) => this.createDocumentFromPage(page));
    } catch (error) {
      console.error("Error:", error);
      return [];
    }
  }

  /**
   * Fetches data from the Confluence API using the provided URL, retrying
   * up to `maxRetries` times on any failure (non-2xx status or network
   * error). Note: retries are immediate — there is no backoff delay.
   * @param url The URL to fetch data from.
   * @returns Promise resolving to the JSON response from the API.
   */
  protected async fetchConfluenceData(
    url: string
  ): Promise<ConfluenceAPIResponse> {
    let retryCounter = 0;
    // eslint-disable-next-line no-constant-condition
    while (true) {
      retryCounter += 1;
      try {
        const initialHeaders: HeadersInit = {
          "Content-Type": "application/json",
          Accept: "application/json",
        };

        const authHeader = this.authorizationHeader;
        if (authHeader) {
          initialHeaders.Authorization = authHeader;
        }

        const response = await fetch(url, {
          headers: initialHeaders,
        });

        if (!response.ok) {
          throw new Error(
            `Failed to fetch ${url} from Confluence: ${response.status}. Retrying...`
          );
        }

        return await response.json();
      } catch (error) {
        // Give up only after maxRetries attempts; otherwise loop and retry.
        if (retryCounter >= this.maxRetries)
          throw new Error(
            `Failed to fetch ${url} from Confluence (retry: ${retryCounter}): ${error}`
          );
      }
    }
  }

  /**
   * Recursively fetches all the pages in the specified space, advancing
   * the `start` offset by each response's `size` until an empty page is
   * returned.
   * @param start The start parameter to paginate through the results.
   * @returns Promise resolving to an array of ConfluencePage objects.
   */
  private async fetchAllPagesInSpace(
    start = 0,
    limit = this.limit
  ): Promise<ConfluencePage[]> {
    const url = `${this.baseUrl}/rest/api/content?spaceKey=${this.spaceKey}&limit=${limit}&start=${start}&expand=${this.expand}`;
    const data = await this.fetchConfluenceData(url);

    if (data.size === 0) {
      return [];
    }

    const nextPageStart = start + data.size;
    const nextPageResults = await this.fetchAllPagesInSpace(
      nextPageStart,
      limit
    );

    return data.results.concat(nextPageResults);
  }

  /**
   * Creates a Document instance from a ConfluencePage object: strips
   * attachment macros, preserves code-block macros as fenced markdown,
   * converts the remaining storage-format HTML to plain text, and attaches
   * page metadata (id, status, title, type, url, version info).
   * @param page The ConfluencePage object to convert.
   * @returns A Document instance.
   */
  private createDocumentFromPage(page: ConfluencePage): Document {
    const htmlContent = page.body.storage.value;

    // Handle both self-closing and regular macros for attachments and view-file
    const htmlWithoutOtherMacros = htmlContent.replace(
      /<ac:structured-macro\s+ac:name="(attachments|view-file)"[^>]*(?:\/?>|>.*?<\/ac:structured-macro>)/gs,
      "[ATTACHMENT]"
    );

    // Extract and preserve code blocks with unique placeholders so the
    // html-to-text pass cannot mangle their contents.
    const codeBlocks: { language: string; code: string }[] = [];
    const htmlWithPlaceholders = htmlWithoutOtherMacros.replace(
      /<ac:structured-macro.*?<ac:parameter ac:name="language">(.*?)<\/ac:parameter>.*?<ac:plain-text-body><!\[CDATA\[([\s\S]*?)\]\]><\/ac:plain-text-body><\/ac:structured-macro>/g,
      (_, language, code) => {
        const placeholder = `CODE_BLOCK_${codeBlocks.length}`;
        codeBlocks.push({ language, code: code.trim() });
        return `\n${placeholder}\n`;
      }
    );

    // Convert the HTML content to plain text
    let plainTextContent = htmlToText(htmlWithPlaceholders, {
      wordwrap: false,
      preserveNewlines: true,
    });

    // Reinsert code blocks with proper markdown formatting
    codeBlocks.forEach(({ language, code }, index) => {
      const placeholder = `CODE_BLOCK_${index}`;
      plainTextContent = plainTextContent.replace(
        placeholder,
        `\`\`\`${language}\n${code}\n\`\`\``
      );
    });

    // Remove empty lines
    const textWithoutEmptyLines = plainTextContent.replace(/^\s*[\r\n]/gm, "");

    return new Document({
      pageContent: textWithoutEmptyLines,
      metadata: {
        id: page.id,
        status: page.status,
        title: page.title,
        type: page.type,
        url: `${this.baseUrl}/spaces/${this.spaceKey}/pages/${page.id}`,
        version: page.version?.number,
        updated_by: page.version?.by?.displayName,
        updated_at: page.version?.when,
      },
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/airtable.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { AsyncCaller } from "@langchain/core/utils/async_caller";

export interface AirtableLoaderOptions {
  /** ID of the Airtable table. */
  tableId: string;
  /** ID of the Airtable base. */
  baseId: string;
  /** Additional query parameters for Airtable requests (view, maxRecords, ...). */
  kwargs?: Record<string, any>;
}

/** One record as returned by the Airtable "List records" endpoint. */
interface AirtableRecord {
  id: string;
  fields: Record<string, any>;
  createdTime: string;
}

/** Page of records plus the pagination offset for the next request. */
interface AirtableResponse {
  records: AirtableRecord[];
  offset?: string;
}

export class AirtableLoader extends BaseDocumentLoader {
  private readonly apiToken: string;

  private readonly tableId: string;

  private readonly baseId: string;

  private readonly kwargs: Record<string, any>;

  private static readonly BASE_URL = "https://api.airtable.com/v0";

  private asyncCaller: AsyncCaller;

  /**
   * Initializes the AirtableLoader with configuration options.
   * Retrieves the API token from environment variables and validates it.
   *
   * @param tableId - ID of the Airtable table.
   * @param baseId - ID of the Airtable base.
   * @param kwargs - Additional query parameters for Airtable requests.
   */
  constructor({ tableId, baseId, kwargs = {} }: AirtableLoaderOptions) {
    super();
    this.apiToken = getEnvironmentVariable("AIRTABLE_API_TOKEN") || "";
    this.tableId = tableId;
    this.baseId = baseId;
    this.kwargs = kwargs;

    if (!this.apiToken) {
      throw new Error(
        "Missing Airtable API token. Please set AIRTABLE_API_TOKEN environment variable."
      );
    }

    // Retries and bounded concurrency for all Airtable requests.
    this.asyncCaller = new AsyncCaller({ maxRetries: 3, maxConcurrency: 5 });
  }

  /**
   * Loads documents from Airtable, handling pagination and retries.
   *
   * @returns A promise that resolves to an array of Document objects.
   */
  public async load(): Promise<Document[]> {
    const documents: Document[] = [];
    let offset: string | undefined;

    try {
      do {
        const url = this.constructUrl(offset);
        const data = await this.asyncCaller.call(() => this.fetchRecords(url));
        data.records.forEach((record: AirtableRecord) =>
          documents.push(this.createDocument(record))
        );
        offset = data.offset;
      } while (offset);
    } catch (error) {
      console.error("Error loading Airtable records:", error);
      throw new Error("Failed to load Airtable records");
    }

    return documents;
  }

  /**
   * Asynchronous generator function for lazily loading documents from Airtable.
   * This method yields each document individually, enabling memory-efficient
   * handling of large datasets by fetching records in pages.
   *
   * @returns An asynchronous generator yielding Document objects one by one.
   */
  public async *loadLazy(): AsyncGenerator<Document> {
    let offset: string | undefined;
    try {
      do {
        const url = this.constructUrl(offset);
        const data = await this.asyncCaller.call(() => this.fetchRecords(url));

        for (const record of data.records) {
          yield this.createDocument(record);
        }

        offset = data.offset;
      } while (offset);
    } catch (error) {
      console.error("Error loading Airtable records lazily:", error);
      throw new Error("Failed to load Airtable records lazily");
    }
  }

  /**
   * Constructs the Airtable API request URL with pagination and query parameters.
   *
   * Fixed: previously only `kwargs.view` was forwarded even though `kwargs`
   * is documented as "Additional query parameters"; now every scalar kwarg
   * (view, maxRecords, filterByFormula, ...) is appended. Complex values
   * (arrays/objects such as `fields[]` or `sort`) are stringified naively and
   * may need Airtable's bracket encoding — pass them pre-encoded if needed.
   *
   * @param offset - The pagination offset returned by the previous request.
   * @returns A fully constructed URL for the API request.
   */
  private constructUrl(offset?: string): string {
    const url = new URL(
      `${AirtableLoader.BASE_URL}/${this.baseId}/${this.tableId}`
    );
    if (offset) url.searchParams.append("offset", offset);
    for (const [key, value] of Object.entries(this.kwargs)) {
      if (value !== undefined && value !== null) {
        url.searchParams.append(key, String(value));
      }
    }
    return url.toString();
  }

  /**
   * Sends the API request to Airtable and handles the response.
   * Note: there is no request timeout here; retries and concurrency are
   * managed by the AsyncCaller wrapping each call.
   *
   * @param url - The Airtable API request URL.
   * @returns A promise that resolves to an AirtableResponse object.
   */
  private async fetchRecords(url: string): Promise<AirtableResponse> {
    try {
      const response = await fetch(url, {
        headers: {
          Authorization: `Bearer ${this.apiToken}`,
        },
      });

      if (!response.ok) {
        throw new Error(
          `Airtable API request failed with status ${response.status}: ${response.statusText}`
        );
      }
      return (await response.json()) as AirtableResponse;
    } catch (error) {
      console.error("Error during fetch:", error);
      throw error;
    }
  }

  /**
   * Converts an Airtable record into a Document object with metadata.
   *
   * @param record - An Airtable record to convert.
   * @returns A Document object with page content and metadata.
   */
  private createDocument(record: AirtableRecord): Document {
    const metadata: Record<string, any> = {
      source: `${AirtableLoader.BASE_URL}/${this.baseId}/${this.tableId}`,
      base_id: this.baseId,
      table_id: this.tableId,
      ...(this.kwargs.view && { view: this.kwargs.view }),
    };
    return new Document({ pageContent: JSON.stringify(record), metadata });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/pdf.ts
import { Document } from "@langchain/core/documents"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; /** * A document loader for loading data from PDFs. * @example * ```typescript * const loader = new WebPDFLoader(new Blob()); * const docs = await loader.load(); * console.log({ docs }); * ``` */ export class WebPDFLoader extends BaseDocumentLoader { protected blob: Blob; protected splitPages = true; private pdfjs: typeof PDFLoaderImports; protected parsedItemSeparator: string; constructor( blob: Blob, { splitPages = true, pdfjs = PDFLoaderImports, parsedItemSeparator = "", } = {} ) { super(); this.blob = blob; this.splitPages = splitPages ?? this.splitPages; this.pdfjs = pdfjs; this.parsedItemSeparator = parsedItemSeparator; } /** * Loads the contents of the PDF as documents. * @returns An array of Documents representing the retrieved data. */ async load(): Promise<Document[]> { const { getDocument, version } = await this.pdfjs(); const parsedPdf = await getDocument({ data: new Uint8Array(await this.blob.arrayBuffer()), useWorkerFetch: false, isEvalSupported: false, useSystemFonts: true, }).promise; const meta = await parsedPdf.getMetadata().catch(() => null); const documents: Document[] = []; for (let i = 1; i <= parsedPdf.numPages; i += 1) { const page = await parsedPdf.getPage(i); const content = await page.getTextContent(); if (content.items.length === 0) { continue; } // Eliminate excessive newlines // Source: https://github.com/albertcui/pdf-parse/blob/7086fc1cc9058545cdf41dd0646d6ae5832c7107/lib/pdf-parse.js#L16 let lastY; const textItems = []; for (const item of content.items) { if ("str" in item) { if (lastY === item.transform[5] || !lastY) { textItems.push(item.str); } else { textItems.push(`\n${item.str}`); } // eslint-disable-next-line prefer-destructuring lastY = item.transform[5]; } } const text = textItems.join(this.parsedItemSeparator); documents.push( new Document({ pageContent: text, metadata: { pdf: { version, info: 
meta?.info, metadata: meta?.metadata, totalPages: parsedPdf.numPages, }, loc: { pageNumber: i, }, }, }) ); } if (this.splitPages) { return documents; } if (documents.length === 0) { return []; } return [ new Document({ pageContent: documents.map((doc) => doc.pageContent).join("\n\n"), metadata: { pdf: { version, info: meta?.info, metadata: meta?.metadata, totalPages: parsedPdf.numPages, }, }, }), ]; return documents; } } async function PDFLoaderImports() { try { const { default: mod } = await import( "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js" ); const { getDocument, version } = mod; return { getDocument, version }; } catch (e) { console.error(e); throw new Error( "Failed to load pdf-parse. Please install it with eg. `npm install pdf-parse`." ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/couchbase.ts
/* eslint-disable import/no-extraneous-dependencies */ import { Cluster, QueryResult } from "couchbase"; import { Document } from "@langchain/core/documents"; import { BaseDocumentLoader, DocumentLoader, } from "@langchain/core/document_loaders/base"; /** * loader for couchbase document */ export class CouchbaseDocumentLoader extends BaseDocumentLoader implements DocumentLoader { private cluster: Cluster; private query: string; private pageContentFields?: string[]; private metadataFields?: string[]; /** * construct Couchbase document loader with a requirement for couchbase cluster client * @param client { Cluster } [ couchbase connected client to connect to database ] * @param query { string } [ query to get results from while loading the data ] * @param pageContentFields { Array<string> } [ filters fields of the document and shows these only ] * @param metadataFields { Array<string> } [ metadata fields required ] */ constructor( client: Cluster, query: string, pageContentFields?: string[], metadataFields?: string[] ) { super(); if (!client) { throw new Error("Couchbase client cluster must be provided."); } this.cluster = client; this.query = query; this.pageContentFields = pageContentFields; this.metadataFields = metadataFields; } /** * Function to load document based on query from couchbase * @returns {Promise<Document[]>} [ Returns a promise of all the documents as array ] */ async load(): Promise<Document[]> { const documents: Document[] = []; for await (const doc of this.lazyLoad()) { documents.push(doc); } return documents; } /** * Function to load documents based on iterator rather than full load * @returns {AsyncIterable<Document>} [ Returns an iterator to fetch documents ] */ async *lazyLoad(): AsyncIterable<Document> { // Run SQL++ Query const result: QueryResult = await this.cluster.query(this.query); for await (const row of result.rows) { let { metadataFields, pageContentFields } = this; if (!pageContentFields) { pageContentFields = Object.keys(row); } 
if (!metadataFields) { metadataFields = []; } const metadata = metadataFields.reduce( (obj, field) => ({ ...obj, [field]: row[field] }), {} ); const document = pageContentFields .map((k) => `${k}: ${JSON.stringify(row[k])}`) .join("\n"); yield new Document({ pageContent: document, metadata, }); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/gitbook.ts
import type { CheerioAPI } from "cheerio";
import { Document } from "@langchain/core/documents";
import { CheerioWebBaseLoader } from "./cheerio.js";

/**
 * Parameters accepted by the GitbookLoader. `shouldLoadAllPaths`
 * switches the loader from a single page to the whole sitemap.
 */
interface GitbookLoaderParams {
  shouldLoadAllPaths?: boolean;
}

/**
 * Loader for Gitbook sites, built on top of CheerioWebBaseLoader.
 * Depending on configuration it loads a single page, or walks the
 * site's sitemap.xml and loads every page listed there.
 */
export class GitbookLoader extends CheerioWebBaseLoader {
  shouldLoadAllPaths = false;

  private readonly baseUrl: string;

  constructor(public webPath: string, params: GitbookLoaderParams = {}) {
    // When loading the whole book, scrape the sitemap instead of the page.
    const targetPath =
      params.shouldLoadAllPaths === true ? `${webPath}/sitemap.xml` : webPath;
    super(targetPath);
    // Remember the original root so sitemap entries can be resolved later.
    this.baseUrl = webPath;
    this.webPath = targetPath;
    this.shouldLoadAllPaths =
      params.shouldLoadAllPaths ?? this.shouldLoadAllPaths;
  }

  /**
   * Scrape the configured path and dispatch to sitemap-wide loading or
   * single-page loading based on `shouldLoadAllPaths`.
   * @returns Promise resolving to an array of Document instances.
   */
  public async load(): Promise<Document[]> {
    const $ = await this.scrape();
    return this.shouldLoadAllPaths === true
      ? this.loadAllPaths($)
      : this.loadPath($);
  }

  /**
   * Extract one page: collect the trimmed text nodes under <main>, join
   * them with newlines, and take the first <h1> under <main> as the title.
   * @param $ CheerioAPI instance representing the loaded web document.
   * @param url Optional string representing the URL of the web document.
   * @returns Array with a single Document for the page.
   */
  private loadPath($: CheerioAPI, url?: string): Document[] {
    const pageContent = $("main *")
      .contents()
      .toArray()
      .flatMap((node) => {
        // Keep only non-empty text nodes.
        if (node.type !== "text") return [];
        const text = $(node).text().trim();
        return text ? [text] : [];
      })
      .join("\n");

    const title = $("main h1").first().text().trim();

    return [
      new Document({
        pageContent,
        metadata: { source: url ?? this.webPath, title },
      }),
    ];
  }

  /**
   * Walk the sitemap: read every <loc> URL, fetch each page in turn, and
   * run loadPath over it. Returns the combined list of documents.
   * @param $ CheerioAPI instance representing the loaded sitemap document.
   * @returns Promise resolving to an array of Document instances.
   */
  private async loadAllPaths($: CheerioAPI): Promise<Document[]> {
    const locations = $("loc")
      .toArray()
      .map((node) => $(node).text());

    const documents: Document[] = [];
    for (const location of locations) {
      // Sitemap entries may be absolute or relative to the book root.
      const buildUrl = location.includes(this.baseUrl)
        ? location
        : this.baseUrl + location;
      console.log(`Fetching text from ${buildUrl}`);
      const html = await GitbookLoader._scrape(
        buildUrl,
        this.caller,
        this.timeout
      );
      documents.push(...this.loadPath(html, buildUrl));
    }
    console.log(`Fetched ${documents.length} documents.`);
    return documents;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/assemblyai.ts
import {
  AssemblyAI,
  BaseServiceParams,
  TranscribeParams,
  SubtitleFormat,
  Transcript,
  TranscriptParagraph,
  TranscriptSentence,
  CreateTranscriptParameters,
} from "assemblyai";
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import { AssemblyAIOptions } from "../../types/assemblyai-types.js";

export type * from "../../types/assemblyai-types.js";

// Default client options: identifies LangChainJS as the calling
// integration in the AssemblyAI SDK's user agent.
const defaultOptions = {
  userAgent: {
    integration: { name: "LangChainJS", version: "1.0.1" },
  },
};

/**
 * Base class for AssemblyAI loaders.
 *
 * Resolves the API key (explicit option first, then the
 * `ASSEMBLYAI_API_KEY` environment variable) and constructs the shared
 * AssemblyAI client used by all concrete loaders.
 */
abstract class AssemblyAILoader extends BaseDocumentLoader {
  protected client: AssemblyAI;

  /**
   * Create a new AssemblyAI loader.
   * @param assemblyAIOptions The options to configure the AssemblyAI loader.
   * Configure the `assemblyAIOptions.apiKey` with your AssemblyAI API key, or configure it as the `ASSEMBLYAI_API_KEY` environment variable.
   * @throws Error when no API key is provided via options or environment.
   */
  constructor(assemblyAIOptions?: AssemblyAIOptions) {
    super();
    let options = assemblyAIOptions;
    if (!options) {
      options = {};
    }
    // Fall back to the environment variable when no key is passed in.
    if (!options.apiKey) {
      options.apiKey = getEnvironmentVariable("ASSEMBLYAI_API_KEY");
    }
    if (!options.apiKey) {
      throw new Error("No AssemblyAI API key provided");
    }

    // Caller options override the defaults (user agent) on collision.
    this.client = new AssemblyAI({
      ...defaultOptions,
      ...options,
    } as BaseServiceParams);
  }
}

/**
 * Base class for loaders that either transcribe new audio or fetch an
 * existing transcript by ID. Exactly one of `transcribeParams` /
 * `transcriptId` is set, depending on the constructor argument.
 */
abstract class CreateTranscriptLoader extends AssemblyAILoader {
  protected transcribeParams?: TranscribeParams | CreateTranscriptParameters;

  protected transcriptId?: string;

  /**
   * Transcribe audio or retrieve an existing transcript by its ID.
   * @param params The parameters to transcribe audio, or the ID of the transcript to retrieve.
   * @param assemblyAIOptions The options to configure the AssemblyAI loader.
   * Configure the `assemblyAIOptions.apiKey` with your AssemblyAI API key, or configure it as the `ASSEMBLYAI_API_KEY` environment variable.
   */
  constructor(
    params: TranscribeParams | CreateTranscriptParameters | string,
    assemblyAIOptions?: AssemblyAIOptions
  ) {
    super(assemblyAIOptions);
    // A string argument is treated as an existing transcript ID;
    // anything else is treated as parameters for a new transcription.
    if (typeof params === "string") {
      this.transcriptId = params;
    } else {
      this.transcribeParams = params;
    }
  }

  /**
   * Fetch the existing transcript when an ID was given, otherwise submit
   * the transcription request and wait for its result.
   * @throws Error when neither a transcript ID nor transcribe parameters were provided.
   */
  protected async transcribeOrGetTranscript() {
    if (this.transcriptId) {
      return await this.client.transcripts.get(this.transcriptId);
    }
    if (this.transcribeParams) {
      let transcribeParams: TranscribeParams;
      // Map the legacy CreateTranscriptParameters shape ("audio_url")
      // onto the current TranscribeParams shape ("audio") — presumably
      // for backward compatibility with older SDK params; TODO confirm
      // against the assemblyai SDK docs.
      if ("audio_url" in this.transcribeParams) {
        transcribeParams = {
          ...this.transcribeParams,
          audio: this.transcribeParams.audio_url,
        };
      } else {
        transcribeParams = this.transcribeParams;
      }
      return await this.client.transcripts.transcribe(transcribeParams);
    } else {
      throw new Error("No transcript ID or transcribe parameters provided");
    }
  }
}

/**
 * Transcribe audio and load the transcript as a document using AssemblyAI.
 */
export class AudioTranscriptLoader extends CreateTranscriptLoader {
  /**
   * Transcribe audio and load the transcript as a document using AssemblyAI.
   * @returns A promise that resolves to a single document containing the transcript text
   * as the page content, and the transcript object as the metadata.
   */
  override async load(): Promise<Document<Transcript>[]> {
    const transcript = await this.transcribeOrGetTranscript();
    return [
      new Document({
        pageContent: transcript.text as string,
        metadata: transcript,
      }),
    ];
  }
}

/**
 * Transcribe audio and load the paragraphs of the transcript, creating a document for each paragraph.
 */
export class AudioTranscriptParagraphsLoader extends CreateTranscriptLoader {
  /**
   * Transcribe audio and load the paragraphs of the transcript, creating a document for each paragraph.
   * @returns A promise that resolves to an array of documents, each containing a paragraph of the transcript.
   */
  override async load(): Promise<Document<TranscriptParagraph>[]> {
    const transcript = await this.transcribeOrGetTranscript();
    // Paragraphs require a second API call keyed by the transcript ID.
    const paragraphsResponse = await this.client.transcripts.paragraphs(
      transcript.id
    );
    return paragraphsResponse.paragraphs.map(
      (p: TranscriptParagraph) =>
        new Document({
          pageContent: p.text,
          metadata: p,
        })
    );
  }
}

/**
 * Transcribe audio and load the sentences of the transcript, creating a document for each sentence.
 */
export class AudioTranscriptSentencesLoader extends CreateTranscriptLoader {
  /**
   * Transcribe audio and load the sentences of the transcript, creating a document for each sentence.
   * @returns A promise that resolves to an array of documents, each containing a sentence of the transcript.
   */
  override async load(): Promise<Document<TranscriptSentence>[]> {
    const transcript = await this.transcribeOrGetTranscript();
    // Sentences require a second API call keyed by the transcript ID.
    const sentencesResponse = await this.client.transcripts.sentences(
      transcript.id
    );
    return sentencesResponse.sentences.map(
      (p: TranscriptSentence) =>
        new Document({
          pageContent: p.text,
          metadata: p,
        })
    );
  }
}

/**
 * Transcribe audio and load subtitles for the transcript as `srt` or `vtt` format.
 */
export class AudioSubtitleLoader extends CreateTranscriptLoader {
  /**
   * Create a new AudioSubtitleLoader.
   * @param transcribeParams The parameters to transcribe audio.
   * @param subtitleFormat The format of the subtitles, either `srt` or `vtt`.
   * @param assemblyAIOptions The options to configure the AssemblyAI loader.
   * Configure the `assemblyAIOptions.apiKey` with your AssemblyAI API key, or configure it as the `ASSEMBLYAI_API_KEY` environment variable.
   */
  constructor(
    transcribeParams: TranscribeParams | CreateTranscriptParameters,
    subtitleFormat: SubtitleFormat,
    assemblyAIOptions?: AssemblyAIOptions
  );

  /**
   * Create a new AudioSubtitleLoader.
   * @param transcriptId The ID of the transcript to retrieve.
   * @param subtitleFormat The format of the subtitles, either `srt` or `vtt`.
   * @param assemblyAIOptions The options to configure the AssemblyAI loader.
   * Configure the `assemblyAIOptions.apiKey` with your AssemblyAI API key, or configure it as the `ASSEMBLYAI_API_KEY` environment variable.
   */
  constructor(
    transcriptId: string,
    subtitleFormat: SubtitleFormat,
    assemblyAIOptions?: AssemblyAIOptions
  );

  /**
   * Create a new AudioSubtitleLoader.
   * @param params The parameters to transcribe audio, or the ID of the transcript to retrieve.
   * @param subtitleFormat The format of the subtitles, either `srt` or `vtt`.
   * @param assemblyAIOptions The options to configure the AssemblyAI loader.
   * Configure the `assemblyAIOptions.apiKey` with your AssemblyAI API key, or configure it as the `ASSEMBLYAI_API_KEY` environment variable.
   */
  constructor(
    params: TranscribeParams | CreateTranscriptParameters | string,
    private subtitleFormat: SubtitleFormat = "srt",
    assemblyAIOptions?: AssemblyAIOptions
  ) {
    super(params, assemblyAIOptions);
    // NOTE(review): redundant with the `private subtitleFormat` parameter
    // property above, which already assigns the field.
    this.subtitleFormat = subtitleFormat;
  }

  /**
   * Transcribe audio and load subtitles for the transcript as `srt` or `vtt` format.
   * @returns A promise that resolves a document containing the subtitles as the page content.
   */
  override async load(): Promise<Document[]> {
    const transcript = await this.transcribeOrGetTranscript();
    // Subtitles require a second API call keyed by the transcript ID.
    const subtitles = await this.client.transcripts.subtitles(
      transcript.id,
      this.subtitleFormat
    );
    return [
      new Document({
        pageContent: subtitles,
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/playwright.ts
import type { LaunchOptions, Page, Browser, Response } from "playwright";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import type { DocumentLoader } from "@langchain/core/document_loaders/base";

export { Page, Browser, Response };

export type PlaywrightGotoOptions = {
  referer?: string;
  timeout?: number;
  waitUntil?: "load" | "domcontentloaded" | "networkidle" | "commit";
};

/**
 * Type representing a function for evaluating JavaScript code on a web
 * page using Playwright. Takes a Page, Browser, and Response object as
 * parameters and returns a Promise that resolves to a string.
 */
export type PlaywrightEvaluate = (
  page: Page,
  browser: Browser,
  response: Response | null
) => Promise<string>;

export type PlaywrightWebBaseLoaderOptions = {
  launchOptions?: LaunchOptions;
  gotoOptions?: PlaywrightGotoOptions;
  evaluate?: PlaywrightEvaluate;
};

/**
 * Class representing a document loader for scraping web pages using
 * Playwright. Extends the BaseDocumentLoader class and implements the
 * DocumentLoader interface.
 */
export class PlaywrightWebBaseLoader
  extends BaseDocumentLoader
  implements DocumentLoader
{
  options: PlaywrightWebBaseLoaderOptions | undefined;

  /**
   * @param webPath URL of the page to scrape.
   * @param options Optional launch / goto / evaluate configuration.
   */
  constructor(
    public webPath: string,
    options?: PlaywrightWebBaseLoaderOptions
  ) {
    super();
    this.options = options ?? undefined;
  }

  /**
   * Launch a headless Chromium browser, navigate to `url`, and return the
   * page HTML (or the result of the caller-supplied `evaluate` function).
   *
   * Fix over the previous version: the browser is now closed in a
   * `finally` block, so a failing `goto`, `evaluate`, or `content` call
   * no longer leaks the browser process.
   *
   * @param url URL to navigate to.
   * @param options Optional launch / goto / evaluate configuration.
   * @returns Promise resolving to the scraped HTML (or evaluate result).
   */
  static async _scrape(
    url: string,
    options?: PlaywrightWebBaseLoaderOptions
  ): Promise<string> {
    const { chromium } = await PlaywrightWebBaseLoader.imports();

    const browser = await chromium.launch({
      headless: true,
      ...options?.launchOptions,
    });
    try {
      const page = await browser.newPage();

      const response = await page.goto(url, {
        timeout: 180000,
        waitUntil: "domcontentloaded",
        ...options?.gotoOptions,
      });

      // Custom evaluate hook wins over the default full-page HTML dump.
      return options?.evaluate
        ? await options.evaluate(page, browser, response)
        : await page.content();
    } finally {
      // Always release the browser, even when navigation or evaluation
      // throws; otherwise the headless process would be leaked.
      await browser.close();
    }
  }

  /**
   * Method that calls the _scrape method to perform the scraping of the web
   * page specified by the webPath property. Returns a Promise that resolves
   * to the scraped HTML content of the web page.
   * @returns Promise that resolves to the scraped HTML content of the web page.
   */
  async scrape(): Promise<string> {
    return PlaywrightWebBaseLoader._scrape(this.webPath, this.options);
  }

  /**
   * Method that calls the scrape method and returns the scraped HTML
   * content as a Document object. Returns a Promise that resolves to an
   * array of Document objects.
   * @returns Promise that resolves to an array of Document objects.
   */
  async load(): Promise<Document[]> {
    const text = await this.scrape();

    const metadata = { source: this.webPath };
    return [new Document({ pageContent: text, metadata })];
  }

  /**
   * Static method that imports the necessary Playwright modules. Returns a
   * Promise that resolves to an object containing the imported modules.
   * @returns Promise that resolves to an object containing the imported modules.
   * @throws Error with install instructions when playwright is not installed.
   */
  static async imports(): Promise<{
    chromium: typeof import("playwright").chromium;
  }> {
    try {
      const { chromium } = await import("playwright");

      return { chromium };
    } catch (e) {
      console.error(e);
      throw new Error(
        "Please install playwright as a dependency with, e.g. `yarn add playwright`"
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/recursive_url.ts
import { JSDOM } from "jsdom"; import { Document } from "@langchain/core/documents"; import { AsyncCaller } from "@langchain/core/utils/async_caller"; import { BaseDocumentLoader, DocumentLoader, } from "@langchain/core/document_loaders/base"; export interface RecursiveUrlLoaderOptions { excludeDirs?: string[]; extractor?: (text: string) => string; maxDepth?: number; timeout?: number; preventOutside?: boolean; callerOptions?: ConstructorParameters<typeof AsyncCaller>[0]; } export class RecursiveUrlLoader extends BaseDocumentLoader implements DocumentLoader { private caller: AsyncCaller; private url: string; private excludeDirs: string[]; private extractor: (text: string) => string; private maxDepth: number; private timeout: number; private preventOutside: boolean; constructor(url: string, options: RecursiveUrlLoaderOptions) { super(); this.caller = new AsyncCaller({ maxConcurrency: 64, maxRetries: 0, ...options.callerOptions, }); this.url = url; this.excludeDirs = options.excludeDirs ?? []; this.extractor = options.extractor ?? ((s: string) => s); this.maxDepth = options.maxDepth ?? 2; this.timeout = options.timeout ?? 10000; this.preventOutside = options.preventOutside ?? 
true; } private async fetchWithTimeout( resource: string, options: { timeout: number } & RequestInit ): Promise<Response> { const { timeout, ...rest } = options; return this.caller.call(() => fetch(resource, { ...rest, signal: AbortSignal.timeout(timeout) }) ); } private getChildLinks(html: string, baseUrl: string): Array<string> { const allLinks = Array.from( new JSDOM(html).window.document.querySelectorAll("a") ).map((a) => a.href); const absolutePaths = []; // eslint-disable-next-line no-script-url const invalidPrefixes = ["javascript:", "mailto:", "#"]; const invalidSuffixes = [ ".css", ".js", ".ico", ".png", ".jpg", ".jpeg", ".gif", ".svg", ]; for (const link of allLinks) { if ( invalidPrefixes.some((prefix) => link.startsWith(prefix)) || invalidSuffixes.some((suffix) => link.endsWith(suffix)) ) continue; let standardizedLink: string; if (link.startsWith("http")) { standardizedLink = link; } else if (link.startsWith("//")) { const base = new URL(baseUrl); standardizedLink = base.protocol + link; } else { standardizedLink = new URL(link, baseUrl).href; } if (this.excludeDirs.some((exDir) => standardizedLink.startsWith(exDir))) continue; if (link.startsWith("http")) { const isAllowed = !this.preventOutside || link.startsWith(baseUrl); if (isAllowed) absolutePaths.push(link); } else if (link.startsWith("//")) { const base = new URL(baseUrl); absolutePaths.push(base.protocol + link); } else { const newLink = new URL(link, baseUrl).href; absolutePaths.push(newLink); } } return Array.from(new Set(absolutePaths)); } private extractMetadata(rawHtml: string, url: string) { // eslint-disable-next-line @typescript-eslint/no-explicit-any const metadata: Record<string, any> = { source: url }; const { document } = new JSDOM(rawHtml).window; const title = document.getElementsByTagName("title")[0]; if (title) { metadata.title = title.textContent; } const description = document.querySelector("meta[name=description]"); if (description) { metadata.description = 
description.getAttribute("content"); } const html = document.getElementsByTagName("html")[0]; if (html) { metadata.language = html.getAttribute("lang"); } return metadata; } private async getUrlAsDoc(url: string): Promise<Document | null> { let res; try { res = await this.fetchWithTimeout(url, { timeout: this.timeout }); res = await res.text(); } catch (e) { return null; } return { pageContent: this.extractor(res), metadata: this.extractMetadata(res, url), }; } private async getChildUrlsRecursive( inputUrl: string, visited: Set<string> = new Set<string>(), depth = 0 ): Promise<Document[]> { if (depth >= this.maxDepth) return []; let url = inputUrl; if (!inputUrl.endsWith("/")) url += "/"; const isExcluded = this.excludeDirs.some((exDir) => url.startsWith(exDir)); if (isExcluded) return []; let res; try { res = await this.fetchWithTimeout(url, { timeout: this.timeout }); res = await res.text(); } catch (e) { return []; } const childUrls: string[] = this.getChildLinks(res, url); const results = await Promise.all( childUrls.map((childUrl) => (async () => { if (visited.has(childUrl)) return null; visited.add(childUrl); const childDoc = await this.getUrlAsDoc(childUrl); if (!childDoc) return null; if (childUrl.endsWith("/")) { const childUrlResponses = await this.getChildUrlsRecursive( childUrl, visited, depth + 1 ); return [childDoc, ...childUrlResponses]; } return [childDoc]; })() ) ); return results.flat().filter((docs) => docs !== null) as Document[]; } async load(): Promise<Document[]> { const rootDoc = await this.getUrlAsDoc(this.url); if (!rootDoc) return []; const docs = [rootDoc]; docs.push( ...(await this.getChildUrlsRecursive(this.url, new Set([this.url]))) ); return docs; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders
lc_public_repos/langchainjs/libs/langchain-community/src/document_loaders/web/taskade.ts
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";

/**
 * Configuration for the TaskadeProjectLoader: an optional personal
 * access token and the id of the project to pull tasks from.
 */
export interface TaskadeLoaderParams {
  personalAccessToken?: string;
  projectId: string;
}

/**
 * Shape of a Taskade project as consumed by this loader: a flat list of
 * tasks carrying an id, text, parent pointer, and completion flag.
 */
export interface TaskadeProject {
  tasks: Array<{
    id: string;
    text: string;
    parentId: string;
    completed: boolean;
  }>;
}

/**
 * Document loader for Taskade projects. Pages through the Taskade tasks
 * API and returns the project's task texts as a single Document.
 * @example
 * ```typescript
 * const loader = new TaskadeProjectLoader({
 *   personalAccessToken: "TASKADE_PERSONAL_ACCESS_TOKEN",
 *   projectId: "projectId",
 * });
 * const docs = await loader.load();
 * ```
 */
export class TaskadeProjectLoader
  extends BaseDocumentLoader
  implements TaskadeLoaderParams
{
  public readonly personalAccessToken?: string;

  public readonly projectId: string;

  private headers: Record<string, string> = {};

  constructor({
    personalAccessToken = getEnvironmentVariable(
      "TASKADE_PERSONAL_ACCESS_TOKEN"
    ),
    projectId,
  }: TaskadeLoaderParams) {
    super();
    this.personalAccessToken = personalAccessToken;
    this.projectId = projectId;
    // Only attach an Authorization header when a token is available.
    if (this.personalAccessToken) {
      this.headers = {
        Authorization: `Bearer ${this.personalAccessToken}`,
      };
    }
  }

  /**
   * Page through the project's tasks (100 per request, cursor-based via
   * `after`) until the API returns an empty page.
   * @returns A Promise that resolves to a TaskadeProject object.
   */
  private async getTaskadeProject(): Promise<TaskadeProject> {
    const tasks: TaskadeProject["tasks"] = [];
    let cursor: string | null = null;
    let morePages = true;
    while (morePages) {
      const queryString: string = new URLSearchParams({
        limit: "100",
        ...(cursor == null ? {} : { after: cursor }),
      }).toString();
      const url = `https://www.taskade.com/api/v1/projects/${this.projectId}/tasks?${queryString}`;
      const response = await fetch(url, { headers: this.headers });
      const data = await response.json();
      if (!response.ok) {
        throw new Error(
          `Unable to get Taskade project: ${response.status} ${JSON.stringify(
            data
          )}`
        );
      }
      if (!data) {
        throw new Error("Unable to get Taskade project");
      }
      const page = data.items;
      if (page.length === 0) {
        // Empty page: no more tasks to fetch.
        morePages = false;
      } else {
        // Advance the cursor to the last task we received.
        cursor = page[page.length - 1].id;
      }
      tasks.push(...page);
    }
    return { tasks };
  }

  /**
   * Fetch the project's tasks and return one Document whose page content
   * is the newline-joined task texts, with the project id as metadata.
   * @returns A Promise that resolves to an array of Document instances.
   */
  public async load(): Promise<Document[]> {
    const project = await this.getTaskadeProject();
    const pageContent = project.tasks.map((task) => task.text).join("\n");
    return [
      new Document({
        pageContent,
        metadata: { projectId: this.projectId },
      }),
    ];
  }
}