| index (int64) | repo_id (string, 596 classes) | file_path (string, 31–168 chars) | content (string, 1–6.2M chars) |
|---|---|---|---|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes/tests/memory.test.ts
|
import { describe, expect, test, jest } from "@jest/globals";
import { InMemoryRecordManager } from "../memory.js";
/**
 * Unit tests for InMemoryRecordManager: upsert, timestamping, groupIds,
 * existence checks, deletion, and key listing with time/group filters.
 */
describe("MemoryRecordManagerTest", () => {
  let recordManager: InMemoryRecordManager;

  beforeAll(async () => {
    recordManager = new InMemoryRecordManager();
    await recordManager.createSchema();
  });

  afterEach(async () => {
    // Clear records so every test starts from an empty store.
    recordManager.records.clear();
  });

  test("Test upsertion", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(expect.arrayContaining(keys));
    expect(readKeys).toHaveLength(keys.length);
  });

  test("Test upsertion with timeAtLeast", async () => {
    // Mock getTime to return 100.
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      // timeAtLeast (110) is later than the mocked clock (100), so the
      // update must be rejected and nothing may be written.
      await expect(
        recordManager.update(keys, { timeAtLeast: 110 })
      ).rejects.toThrowError();
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toHaveLength(0);
    } finally {
      // Restore getTime even if an assertion fails, so the mock cannot
      // leak into subsequent tests.
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("Test update timestamp", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      // `records` is a Map; forEach yields the stored record values.
      const res = recordManager.records;
      res.forEach((row) => expect(row.updatedAt).toEqual(100));
      // Updating again with a later mocked time must bump every timestamp.
      recordManager.getTime = jest.fn(() => Promise.resolve(200));
      await recordManager.update(keys);
      const res2 = recordManager.records;
      res2.forEach((row) => expect(row.updatedAt).toEqual(200));
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("Test update with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    // Exactly two of the three keys were tagged with "group1".
    const res = Array.from(recordManager.records).filter(
      ([_key, doc]) => doc.groupId === "group1"
    );
    expect(res.length).toEqual(2);
    res.forEach(([_, row]) => expect(row.groupId).toEqual("group1"));
  });

  test("Exists", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const exists = await recordManager.exists(keys);
    expect(exists).toEqual([true, true, true]);
    const nonExistentKeys = ["d", "e", "f"];
    const nonExists = await recordManager.exists(nonExistentKeys);
    expect(nonExists).toEqual([false, false, false]);
    // Results must be positional: each entry answers for the key at the
    // same index.
    const mixedKeys = ["a", "e", "c"];
    const mixedExists = await recordManager.exists(mixedKeys);
    expect(mixedExists).toEqual([true, false, true]);
  });

  test("Delete", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    await recordManager.deleteKeys(["a", "c"]);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(["b"]);
  });

  test("List keys", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toEqual(expect.arrayContaining(keys));
      expect(readKeys).toHaveLength(keys.length);
      // All keys inserted after 90: should be all keys
      const readKeysAfterInsertedAfter = await recordManager.listKeys({
        after: 90,
      });
      expect(readKeysAfterInsertedAfter).toEqual(expect.arrayContaining(keys));
      // All keys inserted after 110: should be none
      const readKeysAfterInsertedBefore = await recordManager.listKeys({
        after: 110,
      });
      expect(readKeysAfterInsertedBefore).toEqual([]);
      // All keys inserted before 110: should be all keys
      const readKeysBeforeInsertedBefore = await recordManager.listKeys({
        before: 110,
      });
      expect(readKeysBeforeInsertedBefore).toEqual(
        expect.arrayContaining(keys)
      );
      // All keys inserted before 90: should be none
      const readKeysBeforeInsertedAfter = await recordManager.listKeys({
        before: 90,
      });
      expect(readKeysBeforeInsertedAfter).toEqual([]);
      // Set one key to updated at 120 and one at 80
      recordManager.getTime = jest.fn(() => Promise.resolve(120));
      await recordManager.update(["a"]);
      recordManager.getTime = jest.fn(() => Promise.resolve(80));
      await recordManager.update(["b"]);
      // All keys updated after 90 and before 110: should only be "c" now
      const readKeysBeforeAndAfter = await recordManager.listKeys({
        before: 110,
        after: 90,
      });
      expect(readKeysBeforeAndAfter).toEqual(["c"]);
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("List keys with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    const readKeys = await recordManager.listKeys({ groupIds: ["group1"] });
    expect(readKeys).toEqual(["a", "b"]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes/tests/sqlite.int.test.ts
|
import { describe, expect, test, jest } from "@jest/globals";
import { SQLiteRecordManager } from "../sqlite.js";
/**
 * Integration tests for SQLiteRecordManager against an in-memory SQLite
 * database: upsert, timestamping, groupIds, existence, deletion, listing.
 */
describe("SQLiteRecordManager", () => {
  const tableName = "upsertion_record";
  let recordManager: SQLiteRecordManager;

  beforeAll(async () => {
    // ":memory:" keeps the database process-local; nothing is written to disk.
    const localPath = ":memory:";
    recordManager = new SQLiteRecordManager("test", {
      tableName,
      localPath,
    });
    await recordManager.createSchema();
  });

  afterEach(async () => {
    // Wipe all rows between tests; createSchema is idempotent here.
    recordManager.db.exec(`DELETE FROM "${tableName}"`);
    await recordManager.createSchema();
  });

  afterAll(() => {
    recordManager.db.close();
  });

  test("Test upsertion", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(expect.arrayContaining(keys));
    expect(readKeys).toHaveLength(keys.length);
  });

  test("Test upsertion with timeAtLeast", async () => {
    // Mock getTime to return 100.
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      // timeAtLeast (110) exceeds the mocked clock (100): the update must
      // be rejected and no rows written.
      await expect(
        recordManager.update(keys, { timeAtLeast: 110 })
      ).rejects.toThrowError();
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toHaveLength(0);
    } finally {
      // Restore getTime even on assertion failure so the mock cannot
      // leak into subsequent tests.
      recordManager.getTime = unmockedGetTime;
    }
  });

  // Shape of the rows returned from the record table.
  interface RecordRow {
    id: number;
    key: string;
    updated_at: number;
    group_id: string;
  }

  test("Test update timestamp", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      // better-sqlite3 statements are synchronous: `.all()` returns the
      // rows directly (no `await`); row typing is done with a cast, since
      // the statement's generic parameter describes bind values, not rows.
      const rows = recordManager.db
        .prepare(`SELECT * FROM "${tableName}"`)
        .all() as RecordRow[];
      rows.forEach((row) => expect(row.updated_at).toEqual(100));
      recordManager.getTime = jest.fn(() => Promise.resolve(200));
      await recordManager.update(keys);
      const rows2 = recordManager.db
        .prepare(`SELECT * FROM "${tableName}"`)
        .all() as RecordRow[];
      rows2.forEach((row) => expect(row.updated_at).toEqual(200));
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("Test update with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    const rows = recordManager.db
      .prepare(`SELECT * FROM "${tableName}" WHERE group_id = ?`)
      .all("group1") as RecordRow[];
    expect(rows.length).toEqual(2);
    rows.forEach((row) => expect(row.group_id).toEqual("group1"));
  });

  test("Exists", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const exists = await recordManager.exists(keys);
    expect(exists).toEqual([true, true, true]);
    const nonExistentKeys = ["d", "e", "f"];
    const nonExists = await recordManager.exists(nonExistentKeys);
    expect(nonExists).toEqual([false, false, false]);
    // Results are positional with respect to the queried keys.
    const mixedKeys = ["a", "e", "c"];
    const mixedExists = await recordManager.exists(mixedKeys);
    expect(mixedExists).toEqual([true, false, true]);
  });

  test("Delete", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    await recordManager.deleteKeys(["a", "c"]);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(["b"]);
  });

  test("List keys", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toEqual(expect.arrayContaining(keys));
      expect(readKeys).toHaveLength(keys.length);
      // All keys inserted after 90: should be all keys
      const readKeysAfterInsertedAfter = await recordManager.listKeys({
        after: 90,
      });
      expect(readKeysAfterInsertedAfter).toEqual(expect.arrayContaining(keys));
      // All keys inserted after 110: should be none
      const readKeysAfterInsertedBefore = await recordManager.listKeys({
        after: 110,
      });
      expect(readKeysAfterInsertedBefore).toEqual([]);
      // All keys inserted before 110: should be all keys
      const readKeysBeforeInsertedBefore = await recordManager.listKeys({
        before: 110,
      });
      expect(readKeysBeforeInsertedBefore).toEqual(
        expect.arrayContaining(keys)
      );
      // All keys inserted before 90: should be none
      const readKeysBeforeInsertedAfter = await recordManager.listKeys({
        before: 90,
      });
      expect(readKeysBeforeInsertedAfter).toEqual([]);
      // Set one key to updated at 120 and one at 80
      recordManager.getTime = jest.fn(() => Promise.resolve(120));
      await recordManager.update(["a"]);
      recordManager.getTime = jest.fn(() => Promise.resolve(80));
      await recordManager.update(["b"]);
      // All keys updated after 90 and before 110: should only be "c" now
      const readKeysBeforeAndAfter = await recordManager.listKeys({
        before: 110,
        after: 90,
      });
      expect(readKeysBeforeAndAfter).toEqual(["c"]);
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("List keys with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    const readKeys = await recordManager.listKeys({ groupIds: ["group1"] });
    expect(readKeys).toEqual(["a", "b"]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes/tests/indexing.int.test.ts
|
import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { index } from "@langchain/core/indexing";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
import { InMemoryRecordManager } from "../memory.js";
import { PGVectorStore } from "../../vectorstores/pgvector.js";
import { sleep } from "../../utils/time.js";
/**
 * Minimal in-memory DocumentLoader used to exercise the `index` API with a
 * loader source instead of a raw array of documents.
 */
class MockLoader extends BaseDocumentLoader {
// Captures the documents to serve from `load` as a public field.
constructor(public docs: Document[]) {
super();
}
// Resolves with the captured documents; performs no I/O.
async load(): Promise<Document[]> {
return this.docs;
}
}
describe.skip("Indexing API", () => {
let recordManager: InMemoryRecordManager;
let vectorstore: PGVectorStore;
const tableName = "testlangchain";
beforeAll(async () => {
const config = {
postgresConnectionOptions: {
type: "postgres",
host: "127.0.0.1",
port: 5432,
user: "myuser",
password: "ChangeMe",
database: "api",
},
tableName,
};
recordManager = new InMemoryRecordManager();
await recordManager.createSchema();
vectorstore = await PGVectorStore.initialize(new FakeEmbeddings(), config);
});
afterEach(async () => {
recordManager.records.clear();
await vectorstore.pool.query(`DROP TABLE "${tableName}"`);
await vectorstore.ensureTableInDatabase();
// Because the indexing API relies on timestamps, without this the tests are flaky.
await sleep(1000);
});
afterAll(async () => {
await vectorstore.end();
});
test("Test indexing sanity", async () => {
const docs = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
const initialIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
});
expect(initialIndexingResult.numAdded).toEqual(3);
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
});
expect(secondIndexingResult.numAdded).toEqual(0);
expect(secondIndexingResult.numSkipped).toEqual(3);
const res = await vectorstore.pool.query(`SELECT * FROM "${tableName}"`);
expect(recordManager.records.size).toEqual(3);
expect(res.rowCount).toEqual(3);
});
test("Test indexing with cleanup full", async () => {
const docs = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: { cleanup: "full" },
});
const secondIndexingResult = await index({
docsSource: [],
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
expect(secondIndexingResult.numAdded).toEqual(0);
expect(secondIndexingResult.numSkipped).toEqual(0);
expect(secondIndexingResult.numDeleted).toEqual(3);
const res = await vectorstore.pool.query(`SELECT * FROM "${tableName}"`);
expect(recordManager.records.size).toEqual(0);
expect(res.rowCount).toEqual(0);
});
test("Test indexing with updated page content (full)", async () => {
const docs = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
docs[0].pageContent = "Document 1 Content Updated";
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
expect(secondIndexingResult.numAdded).toEqual(1);
expect(secondIndexingResult.numDeleted).toEqual(1);
expect(secondIndexingResult.numSkipped).toEqual(2);
const res = await vectorstore.pool.query(`SELECT * FROM "${tableName}"`);
expect(recordManager.records.size).toEqual(3);
expect(res.rowCount).toEqual(3);
});
test("Test indexing with updated metadata (full)", async () => {
const docs: Document[] = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
docs[0].metadata.field = "value";
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
expect(secondIndexingResult.numAdded).toEqual(1);
expect(secondIndexingResult.numDeleted).toEqual(1);
expect(secondIndexingResult.numSkipped).toEqual(2);
});
test("Test indexing with updated page content (incremental)", async () => {
const docs = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
});
docs[0].pageContent = "Document 1 Content Updated";
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
});
expect(secondIndexingResult.numAdded).toEqual(1);
expect(secondIndexingResult.numDeleted).toEqual(1);
expect(secondIndexingResult.numSkipped).toEqual(2);
const res = await vectorstore.pool.query(`SELECT * FROM "${tableName}"`);
expect(recordManager.records.size).toEqual(3);
expect(res.rowCount).toEqual(3);
});
test("Test indexing with updated metadata (incremental)", async () => {
const docs: Document[] = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
});
docs[0].metadata.field = "value";
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
});
expect(secondIndexingResult.numAdded).toEqual(1);
expect(secondIndexingResult.numDeleted).toEqual(1);
expect(secondIndexingResult.numSkipped).toEqual(2);
});
test("Test indexing with updated page content without cleanup", async () => {
const docs: Document[] = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({ docsSource: docs, recordManager, vectorStore: vectorstore });
docs[0].pageContent = "Document 1 Content Updated";
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
});
expect(secondIndexingResult.numAdded).toEqual(1);
expect(secondIndexingResult.numDeleted).toEqual(0);
expect(secondIndexingResult.numSkipped).toEqual(2);
});
test("Test indexing with forced update", async () => {
const docs: Document[] = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
];
await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
},
});
// Force update is mostly useful when you are re-indexing with updated embeddings.
// Some vector stores (such as PGVectorStore) do not support overwriting records
// and will throw an error if you try to do so. We must therefore delete the records
// before re-indexing.
await vectorstore.pool.query(`DELETE FROM "${tableName}"`);
const secondIndexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
options: {
cleanup: "full",
forceUpdate: true,
},
});
expect(secondIndexingResult.numAdded).toEqual(0);
expect(secondIndexingResult.numDeleted).toEqual(0);
expect(secondIndexingResult.numUpdated).toEqual(3);
});
test("Test indexing with duplicate documents", async () => {
const docs: Document[] = [
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
];
const indexingResult = await index({
docsSource: docs,
recordManager,
vectorStore: vectorstore,
});
expect(indexingResult.numAdded).toEqual(1);
expect(indexingResult.numSkipped).toEqual(0);
});
test("Test indexing with doc loader", async () => {
const mockLoader = new MockLoader([
{
pageContent: "Document 1 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 2 Content",
metadata: { source: "test" },
},
{
pageContent: "Document 3 Content",
metadata: { source: "test" },
},
]);
const indexingResult = await index({
docsSource: mockLoader,
recordManager,
vectorStore: vectorstore,
});
expect(indexingResult.numAdded).toEqual(3);
});
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes
|
lc_public_repos/langchainjs/libs/langchain-community/src/indexes/tests/postgres.int.test.ts
|
import { describe, expect, test, jest } from "@jest/globals";
import pg, { PoolConfig } from "pg";
import {
PostgresRecordManager,
PostgresRecordManagerOptions,
} from "../postgres.js";
/**
 * Integration tests for PostgresRecordManager. Skipped by default: requires
 * a local Postgres instance with the credentials below.
 */
describe.skip("PostgresRecordManager", () => {
  const tableName = "upsertion_record";
  const config = {
    postgresConnectionOptions: {
      type: "postgres",
      host: "127.0.0.1",
      port: 5432,
      user: "myuser",
      password: "ChangeMe",
      database: "api",
    } as PoolConfig,
    tableName,
  } as PostgresRecordManagerOptions;
  let recordManager: PostgresRecordManager;

  beforeAll(async () => {
    recordManager = new PostgresRecordManager("test", config);
    await recordManager.createSchema();
  });

  afterEach(async () => {
    // Drop table, then recreate it for the next test.
    await recordManager.pool.query(`DROP TABLE "${tableName}"`);
    await recordManager.createSchema();
  });

  afterAll(async () => {
    await recordManager.end();
  });

  test("Test provided postgres pool instance", async () => {
    const pool = new pg.Pool(config.postgresConnectionOptions);
    const providedPoolRecordManager = new PostgresRecordManager("test", {
      ...config,
      pool,
    });
    try {
      // The manager must adopt the caller-supplied pool instead of
      // creating its own.
      expect(providedPoolRecordManager.pool).toBe(pool);
    } finally {
      // Close the pool opened for this test so connections don't leak.
      await providedPoolRecordManager.end();
    }
  });

  test("Test explicit schema definition", async () => {
    // Use a copied config so the shared `config` fixture is not mutated
    // for subsequent tests.
    const explicitSchemaRecordManager = new PostgresRecordManager("test", {
      ...config,
      schema: "newSchema",
    });
    try {
      // Create the schema, then verify table creation succeeds inside it.
      await explicitSchemaRecordManager.pool.query('CREATE SCHEMA "newSchema"');
      await explicitSchemaRecordManager.createSchema();
    } finally {
      // Always drop the created schema and end the connection, even if a
      // step above throws.
      await explicitSchemaRecordManager.pool.query(
        `DROP SCHEMA IF EXISTS "newSchema" CASCADE`
      );
      await explicitSchemaRecordManager.end();
    }
  });

  test("Test upsertion", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(expect.arrayContaining(keys));
    expect(readKeys).toHaveLength(keys.length);
  });

  test("Test upsertion with timeAtLeast", async () => {
    // Mock getTime to return 100.
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      // timeAtLeast (110) exceeds the mocked clock (100): the update must
      // be rejected and no rows written.
      await expect(
        recordManager.update(keys, { timeAtLeast: 110 })
      ).rejects.toThrowError();
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toHaveLength(0);
    } finally {
      // Restore getTime even on assertion failure so the mock cannot
      // leak into subsequent tests.
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("Test update timestamp", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      const res = await recordManager.pool.query(
        `SELECT * FROM "${tableName}"`
      );
      res.rows.forEach((row) => expect(row.updated_at).toEqual(100));
      // Updating again with a later mocked time must bump every timestamp.
      recordManager.getTime = jest.fn(() => Promise.resolve(200));
      await recordManager.update(keys);
      const res2 = await recordManager.pool.query(
        `SELECT * FROM "${tableName}"`
      );
      res2.rows.forEach((row) => expect(row.updated_at).toEqual(200));
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("Test update with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    const res = await recordManager.pool.query(
      `SELECT * FROM "${tableName}" WHERE group_id = ANY($1)`,
      [["group1"]]
    );
    expect(res.rowCount).toEqual(2);
    res.rows.forEach((row) => expect(row.group_id).toEqual("group1"));
  });

  test("Exists", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    const exists = await recordManager.exists(keys);
    expect(exists).toEqual([true, true, true]);
    const nonExistentKeys = ["d", "e", "f"];
    const nonExists = await recordManager.exists(nonExistentKeys);
    expect(nonExists).toEqual([false, false, false]);
    // Results are positional with respect to the queried keys.
    const mixedKeys = ["a", "e", "c"];
    const mixedExists = await recordManager.exists(mixedKeys);
    expect(mixedExists).toEqual([true, false, true]);
  });

  test("Delete", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys);
    await recordManager.deleteKeys(["a", "c"]);
    const readKeys = await recordManager.listKeys();
    expect(readKeys).toEqual(["b"]);
  });

  test("List keys", async () => {
    const unmockedGetTime = recordManager.getTime;
    recordManager.getTime = jest.fn(() => Promise.resolve(100));
    try {
      const keys = ["a", "b", "c"];
      await recordManager.update(keys);
      const readKeys = await recordManager.listKeys();
      expect(readKeys).toEqual(expect.arrayContaining(keys));
      expect(readKeys).toHaveLength(keys.length);
      // All keys inserted after 90: should be all keys
      const readKeysAfterInsertedAfter = await recordManager.listKeys({
        after: 90,
      });
      expect(readKeysAfterInsertedAfter).toEqual(expect.arrayContaining(keys));
      // All keys inserted after 110: should be none
      const readKeysAfterInsertedBefore = await recordManager.listKeys({
        after: 110,
      });
      expect(readKeysAfterInsertedBefore).toEqual([]);
      // All keys inserted before 110: should be all keys
      const readKeysBeforeInsertedBefore = await recordManager.listKeys({
        before: 110,
      });
      expect(readKeysBeforeInsertedBefore).toEqual(
        expect.arrayContaining(keys)
      );
      // All keys inserted before 90: should be none
      const readKeysBeforeInsertedAfter = await recordManager.listKeys({
        before: 90,
      });
      expect(readKeysBeforeInsertedAfter).toEqual([]);
      // Set one key to updated at 120 and one at 80
      recordManager.getTime = jest.fn(() => Promise.resolve(120));
      await recordManager.update(["a"]);
      recordManager.getTime = jest.fn(() => Promise.resolve(80));
      await recordManager.update(["b"]);
      // All keys updated after 90 and before 110: should only be "c" now
      const readKeysBeforeAndAfter = await recordManager.listKeys({
        before: 110,
        after: 90,
      });
      expect(readKeysBeforeAndAfter).toEqual(["c"]);
    } finally {
      recordManager.getTime = unmockedGetTime;
    }
  });

  test("List keys with groupIds", async () => {
    const keys = ["a", "b", "c"];
    await recordManager.update(keys, {
      groupIds: ["group1", "group1", "group2"],
    });
    const readKeys = await recordManager.listKeys({ groupIds: ["group1"] });
    expect(readKeys).toEqual(["a", "b"]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/serializable.ts
|
// Re-export everything from core so consumers can keep importing
// serialization helpers from the community package's entrypoint.
export * from "@langchain/core/load/serializable";
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/import_constants.ts
|
// Auto-generated by `scripts/create-entrypoints.js`. Do not edit manually.
// Entrypoints listed here carry optional peer dependencies and are only
// importable when those dependencies are installed.
export const optionalImportEntrypoints: string[] = [
"langchain_community/tools/aws_lambda",
"langchain_community/tools/aws_sfn",
"langchain_community/tools/duckduckgo_search",
"langchain_community/tools/discord",
"langchain_community/tools/gmail",
"langchain_community/tools/google_calendar",
"langchain_community/agents/toolkits/aws_sfn",
"langchain_community/agents/toolkits/stagehand",
"langchain_community/embeddings/bedrock",
"langchain_community/embeddings/cloudflare_workersai",
"langchain_community/embeddings/cohere",
"langchain_community/embeddings/gradient_ai",
"langchain_community/embeddings/hf",
"langchain_community/embeddings/hf_transformers",
"langchain_community/embeddings/ibm",
"langchain_community/embeddings/jina",
"langchain_community/embeddings/llama_cpp",
"langchain_community/embeddings/premai",
"langchain_community/embeddings/tensorflow",
"langchain_community/embeddings/tencent_hunyuan",
"langchain_community/embeddings/tencent_hunyuan/web",
"langchain_community/embeddings/zhipuai",
"langchain_community/llms/arcjet",
"langchain_community/llms/bedrock",
"langchain_community/llms/bedrock/web",
"langchain_community/llms/cohere",
"langchain_community/llms/gradient_ai",
"langchain_community/llms/hf",
"langchain_community/llms/ibm",
"langchain_community/llms/llama_cpp",
"langchain_community/llms/portkey",
"langchain_community/llms/raycast",
"langchain_community/llms/replicate",
"langchain_community/llms/sagemaker_endpoint",
"langchain_community/llms/watsonx_ai",
"langchain_community/llms/writer",
"langchain_community/llms/layerup_security",
"langchain_community/vectorstores/analyticdb",
"langchain_community/vectorstores/astradb",
"langchain_community/vectorstores/azure_aisearch",
"langchain_community/vectorstores/azure_cosmosdb",
"langchain_community/vectorstores/cassandra",
"langchain_community/vectorstores/chroma",
"langchain_community/vectorstores/clickhouse",
"langchain_community/vectorstores/closevector/node",
"langchain_community/vectorstores/closevector/web",
"langchain_community/vectorstores/cloudflare_vectorize",
"langchain_community/vectorstores/convex",
"langchain_community/vectorstores/couchbase",
"langchain_community/vectorstores/elasticsearch",
"langchain_community/vectorstores/faiss",
"langchain_community/vectorstores/googlevertexai",
"langchain_community/vectorstores/hnswlib",
"langchain_community/vectorstores/hanavector",
"langchain_community/vectorstores/lancedb",
"langchain_community/vectorstores/libsql",
"langchain_community/vectorstores/milvus",
"langchain_community/vectorstores/momento_vector_index",
"langchain_community/vectorstores/mongodb_atlas",
"langchain_community/vectorstores/myscale",
"langchain_community/vectorstores/neo4j_vector",
"langchain_community/vectorstores/neon",
"langchain_community/vectorstores/opensearch",
"langchain_community/vectorstores/pgvector",
"langchain_community/vectorstores/pinecone",
"langchain_community/vectorstores/qdrant",
"langchain_community/vectorstores/redis",
"langchain_community/vectorstores/rockset",
"langchain_community/vectorstores/singlestore",
"langchain_community/vectorstores/supabase",
"langchain_community/vectorstores/tigris",
"langchain_community/vectorstores/typeorm",
"langchain_community/vectorstores/typesense",
"langchain_community/vectorstores/upstash",
"langchain_community/vectorstores/usearch",
"langchain_community/vectorstores/vercel_postgres",
"langchain_community/vectorstores/voy",
"langchain_community/vectorstores/weaviate",
"langchain_community/vectorstores/xata",
"langchain_community/vectorstores/zep",
"langchain_community/vectorstores/zep_cloud",
"langchain_community/chat_models/arcjet",
"langchain_community/chat_models/bedrock",
"langchain_community/chat_models/bedrock/web",
"langchain_community/chat_models/ibm",
"langchain_community/chat_models/iflytek_xinghuo",
"langchain_community/chat_models/iflytek_xinghuo/web",
"langchain_community/chat_models/llama_cpp",
"langchain_community/chat_models/portkey",
"langchain_community/chat_models/premai",
"langchain_community/chat_models/tencent_hunyuan",
"langchain_community/chat_models/tencent_hunyuan/web",
"langchain_community/chat_models/webllm",
"langchain_community/chat_models/zhipuai",
"langchain_community/callbacks/handlers/llmonitor",
"langchain_community/callbacks/handlers/lunary",
"langchain_community/callbacks/handlers/upstash_ratelimit",
"langchain_community/retrievers/amazon_kendra",
"langchain_community/retrievers/amazon_knowledge_base",
"langchain_community/retrievers/dria",
"langchain_community/retrievers/metal",
"langchain_community/retrievers/supabase",
"langchain_community/retrievers/vectara_summary",
"langchain_community/retrievers/zep",
"langchain_community/structured_query/chroma",
"langchain_community/structured_query/qdrant",
"langchain_community/structured_query/supabase",
"langchain_community/structured_query/vectara",
"langchain_community/retrievers/zep_cloud",
"langchain_community/graphs/neo4j_graph",
"langchain_community/graphs/memgraph_graph",
"langchain_community/document_compressors/ibm",
"langchain_community/document_transformers/html_to_text",
"langchain_community/document_transformers/mozilla_readability",
"langchain_community/storage/cassandra",
"langchain_community/storage/convex",
"langchain_community/storage/ioredis",
"langchain_community/storage/upstash_redis",
"langchain_community/storage/vercel_kv",
"langchain_community/stores/message/astradb",
"langchain_community/stores/message/cassandra",
"langchain_community/stores/message/cloudflare_d1",
"langchain_community/stores/message/convex",
"langchain_community/stores/message/dynamodb",
"langchain_community/stores/message/firestore",
"langchain_community/stores/message/ipfs_datastore",
"langchain_community/stores/message/ioredis",
"langchain_community/stores/message/momento",
"langchain_community/stores/message/mongodb",
"langchain_community/stores/message/planetscale",
"langchain_community/stores/message/postgres",
"langchain_community/stores/message/redis",
"langchain_community/stores/message/upstash_redis",
"langchain_community/stores/message/xata",
"langchain_community/stores/message/zep_cloud",
"langchain_community/memory/motorhead_memory",
"langchain_community/memory/zep",
"langchain_community/memory/zep_cloud",
"langchain_community/indexes/postgres",
"langchain_community/indexes/sqlite",
"langchain_community/document_loaders/web/apify_dataset",
"langchain_community/document_loaders/web/assemblyai",
"langchain_community/document_loaders/web/azure_blob_storage_container",
"langchain_community/document_loaders/web/azure_blob_storage_file",
"langchain_community/document_loaders/web/browserbase",
"langchain_community/document_loaders/web/cheerio",
"langchain_community/document_loaders/web/puppeteer",
"langchain_community/document_loaders/web/playwright",
"langchain_community/document_loaders/web/college_confidential",
"langchain_community/document_loaders/web/gitbook",
"langchain_community/document_loaders/web/hn",
"langchain_community/document_loaders/web/imsdb",
"langchain_community/document_loaders/web/figma",
"langchain_community/document_loaders/web/firecrawl",
"langchain_community/document_loaders/web/github",
"langchain_community/document_loaders/web/taskade",
"langchain_community/document_loaders/web/notionapi",
"langchain_community/document_loaders/web/pdf",
"langchain_community/document_loaders/web/recursive_url",
"langchain_community/document_loaders/web/s3",
"langchain_community/document_loaders/web/sitemap",
"langchain_community/document_loaders/web/sonix_audio",
"langchain_community/document_loaders/web/confluence",
"langchain_community/document_loaders/web/couchbase",
"langchain_community/document_loaders/web/spider",
"langchain_community/document_loaders/web/youtube",
"langchain_community/document_loaders/fs/chatgpt",
"langchain_community/document_loaders/fs/srt",
"langchain_community/document_loaders/fs/pdf",
"langchain_community/document_loaders/fs/docx",
"langchain_community/document_loaders/fs/epub",
"langchain_community/document_loaders/fs/csv",
"langchain_community/document_loaders/fs/notion",
"langchain_community/document_loaders/fs/obsidian",
"langchain_community/document_loaders/fs/unstructured",
"langchain_community/document_loaders/fs/openai_whisper_audio",
"langchain_community/document_loaders/fs/pptx",
"langchain_community/utils/convex",
"langchain_community/utils/cassandra",
"langchain_community/experimental/multimodal_embeddings/googlevertexai",
"langchain_community/experimental/hubs/makersuite/googlemakersuitehub",
"langchain_community/experimental/tools/pyinterpreter",
"langchain_community/chains/graph_qa/cypher",
];
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/map_keys.ts
|
/**
 * Generic string-keyed map of serialized constructor fields.
 *
 * Values are intentionally untyped (`any`): serialized payloads may contain
 * arbitrary JSON-compatible data whose shape is only known to the consumer.
 */
export interface SerializedFields {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  [key: string]: any;
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/import_type.ts
|
// Auto-generated by `scripts/create-entrypoints.js`. Do not edit manually.
// Map of optional module entrypoints. Currently empty; the loader combines
// it with `Record<string, any>` so consumers can pass arbitrary entries.
export interface OptionalImportMap {}
// Names of environment variables that may hold secrets for the integrations
// in this package. Used during deserialization to look up secret values.
export interface SecretMap {
  ALIBABA_API_KEY?: string;
  AUTHENTICATOR?: string;
  AWS_ACCESS_KEY_ID?: string;
  // NOTE(review): "SECRETE" looks like a typo of AWS_SECRET_ACCESS_KEY (next
  // entry). This file is auto-generated; confirm upstream before removing —
  // keeping both preserves backward compatibility.
  AWS_SECRETE_ACCESS_KEY?: string;
  AWS_SECRET_ACCESS_KEY?: string;
  AWS_SESSION_TOKEN?: string;
  AZURE_AISEARCH_ENDPOINT?: string;
  AZURE_AISEARCH_KEY?: string;
  AZURE_COSMOSDB_CONNECTION_STRING?: string;
  BAIDU_API_KEY?: string;
  BAIDU_SECRET_KEY?: string;
  BEDROCK_AWS_ACCESS_KEY_ID?: string;
  BEDROCK_AWS_SECRET_ACCESS_KEY?: string;
  CLOUDFLARE_API_TOKEN?: string;
  COHERE_API_KEY?: string;
  DATABERRY_API_KEY?: string;
  DRIA_API_KEY?: string;
  FIREWORKS_API_KEY?: string;
  FRIENDLI_TEAM?: string;
  FRIENDLI_TOKEN?: string;
  GOOGLE_API_KEY?: string;
  GOOGLE_PALM_API_KEY?: string;
  GOOGLE_PLACES_API_KEY?: string;
  GOOGLE_ROUTES_API_KEY?: string;
  GRADIENT_ACCESS_TOKEN?: string;
  GRADIENT_WORKSPACE_ID?: string;
  HUGGINGFACEHUB_API_KEY?: string;
  IBM_CLOUD_API_KEY?: string;
  IFLYTEK_API_KEY?: string;
  IFLYTEK_API_SECRET?: string;
  MILVUS_PASSWORD?: string;
  MILVUS_SSL?: string;
  MILVUS_USERNAME?: string;
  MINIMAX_API_KEY?: string;
  MINIMAX_GROUP_ID?: string;
  MOONSHOT_API_KEY?: string;
  NOVITA_API_KEY?: string;
  PLANETSCALE_DATABASE_URL?: string;
  PLANETSCALE_HOST?: string;
  PLANETSCALE_PASSWORD?: string;
  PLANETSCALE_USERNAME?: string;
  PREM_API_KEY?: string;
  QDRANT_API_KEY?: string;
  QDRANT_URL?: string;
  REDIS_PASSWORD?: string;
  REDIS_URL?: string;
  REDIS_USERNAME?: string;
  REMOTE_RETRIEVER_AUTH_BEARER?: string;
  REPLICATE_API_TOKEN?: string;
  SEARXNG_API_BASE?: string;
  TENCENT_SECRET_ID?: string;
  TENCENT_SECRET_KEY?: string;
  TOGETHER_AI_API_KEY?: string;
  TURBOPUFFER_API_KEY?: string;
  UPSTASH_REDIS_REST_TOKEN?: string;
  UPSTASH_REDIS_REST_URL?: string;
  VECTARA_API_KEY?: string;
  VECTARA_CORPUS_ID?: string;
  VECTARA_CUSTOMER_ID?: string;
  WATSONX_AI_APIKEY?: string;
  WATSONX_AI_AUTH_TYPE?: string;
  WATSONX_AI_BEARER_TOKEN?: string;
  WATSONX_AI_PASSWORD?: string;
  WATSONX_AI_URL?: string;
  WATSONX_AI_USERNAME?: string;
  WATSONX_PROJECT_ID?: string;
  WRITER_API_KEY?: string;
  WRITER_ORG_ID?: string;
  YC_API_KEY?: string;
  YC_IAM_TOKEN?: string;
  ZEP_API_KEY?: string;
  ZEP_API_URL?: string;
  ZHIPUAI_API_KEY?: string;
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/import_map.ts
|
// Auto-generated by build script. Do not edit manually.
export * as load__serializable from "../load/serializable.js";
export * as tools__aiplugin from "../tools/aiplugin.js";
export * as tools__bingserpapi from "../tools/bingserpapi.js";
export * as tools__brave_search from "../tools/brave_search.js";
export * as tools__calculator from "../tools/calculator.js";
export * as tools__connery from "../tools/connery.js";
export * as tools__dadjokeapi from "../tools/dadjokeapi.js";
export * as tools__dynamic from "../tools/dynamic.js";
export * as tools__dataforseo_api_search from "../tools/dataforseo_api_search.js";
export * as tools__google_custom_search from "../tools/google_custom_search.js";
export * as tools__google_places from "../tools/google_places.js";
export * as tools__google_routes from "../tools/google_routes.js";
export * as tools__ifttt from "../tools/ifttt.js";
export * as tools__searchapi from "../tools/searchapi.js";
export * as tools__searxng_search from "../tools/searxng_search.js";
export * as tools__serpapi from "../tools/serpapi.js";
export * as tools__serper from "../tools/serper.js";
export * as tools__stackexchange from "../tools/stackexchange.js";
export * as tools__tavily_search from "../tools/tavily_search.js";
export * as tools__wikipedia_query_run from "../tools/wikipedia_query_run.js";
export * as tools__wolframalpha from "../tools/wolframalpha.js";
export * as agents__toolkits__base from "../agents/toolkits/base.js";
export * as agents__toolkits__connery from "../agents/toolkits/connery/index.js";
export * as embeddings__alibaba_tongyi from "../embeddings/alibaba_tongyi.js";
export * as embeddings__baidu_qianfan from "../embeddings/baidu_qianfan.js";
export * as embeddings__deepinfra from "../embeddings/deepinfra.js";
export * as embeddings__fireworks from "../embeddings/fireworks.js";
export * as embeddings__minimax from "../embeddings/minimax.js";
export * as embeddings__ollama from "../embeddings/ollama.js";
export * as embeddings__togetherai from "../embeddings/togetherai.js";
export * as embeddings__voyage from "../embeddings/voyage.js";
export * as llms__ai21 from "../llms/ai21.js";
export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
export * as llms__cloudflare_workersai from "../llms/cloudflare_workersai.js";
export * as llms__deepinfra from "../llms/deepinfra.js";
export * as llms__fireworks from "../llms/fireworks.js";
export * as llms__friendli from "../llms/friendli.js";
export * as llms__ollama from "../llms/ollama.js";
export * as llms__togetherai from "../llms/togetherai.js";
export * as llms__yandex from "../llms/yandex.js";
export * as vectorstores__prisma from "../vectorstores/prisma.js";
export * as vectorstores__turbopuffer from "../vectorstores/turbopuffer.js";
export * as vectorstores__vectara from "../vectorstores/vectara.js";
export * as chat_models__alibaba_tongyi from "../chat_models/alibaba_tongyi.js";
export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
export * as chat_models__cloudflare_workersai from "../chat_models/cloudflare_workersai.js";
export * as chat_models__deepinfra from "../chat_models/deepinfra.js";
export * as chat_models__fireworks from "../chat_models/fireworks.js";
export * as chat_models__friendli from "../chat_models/friendli.js";
export * as chat_models__minimax from "../chat_models/minimax.js";
export * as chat_models__moonshot from "../chat_models/moonshot.js";
export * as chat_models__novita from "../chat_models/novita.js";
export * as chat_models__ollama from "../chat_models/ollama.js";
export * as chat_models__togetherai from "../chat_models/togetherai.js";
export * as chat_models__yandex from "../chat_models/yandex.js";
export * as retrievers__bm25 from "../retrievers/bm25.js";
export * as retrievers__chaindesk from "../retrievers/chaindesk.js";
export * as retrievers__databerry from "../retrievers/databerry.js";
export * as retrievers__remote from "../retrievers/remote/index.js";
export * as retrievers__tavily_search_api from "../retrievers/tavily_search_api.js";
export * as retrievers__vespa from "../retrievers/vespa.js";
export * as caches__cloudflare_kv from "../caches/cloudflare_kv.js";
export * as caches__ioredis from "../caches/ioredis.js";
export * as caches__momento from "../caches/momento.js";
export * as caches__upstash_redis from "../caches/upstash_redis.js";
export * as stores__doc__base from "../stores/doc/base.js";
export * as stores__doc__gcs from "../stores/doc/gcs.js";
export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
export * as stores__message__file_system from "../stores/message/file_system.js";
export * as stores__message__in_memory from "../stores/message/in_memory.js";
export * as memory__chat_memory from "../memory/chat_memory.js";
export * as indexes__base from "../indexes/base.js";
export * as indexes__memory from "../indexes/memory.js";
export * as document_loaders__web__airtable from "../document_loaders/web/airtable.js";
export * as document_loaders__web__html from "../document_loaders/web/html.js";
export * as document_loaders__web__searchapi from "../document_loaders/web/searchapi.js";
export * as document_loaders__web__serpapi from "../document_loaders/web/serpapi.js";
export * as document_loaders__web__sort_xyz_blockchain from "../document_loaders/web/sort_xyz_blockchain.js";
export * as utils__event_source_parse from "../utils/event_source_parse.js";
export * as experimental__callbacks__handlers__datadog from "../experimental/callbacks/handlers/datadog.js";
export * as experimental__graph_transformers__llm from "../experimental/graph_transformers/llm.js";
export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
export * as experimental__llms__chrome_ai from "../experimental/llms/chrome_ai.js";
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/load/index.ts
|
import { load as coreLoad } from "@langchain/core/load";
import { type OptionalImportMap, type SecretMap } from "./import_type.js";
import * as importMap from "./import_map.js";
import { optionalImportEntrypoints } from "./import_constants.js";
export {
optionalImportEntrypoints,
importMap,
type OptionalImportMap,
type SecretMap,
};
/**
 * Deserialize a LangChain module from its serialized text representation.
 *
 * NOTE: This functionality is currently in beta. Loaded classes may change
 * independently of semver.
 *
 * @param text Serialized text representation of the module.
 * @param secretsMap Map of secret names to their values, injected on load.
 * @param optionalImportsMap Map of optional entrypoints to pre-imported modules.
 * @returns A loaded instance of a LangChain module.
 */
export async function load<T>(
  text: string,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  secretsMap: Record<string, any> = {},
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  optionalImportsMap: OptionalImportMap & Record<string, any> = {}
): Promise<T> {
  // Delegate to the core loader, supplying this package's own import map and
  // the registry of optional entrypoints.
  const loadOptions = {
    secretsMap,
    optionalImportsMap,
    optionalImportEntrypoints,
    importMap,
  };
  return coreLoad(text, loadOptions);
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/doc/in_memory.ts
|
// Thin re-export: the in-memory docstore implementation lives in the core
// "langchain" package. NOTE(review): presumably kept so this package exposes
// a matching entrypoint — confirm before removing.
export * from "langchain/stores/doc/in_memory";
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/doc/gcs.ts
|
import { Storage, File } from "@google-cloud/storage";
import { Document } from "@langchain/core/documents";
import { Docstore } from "langchain/stores/doc/base";
/**
* Interface that defines the configuration for the
* GoogleCloudStorageDocstore. It includes the bucket name and an optional
* prefix.
*/
export interface GoogleCloudStorageDocstoreConfiguration {
  /** The identifier for the GCS bucket */
  bucket: string;
  /**
   * An optional prefix to prepend to each object name.
   * Often used to create a pseudo-hierarchy.
   * The prefix is concatenated verbatim with the object name, so include a
   * trailing "/" if a folder-like hierarchy is desired.
   */
  prefix?: string;
}
/**
 * Document store backed by Google Cloud Storage (GCS).
 *
 * Each document is persisted as a single GCS object: the object's contents
 * hold the page content, and the object's custom metadata holds the
 * document's metadata. Implements the abstract {@link Docstore} contract.
 */
export class GoogleCloudStorageDocstore extends Docstore {
  bucket: string;

  prefix = "";

  storage: Storage;

  constructor(config: GoogleCloudStorageDocstoreConfiguration) {
    super();
    const { bucket, prefix } = config;
    this.bucket = bucket;
    this.prefix = prefix ?? this.prefix;
    this.storage = new Storage();
  }

  /**
   * Fetch a single document from the bucket.
   * @param search Object name (without prefix) to look up.
   * @returns The stored document, rebuilt from object contents and metadata.
   */
  async search(search: string): Promise<Document> {
    const target = this.getFile(search);
    const [objectMetadata] = await target.getMetadata();
    const [contents] = await target.download();
    return new Document({
      pageContent: contents.toString(),
      metadata: objectMetadata?.metadata,
    });
  }

  /**
   * Store several documents at once, keyed by object name.
   * All uploads run concurrently.
   * @param texts Map from document name to the Document to store.
   */
  async add(texts: Record<string, Document>): Promise<void> {
    const uploads = Object.entries(texts).map(([name, doc]) =>
      this.addDocument(name, doc)
    );
    await Promise.all(uploads);
  }

  /**
   * Store a single document: page content as the object body, document
   * metadata as the object's custom metadata.
   * @param name Object name (without prefix) to write to.
   * @param document The document to persist.
   */
  async addDocument(name: string, document: Document): Promise<void> {
    const target = this.getFile(name);
    await target.save(document.pageContent);
    await target.setMetadata({ metadata: document.metadata });
  }

  /** Resolve a document name to a GCS File handle, applying the prefix. */
  private getFile(name: string): File {
    return this.storage.bucket(this.bucket).file(`${this.prefix}${name}`);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/doc/base.ts
|
import { Document } from "@langchain/core/documents";
/**
* Abstract class for a document store. All document stores should extend
* this class.
*/
export abstract class Docstore {
  /** Look up a single document by its identifier. */
  abstract search(search: string): Promise<Document>;
  /** Store a batch of documents, keyed by identifier. */
  abstract add(texts: Record<string, Document>): Promise<void>;
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/doc
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/doc/tests/gcs.int.test.ts
|
import { describe, test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { GoogleCloudStorageDocstore } from "../gcs.js";
// Integration tests for GoogleCloudStorageDocstore. Skipped by default:
// running them requires GCP credentials and a real bucket name (replace
// INSERT_BUCKET_HERE before running).
describe.skip("GoogleCloudStorageDocstore", () => {
  const bucket = "INSERT_BUCKET_HERE";
  test("save", async () => {
    const name = "test1";
    const pageContent = "This is a test";
    const document = new Document({ pageContent });
    const store = new GoogleCloudStorageDocstore({
      bucket,
    });
    await store.addDocument(name, document);
  });
  test("save metadata", async () => {
    const name = "test2";
    const pageContent = "This is a metadata test";
    const metadata = {
      meta1: "one",
      meta2: "two",
    };
    const document = new Document({ pageContent, metadata });
    const store = new GoogleCloudStorageDocstore({
      bucket,
    });
    await store.addDocument(name, document);
  });
  test("save prefix", async () => {
    const prefix = "prefix/";
    const name = "test3";
    const pageContent = "This is a prefix test";
    const document = new Document({ pageContent });
    const store = new GoogleCloudStorageDocstore({
      bucket,
      prefix,
    });
    await store.addDocument(name, document);
  });
  // NOTE(review): the "load" tests below read back objects written by the
  // "save" tests above, so test order matters within this suite.
  test("load", async () => {
    const name = "test1";
    const store = new GoogleCloudStorageDocstore({
      bucket,
    });
    const document = await store.search(name);
    // console.log(document);
    expect(document.pageContent).toEqual("This is a test");
  });
  test("load metadata", async () => {
    const name = "test2";
    const store = new GoogleCloudStorageDocstore({
      bucket,
    });
    const document = await store.search(name);
    // console.log(document);
    expect(document.pageContent).toEqual("This is a metadata test");
    expect(document.metadata.meta1).toEqual("one");
  });
  test("load prefix", async () => {
    const prefix = "prefix/";
    const name = "test3";
    const store = new GoogleCloudStorageDocstore({
      bucket,
      prefix,
    });
    const document = await store.search(name);
    // console.log(document);
    expect(document.pageContent).toEqual("This is a prefix test");
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/redis_upstash.int.test.ts
|
/* eslint-disable no-promise-executor-return */
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect, describe } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { UpstashRedisChatMessageHistory } from "../message/upstash_redis.js";
// Connection settings read from the environment; both variables must be set
// for these integration tests to run against a live Upstash Redis instance.
const config = {
  url: process.env.UPSTASH_REDIS_REST_URL!,
  token: process.env.UPSTASH_REDIS_REST_TOKEN!,
};
// Skipped by default: requires a live Upstash Redis instance.
describe.skip("UpstashRedisChatMessageHistory", () => {
  test("Test Redis Upstash history store", async () => {
    const chatHistory = new UpstashRedisChatMessageHistory({
      sessionId: new Date().toISOString(),
      config,
    });
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIChatMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
  });
  test("Test clear Redis Upstash history store", async () => {
    const chatHistory = new UpstashRedisChatMessageHistory({
      sessionId: new Date().toISOString(),
      config,
    });
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIChatMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
    await chatHistory.clear();
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
  });
  test("Test Redis Upstash history with a TTL", async () => {
    const chatHistory = new UpstashRedisChatMessageHistory({
      sessionId: new Date().toISOString(),
      sessionTTL: 5,
      config,
    });
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIChatMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
    // Wait past the 5-second sessionTTL so the session entry expires.
    await new Promise((resolve) => setTimeout(resolve, 5000));
    const expiredResult = await chatHistory.getMessages();
    expect(expiredResult).toStrictEqual([]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/postgres.int.test.ts
|
import pg from "pg";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { PostgresChatMessageHistory } from "../message/postgres.js";
// Integration tests for PostgresChatMessageHistory. Skipped by default:
// they require a local Postgres instance matching the pool settings below.
describe.skip("Postgres Chat History", () => {
  let chatHistory: PostgresChatMessageHistory;
  let pool: pg.Pool;
  const tableName = "test";
  const sessionId = "test-session-id";
  beforeAll(async () => {
    pool = new pg.Pool({
      host: "127.0.0.1",
      port: 5432,
      user: "myuser",
      password: "ChangeMe",
      database: "api",
    });
    chatHistory = new PostgresChatMessageHistory({
      tableName,
      sessionId,
      pool,
    });
  });
  // Keep tests independent: wipe this session's rows after each test.
  afterEach(async () => {
    await chatHistory.clear();
  });
  // Release the connection pool once the suite finishes.
  afterAll(async () => {
    await chatHistory.end();
  });
  test("Test postgres history store", async () => {
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
  });
  test("Test clear postgres history store", async () => {
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
    await chatHistory.clear();
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
  });
  test("Returns messages in correct order", async () => {
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIMessage("Ozzy Osbourne");
    await chatHistory.addUserMessage("What is the best song?");
    await chatHistory.addAIMessage("Crazy Train");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
      new HumanMessage("What is the best song?"),
      new AIMessage("Crazy Train"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
  });
  test("Handles multiple sessions", async () => {
    const newSessionId = "new-session-id";
    const newChatHistory = new PostgresChatMessageHistory({
      tableName,
      sessionId: newSessionId,
      pool,
    });
    // finally-block cleanup: the shared afterEach only clears the default
    // session, so the second session must be cleared here.
    try {
      await chatHistory.addUserMessage("Who is the best vocalist?");
      await chatHistory.addAIMessage("Ozzy Osbourne");
      await newChatHistory.addUserMessage("What is the best song?");
      await newChatHistory.addAIMessage("Crazy Train");
      const expectedMessages = [
        new HumanMessage("Who is the best vocalist?"),
        new AIMessage("Ozzy Osbourne"),
      ];
      const newExpectedMessages = [
        new HumanMessage("What is the best song?"),
        new AIMessage("Crazy Train"),
      ];
      const resultWithHistory = await chatHistory.getMessages();
      expect(resultWithHistory).toEqual(expectedMessages);
      const newResultWithHistory = await newChatHistory.getMessages();
      expect(newResultWithHistory).toEqual(newExpectedMessages);
      await newChatHistory.clear();
      const blankResult = await newChatHistory.getMessages();
      expect(blankResult).toStrictEqual([]);
      // Ensure that the original chat history is still intact after clearing the new chat history
      const resultWithHistoryAfterClear = await chatHistory.getMessages();
      expect(resultWithHistoryAfterClear).toEqual(expectedMessages);
    } finally {
      await newChatHistory.clear();
    }
  });
  test("Can store & retrieve message IDs", async () => {
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
    const aiMessageId = "ai-message-id";
    const aiMessage = new AIMessage({
      content: "Ozzy Osbourne",
      id: aiMessageId,
    });
    await chatHistory.addMessage(aiMessage);
    const expectedMessages = [aiMessage];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toHaveLength(1);
    expect(resultWithHistory).toEqual(expectedMessages);
    expect(resultWithHistory[0].id).toEqual(aiMessageId);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/planetscale.int.test.ts
|
/* eslint-disable no-promise-executor-return */
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { PlanetScaleChatMessageHistory } from "../message/planetscale.js";
/** PlanetScale connection settings, sourced from the environment. */
const config = {
  url: process.env.PLANETSCALE_DATABASE_URL!,
};

/** Builds a fresh history client with a unique (timestamp) session id. */
const createHistory = () =>
  new PlanetScaleChatMessageHistory({
    sessionId: new Date().toISOString(),
    config,
  });

// Integration tests; individual tests are skipped as they require a live
// PlanetScale database.
describe("PlanetScaleChatMessageHistory", () => {
  test.skip("Test Planetscale history store", async () => {
    const history = createHistory();

    // A brand-new session starts out empty.
    expect(await history.getMessages()).toStrictEqual([]);

    await history.addUserMessage("Who is the best vocalist?");
    await history.addAIChatMessage("Ozzy Osbourne");

    // Messages come back in insertion order.
    expect(await history.getMessages()).toEqual([
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ]);
  });
  test.skip("Test clear Planetscale history store", async () => {
    const history = createHistory();

    await history.addUserMessage("Who is the best vocalist?");
    await history.addAIChatMessage("Ozzy Osbourne");
    expect(await history.getMessages()).toEqual([
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ]);

    // Clearing the session removes every stored message.
    await history.clear();
    expect(await history.getMessages()).toStrictEqual([]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/firestore.int.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import admin from "firebase-admin";
import { FirestoreChatMessageHistory } from "../message/firestore.js";
const sessionId = Date.now().toString();
// firebase emulators:start --only firestore --project your-project-id
// FIRESTORE_EMULATOR_HOST="localhost:8080" yarn test:single -- firestore.int.test.ts
// Skipped by default: requires the Firestore emulator or real project
// credentials (see the run instructions in the comments above).
test.skip("Test firestore message history store", async () => {
  const messageHistory = new FirestoreChatMessageHistory({
    collectionName: "langchain",
    sessionId,
    userId: "a@example.com",
    config: { projectId: "your-project-id" },
  });
  await messageHistory.addUserMessage("My name's Jonas");
  await messageHistory.addAIChatMessage("Nice to meet you, Jonas!");
  await messageHistory.addUserMessage("Nice to meet you too!");
  const expectedMessages = [
    new HumanMessage("My name's Jonas"),
    new AIMessage("Nice to meet you, Jonas!"),
    new HumanMessage("Nice to meet you too!"),
  ];
  expect(await messageHistory.getMessages()).toEqual(expectedMessages);
  // A second client, configured with explicit credentials but pointing at
  // the same session, is expected to read back the same messages.
  const messageHistory2 = new FirestoreChatMessageHistory({
    collections: ["langchain"],
    docs: ["langchain-doc-id"],
    sessionId,
    userId: "a@example.com",
    config: {
      projectId: "YOUR-PROJECT-ID",
      credential: admin.credential.cert({
        projectId: "YOUR-PROJECT-ID",
        privateKey:
          "-----BEGIN PRIVATE KEY-----\nnCHANGE-ME\n-----END PRIVATE KEY-----\n",
        clientEmail: "CHANGE-ME@CHANGE-ME-TOO.iam.gserviceaccount.com",
      }),
    },
  });
  expect(await messageHistory2.getMessages()).toEqual(expectedMessages);
  await messageHistory.clear();
  expect(await messageHistory.getMessages()).toEqual([]);
});
// Skipped by default: requires real Firestore credentials. Verifies that a
// history rooted in nested collections round-trips a single message.
test.skip("Test firestore works with nested collections", async () => {
  const history = new FirestoreChatMessageHistory({
    collections: ["chats", "bots"],
    docs: ["chat-id", "bot-id"],
    sessionId: "user-id",
    userId: "a@example.com",
    config: {
      projectId: "YOUR-PROJECT-ID",
      credential: admin.credential.cert({
        projectId: "YOUR-PROJECT-ID",
        privateKey:
          "-----BEGIN PRIVATE KEY-----\nnCHANGE-ME\n-----END PRIVATE KEY-----\n",
        clientEmail: "CHANGE-ME@CHANGE-ME-TOO.iam.gserviceaccount.com",
      }),
    },
  });

  const greeting = new HumanMessage(
    `My name's Jonas and the current time is ${new Date().toLocaleTimeString()}`
  );
  await history.addMessage(greeting);
  expect(await history.getMessages()).toEqual([greeting]);

  // Wipe the collection and confirm it reads back empty.
  await history.clear();
  expect(await history.getMessages()).toEqual([]);
});
// Skipped by default: requires real Firestore credentials. Covers the edge
// case where `collections` is a list containing exactly one entry.
test.skip("Test firestore works with when only a list of one collection is passed.", async () => {
  const messageHistory = new FirestoreChatMessageHistory({
    collections: ["only-one"],
    sessionId: "user-id",
    userId: "a@example.com",
    config: {
      projectId: "YOUR-PROJECT-ID",
      credential: admin.credential.cert({
        projectId: "YOUR-PROJECT-ID",
        privateKey:
          "-----BEGIN PRIVATE KEY-----\nnCHANGE-ME\n-----END PRIVATE KEY-----\n",
        clientEmail: "CHANGE-ME@CHANGE-ME-TOO.iam.gserviceaccount.com",
      }),
    },
  });
  const message = new HumanMessage(
    `My name's Jonas and the current time is ${new Date().toLocaleTimeString()}`
  );
  await messageHistory.addMessage(message);
  const gotMessages = await messageHistory.getMessages();
  expect(gotMessages).toEqual([message]);
  // clear the collection
  await messageHistory.clear();
  // verify that the collection is empty
  const messagesAfterClear = await messageHistory.getMessages();
  expect(messagesAfterClear).toEqual([]);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/cassandra.int.test.ts
|
// /* eslint-disable no-process-env */
// Hangs when run with other tests, uncomment for development
// import { test, expect, describe } from "@jest/globals";
// import { AIMessage, HumanMessage } from "@langchain/core/messages";
// import { CassandraClientFactory } from "../../utils/cassandra.js";
// import { CassandraChatMessageHistory } from "../message/cassandra.js";
// Placeholder: keeps Jest from failing this file for containing zero active
// tests while the real Cassandra integration tests remain commented out.
test("Empty test to prevent runner from complaining", async () => {});
// const cassandraConfig = {
// serviceProviderArgs: {
// astra: {
// token: process.env.ASTRA_TOKEN as string,
// endpoint: process.env.ASTRA_DB_ENDPOINT as string,
// },
// },
// keyspace: "test",
// table: "test_message_history",
// };
// let client;
// // For internal testing:
// // 1. switch "describe.skip(" to "describe("
// // 2. Export OPENAI_API_KEY, ASTRA_DB_ENDPOINT, and ASTRA_TOKEN
// // 3. cd langchainjs/libs/langchain-community
// // 4. yarn test:single src/stores/tests/cassandra.int.test.ts
// // Once manual testing is complete, re-instate the ".skip"
// describe.skip("CassandraChatMessageHistory", () => {
// beforeAll(async () => {
// client = await CassandraClientFactory.getClient(cassandraConfig);
// await client.execute("DROP TABLE IF EXISTS test.test_message_history;");
// });
// test("CassandraChatMessageHistory: empty history", async () => {
// const messageHistory = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_A123",
// });
// expect(await messageHistory.getMessages()).toEqual([]);
// });
// test("CassandraChatMessageHistory: add and get messages", async () => {
// const messageHistory = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_B123",
// });
// await messageHistory.addUserMessage("I am a nice human.");
// await messageHistory.addAIChatMessage(
// "Yes you seem to be. I am a nice AI."
// );
// await messageHistory.addUserMessage("We will see about that.");
// const expectedMessages = [
// new HumanMessage("I am a nice human."),
// new AIMessage("Yes you seem to be. I am a nice AI."),
// new HumanMessage("We will see about that."),
// ];
// expect(await messageHistory.getMessages()).toEqual(expectedMessages);
// const messageHistoryDifferentSession = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_B456",
// });
// expect(await messageHistoryDifferentSession.getMessages()).toEqual([]);
// const messageHistorySameSession = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_B123",
// });
// expect(await messageHistorySameSession.getMessages()).toEqual(
// expectedMessages
// );
// });
// test("CassandraChatMessageHistory: clear messages", async () => {
// const messageHistory = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_C123",
// });
// await messageHistory.addUserMessage("I am a nice human.");
// await messageHistory.addAIChatMessage(
// "Yes you seem to be. I am a nice AI."
// );
// await messageHistory.addUserMessage("We will see about that.");
// const expectedMessages = [
// new HumanMessage("I am a nice human."),
// new AIMessage("Yes you seem to be. I am a nice AI."),
// new HumanMessage("We will see about that."),
// ];
// const messageHistoryToClear = new CassandraChatMessageHistory({
// ...cassandraConfig,
// sessionId: "test_session_C789",
// });
// await messageHistoryToClear.addUserMessage("Hello.");
// await messageHistoryToClear.addAIChatMessage("Hello. How may I help?");
// const expectedMessagesToClear = [
// new HumanMessage("Hello."),
// new AIMessage("Hello. How may I help?"),
// ];
// expect(await messageHistoryToClear.getMessages()).toEqual(
// expectedMessagesToClear
// );
// await messageHistoryToClear.clear();
// expect(await messageHistoryToClear.getMessages()).toEqual([]);
// expect(await messageHistory.getMessages()).toEqual(expectedMessages);
// });
// });
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/dynamodb.int.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect, beforeEach, afterEach } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { DynamoDBChatMessageHistory } from "../message/dynamodb.js";

describe("DynamoDB message history store", () => {
  // AWS connection settings, read from the environment for every client.
  const awsConfig = () => ({
    region: process.env.AWS_REGION!,
    credentials: {
      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
    },
  });

  // One session per test run; an ISO timestamp is unique enough here.
  const sessionId = new Date().toISOString();

  let history: DynamoDBChatMessageHistory;
  let humanGreeting: HumanMessage;
  let aiReply: AIMessage;
  let plainFollowUp: HumanMessage;

  beforeEach(() => {
    history = new DynamoDBChatMessageHistory({
      tableName: "langchain",
      sessionId,
      config: awsConfig(),
    });
    humanGreeting = new HumanMessage({
      content: "Hi I am Michael",
      additional_kwargs: {
        someArg: "Love is love",
        createdAt: new Date().toISOString(),
      },
    });
    aiReply = new AIMessage({
      content: "Nice to meet you, Michael!",
      additional_kwargs: {
        someArg: "Langchain is awesome",
        createdAt: new Date().toISOString(),
      },
    });
    // A message without additional_kwargs, to cover the no-kwargs path.
    plainFollowUp = new HumanMessage({
      content: "Nice to meet you too!",
    });
  });

  // Keep the shared table clean between tests.
  afterEach(async () => {
    await history.clear();
  });

  test("should add and retrieve messages", async () => {
    await history.addMessage(humanGreeting);
    await history.addMessage(aiReply);
    await history.addMessage(plainFollowUp);
    expect(await history.getMessages()).toEqual([
      humanGreeting,
      aiReply,
      plainFollowUp,
    ]);
  });

  test("should retrieve messages from a new instance", async () => {
    await history.addMessage(humanGreeting);
    await history.addMessage(aiReply);
    await history.addMessage(plainFollowUp);
    // A second client pointed at the same session must see the same data.
    const freshInstance = new DynamoDBChatMessageHistory({
      tableName: "langchain",
      sessionId,
      config: awsConfig(),
    });
    expect(await freshInstance.getMessages()).toEqual([
      humanGreeting,
      aiReply,
      plainFollowUp,
    ]);
  });

  // afterEach of the previous test cleared the session, so it starts empty.
  test("should clear messages", async () => {
    expect(await history.getMessages()).toEqual([]);
  });

  test("should add multiple messages", async () => {
    const batch = [humanGreeting, aiReply, plainFollowUp];
    await history.addMessages(batch);
    expect(await history.getMessages()).toEqual(batch);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/xata.int.test.ts
|
/* eslint-disable no-process-env */
// eslint-disable-next-line import/no-extraneous-dependencies
import { BaseClient } from "@xata.io/client";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
import { XataChatMessageHistory } from "../message/xata.js";
describe("XataChatMessageHistory", () => {
  // Generates a 6-character lowercase id so each test uses a distinct
  // session in the shared "memory" table.
  const randomSessionId = (): string =>
    [...Array(6)]
      .map(() => "abcdefghijklmnopqrstuvwxyz"[Math.floor(Math.random() * 26)])
      .join("");
  // Delete every record written during the run so the table is left clean.
  afterAll(async () => {
    const xata = new BaseClient({
      databaseURL: process.env.XATA_DB_URL,
      apiKey: process.env.XATA_API_KEY,
      branch: process.env.XATA_BRANCH || "main",
    });
    const records = await xata.db.memory.select(["id"]).getAll();
    await xata.db.memory.delete(records.map((m) => m.id));
  });
  // Round-trips a human + AI message pair through the Xata-backed store.
  test.skip("Test Xata history store", async () => {
    const xata = new BaseClient({
      databaseURL: process.env.XATA_DB_URL,
      apiKey: process.env.XATA_API_KEY,
      branch: process.env.XATA_BRANCH || "main",
    });
    const chatHistory = new XataChatMessageHistory({
      sessionId: randomSessionId(),
      client: xata,
      apiKey: process.env.XATA_API_KEY,
    });
    // A brand-new session must start with no messages.
    const blankResult = await chatHistory.getMessages();
    expect(blankResult).toStrictEqual([]);
    await chatHistory.addUserMessage("Who is the best vocalist?");
    await chatHistory.addAIChatMessage("Ozzy Osbourne");
    const expectedMessages = [
      new HumanMessage("Who is the best vocalist?"),
      new AIMessage("Ozzy Osbourne"),
    ];
    const resultWithHistory = await chatHistory.getMessages();
    expect(resultWithHistory).toEqual(expectedMessages);
  });
  // Constructor contract: createTable=true requires an apiKey;
  // createTable=false must not.
  test.skip("Test Xata don't create table", async () => {
    const xata = new BaseClient({
      databaseURL: process.env.XATA_DB_URL,
      apiKey: process.env.XATA_API_KEY,
      branch: process.env.XATA_BRANCH || "main",
    });
    const t = () => {
      // eslint-disable-next-line no-new
      new XataChatMessageHistory({
        sessionId: randomSessionId(),
        client: xata,
        createTable: true,
      });
    };
    expect(t).toThrowError(
      "If createTable is set, an apiKey must be provided to XataChatMessageHistoryInput, either directly or through the config object"
    );
    const t1 = () => {
      // eslint-disable-next-line no-new
      new XataChatMessageHistory({
        sessionId: randomSessionId(),
        client: xata,
        createTable: false,
      });
    };
    expect(t1).not.toThrow();
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/astradb.int.test.ts
|
/* eslint-disable no-process-env */
import { DataAPIClient, Db } from "@datastax/astra-db-ts";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
import { AstraDBChatMessageHistory } from "../message/astradb.js";
let db: Db;
describe.skip("AstraDBChatMessageHistory", () => {
  beforeAll(() => {
    // Fail fast with a clear assertion when credentials are missing,
    // rather than an opaque client error later in the run.
    expect(process.env.ASTRA_DB_APPLICATION_TOKEN).toBeDefined();
    expect(process.env.ASTRA_DB_ENDPOINT).toBeDefined();
    const client = new DataAPIClient(
      process.env.ASTRA_DB_APPLICATION_TOKEN as string
    );
    db = client.db(process.env.ASTRA_DB_ENDPOINT as string, {
      namespace: process.env.ASTRA_DB_NAMESPACE,
    });
  });
  // Recreate the collection before every test so each starts from empty.
  beforeEach(async () => {
    try {
      await db.dropCollection("test_messages");
    } catch (e) {
      // Collection doesn't exist yet on the first run; safe to ignore.
    }
    await db.createCollection("test_messages");
  });
  // Fixed typo in test titles: "Asta DB" -> "Astra DB".
  test("Test Astra DB Chat History", async () => {
    const collection = await db.collection("test_messages");
    const sessionId = "langchain_test_messages_session";
    // Tests creation via the constructor taking an existing collection.
    const history = new AstraDBChatMessageHistory({ collection, sessionId });
    await history.addUserMessage(
      "What TS client allows me to connect to Astra DB?"
    );
    await history.addAIChatMessage("@datastax/astra-db-ts");
    const expectedMessages = [
      new HumanMessage("What TS client allows me to connect to Astra DB?"),
      new AIMessage("@datastax/astra-db-ts"),
    ];
    const getResults = await history.getMessages();
    expect(getResults).toEqual(expectedMessages);
  });
  test("Test clear Astra DB Chat History", async () => {
    const sessionId = "langchain_test_messages_session";
    // tests creation via static method
    const history = await AstraDBChatMessageHistory.initialize({
      token: process.env.ASTRA_DB_APPLICATION_TOKEN ?? "token",
      endpoint: process.env.ASTRA_DB_ENDPOINT ?? "endpoint",
      collectionName: "test_messages",
      namespace: process.env.ASTRA_DB_NAMESPACE,
      sessionId,
    });
    await history.addUserMessage(
      "What TS client allows me to connect to Astra DB?"
    );
    await history.addAIChatMessage("@datastax/astra-db-ts");
    const expectedMessages = [
      new HumanMessage("What TS client allows me to connect to Astra DB?"),
      new AIMessage("@datastax/astra-db-ts"),
    ];
    const getResults = await history.getMessages();
    expect(getResults).toEqual(expectedMessages);
    // clear() must empty the session without touching the collection itself.
    await history.clear();
    const emptyResults = await history.getMessages();
    expect(emptyResults).toStrictEqual([]);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/file_chat_history.int.test.ts
|
/* eslint-disable no-promise-executor-return */
import { expect } from "@jest/globals";
import { promises as fs } from "node:fs";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { v4 as uuid } from "uuid";
import {
FILE_HISTORY_DEFAULT_FILE_PATH,
FileSystemChatMessageHistory,
} from "../message/file_system.js";
// Remove the shared history file once all tests have run.
afterAll(async () => {
  try {
    await fs.unlink(FILE_HISTORY_DEFAULT_FILE_PATH);
  } catch {
    // Ignore error if the file does not exist
  }
});
// Basic round-trip: a fresh session starts empty and returns what was added.
test("FileSystemChatMessageHistory works", async () => {
  const input = {
    sessionId: uuid(),
  };
  const chatHistory = new FileSystemChatMessageHistory(input);
  const blankResult = await chatHistory.getMessages();
  expect(blankResult).toStrictEqual([]);
  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIMessage("Ozzy Osbourne");
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);
});
// A second instance with the same sessionId reads messages written by the first.
test("FileSystemChatMessageHistory persist sessions", async () => {
  const input = {
    sessionId: uuid(),
  };
  const chatHistory1 = new FileSystemChatMessageHistory(input);
  const blankResult = await chatHistory1.getMessages();
  expect(blankResult).toStrictEqual([]);
  await chatHistory1.addUserMessage("Who is the best vocalist?");
  await chatHistory1.addAIMessage("Ozzy Osbourne");
  const chatHistory2 = new FileSystemChatMessageHistory(input);
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const resultWithHistory = await chatHistory2.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);
});
// clear() empties a single session's messages.
test("FileSystemChatMessageHistory clear session", async () => {
  const input = {
    sessionId: uuid(),
    userId: uuid(),
  };
  const chatHistory = new FileSystemChatMessageHistory(input);
  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIMessage("Ozzy Osbourne");
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);
  await chatHistory.clear();
  const blankResult = await chatHistory.getMessages();
  expect(blankResult).toStrictEqual([]);
});
// clearAllSessions() wipes every session, including ones owned by other
// instances sharing the same backing file.
test("FileSystemChatMessageHistory clear all sessions", async () => {
  const input1 = {
    sessionId: uuid(),
    userId: "user1",
  };
  const chatHistory1 = new FileSystemChatMessageHistory(input1);
  await chatHistory1.addUserMessage("Who is the best vocalist?");
  await chatHistory1.addAIMessage("Ozzy Osbourne");
  const input2 = {
    sessionId: uuid(),
    userId: "user1",
  };
  const chatHistory2 = new FileSystemChatMessageHistory(input2);
  await chatHistory2.addUserMessage("Who is the best vocalist?");
  await chatHistory2.addAIMessage("Ozzy Osbourne");
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const result1 = await chatHistory1.getMessages();
  expect(result1).toEqual(expectedMessages);
  const result2 = await chatHistory2.getMessages();
  expect(result2).toEqual(expectedMessages);
  await chatHistory1.clearAllSessions();
  const deletedResult1 = await chatHistory1.getMessages();
  const deletedResult2 = await chatHistory2.getMessages();
  expect(deletedResult1).toStrictEqual([]);
  expect(deletedResult2).toStrictEqual([]);
});
// Per-session context metadata is stored and listed via getAllSessions().
// NOTE(review): the ordering assertion (sessions[0]/sessions[1]) assumes
// getAllSessions returns sessions in insertion order — confirm against the
// implementation if this test flakes.
test("FileSystemChatMessageHistory set context and get all sessions", async () => {
  const session1 = {
    sessionId: uuid(),
    userId: "user1",
  };
  const context1 = { title: "Best vocalist" };
  const chatHistory1 = new FileSystemChatMessageHistory(session1);
  await chatHistory1.setContext(context1);
  await chatHistory1.addUserMessage("Who is the best vocalist?");
  await chatHistory1.addAIMessage("Ozzy Osbourne");
  const chatHistory2 = new FileSystemChatMessageHistory({
    sessionId: uuid(),
    userId: "user1",
  });
  const context2 = { title: "Best guitarist" };
  await chatHistory2.addUserMessage("Who is the best guitarist?");
  await chatHistory2.addAIMessage("Jimi Hendrix");
  await chatHistory2.setContext(context2);
  const sessions = await chatHistory1.getAllSessions();
  expect(sessions.length).toBe(2);
  expect(sessions[0].context).toEqual(context1);
  expect(sessions[1].context).toEqual(context2);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/tests/ipfs_datastore.int.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { MemoryDatastore } from "datastore-core";
import { IPFSDatastoreChatMessageHistory } from "../message/ipfs_datastore.js";
describe.skip("IPFSDatastoreChatMessageHistory", () => {
  // Single in-memory datastore shared by all tests; isolation between tests
  // comes from using distinct sessionIds.
  const datastore = new MemoryDatastore();
  test("IPFSDatastoreChatMessageHistory: empty history", async () => {
    const messageHistory = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_A123",
    });
    expect(await messageHistory.getMessages()).toEqual([]);
  });
  test("IPFSDatastoreChatMessageHistory: add and get messages", async () => {
    const messageHistory = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_B123",
    });
    await messageHistory.addUserMessage("I am a nice human.");
    await messageHistory.addAIChatMessage(
      "Yes you seem to be. I am a nice AI."
    );
    await messageHistory.addUserMessage("We will see about that.");
    const expectedMessages = [
      new HumanMessage("I am a nice human."),
      new AIMessage("Yes you seem to be. I am a nice AI."),
      new HumanMessage("We will see about that."),
    ];
    expect(await messageHistory.getMessages()).toEqual(expectedMessages);
    // A different session must not see this session's messages.
    const messageHistoryDifferentSession = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_B456",
    });
    expect(await messageHistoryDifferentSession.getMessages()).toEqual([]);
    // A second instance on the same session must see the same messages.
    const messageHistorySameSession = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_B123",
    });
    expect(await messageHistorySameSession.getMessages()).toEqual(
      expectedMessages
    );
  });
  test("IPFSDatastoreChatMessageHistory: clear messages", async () => {
    const messageHistory = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_C123",
    });
    await messageHistory.addUserMessage("I am a nice human.");
    await messageHistory.addAIChatMessage(
      "Yes you seem to be. I am a nice AI."
    );
    await messageHistory.addUserMessage("We will see about that.");
    const expectedMessages = [
      new HumanMessage("I am a nice human."),
      new AIMessage("Yes you seem to be. I am a nice AI."),
      new HumanMessage("We will see about that."),
    ];
    // Clearing one session must leave other sessions untouched.
    const messageHistoryToClear = new IPFSDatastoreChatMessageHistory({
      datastore,
      sessionId: "test_session_C789",
    });
    await messageHistoryToClear.addUserMessage("Hello.");
    await messageHistoryToClear.addAIChatMessage("Hello. How may I help?");
    const expectedMessagesToClear = [
      new HumanMessage("Hello."),
      new AIMessage("Hello. How may I help?"),
    ];
    expect(await messageHistoryToClear.getMessages()).toEqual(
      expectedMessagesToClear
    );
    await messageHistoryToClear.clear();
    expect(await messageHistoryToClear.getMessages()).toEqual([]);
    expect(await messageHistory.getMessages()).toEqual(expectedMessages);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/firestore.ts
|
import type { AppOptions } from "firebase-admin";
import { getApps, initializeApp } from "firebase-admin/app";
import {
getFirestore,
DocumentData,
Firestore,
DocumentReference,
FieldValue,
} from "firebase-admin/firestore";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Interface for FirestoreDBChatMessageHistory. It includes the collection
 * name, session ID, user ID, and optionally, the app index and
 * configuration for the Firebase app.
 */
export interface FirestoreDBChatMessageHistory {
  /**
   * An array of collection names, should match the length of `docs` field.
   * @TODO make required variable in 0.2
   */
  collections?: string[];
  /**
   * An array of doc names, should match the length of `collections` field,
   * or undefined if the collections field has a length of 1. In this case,
   * it will default to use `sessionId` as the doc name.
   * @TODO make required variable in 0.2
   */
  docs?: string[];
  /**
   * @deprecated Will be removed in 0.2 use `collections` field instead.
   */
  collectionName?: string;
  /** Chat session identifier; also used as the default doc name. */
  sessionId: string;
  /** User identifier stored on the session document. */
  userId: string;
  /** Index into `getApps()` selecting an already-initialized Firebase app. Defaults to 0. */
  appIdx?: number;
  /** Firebase app options, used to initialize an app when none exists yet. */
  config?: AppOptions;
}
/**
 * Class for managing chat message history using Google's Firestore as a
 * storage backend. Extends the BaseListChatMessageHistory class.
 * @example
 * ```typescript
 * const chatHistory = new FirestoreChatMessageHistory({
 *   collectionName: "langchain",
 *   sessionId: "lc-example",
 *   userId: "a@example.com",
 *   config: { projectId: "your-project-id" },
 * });
 *
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI(),
 *   memory: new BufferMemory({ chatHistory }),
 * });
 *
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class FirestoreChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "firestore"];

  private collections: string[];

  private docs: string[];

  private sessionId: string;

  private userId: string;

  private appIdx: number;

  private config: AppOptions;

  private firestoreClient: Firestore;

  private document: DocumentReference<DocumentData> | null;

  constructor({
    collectionName,
    collections,
    docs,
    sessionId,
    userId,
    appIdx = 0,
    config,
  }: FirestoreDBChatMessageHistory) {
    super();
    // `collectionName` is the deprecated single-collection form; it is
    // mutually exclusive with `collections`.
    if (collectionName && collections) {
      throw new Error(
        "Can not pass in collectionName and collections. Please use collections only."
      );
    }
    if (!collectionName && !collections) {
      throw new Error(
        "Must pass in a list of collections. Fields `collectionName` and `collections` are both undefined."
      );
    }
    if (collections || docs) {
      // This checks that the 'collections' and 'docs' arrays have the same length,
      // which means each collection has a corresponding document name. The only exception allowed is
      // when there is exactly one collection provided and 'docs' is not defined. In this case, it is
      // assumed that the 'sessionId' will be used as the document name for that single collection.
      if (
        !(
          collections?.length === docs?.length ||
          (collections?.length === 1 && !docs)
        )
      ) {
        throw new Error(
          "Collections and docs options must have the same length, or collections must have a length of 1 if docs is not defined."
        );
      }
    }
    this.collections = collections || ([collectionName] as string[]);
    this.docs = docs || ([sessionId] as string[]);
    this.sessionId = sessionId;
    this.userId = userId;
    this.document = null;
    this.appIdx = appIdx;
    if (config) this.config = config;
    try {
      this.ensureFirestore();
    } catch (error) {
      // Preserve the underlying failure instead of masking it with a
      // generic message (the original discarded `error` entirely).
      throw new Error(`Failed to initialize Firestore: ${error}`);
    }
  }

  /**
   * Initializes (or reuses) the Firebase app and resolves the nested
   * document reference by alternating collection/doc segments.
   */
  private ensureFirestore(): void {
    let app;
    // Check if the app is already initialized else get appIdx
    if (!getApps().length) app = initializeApp(this.config);
    else app = getApps()[this.appIdx];

    this.firestoreClient = getFirestore(app);

    // Walk collections[i]/docs[i] pairs to build the target document ref,
    // e.g. colA/docA/colB/docB.
    this.document = this.collections.reduce<DocumentReference<DocumentData>>(
      (acc, collection, index) =>
        acc.collection(collection).doc(this.docs[index]),
      this.firestoreClient as unknown as DocumentReference<DocumentData>
    );
  }

  /**
   * Method to retrieve all messages from the Firestore collection
   * associated with the current session. Returns an array of BaseMessage
   * objects, ordered by creation time ascending.
   * @returns Array of stored messages
   */
  async getMessages(): Promise<BaseMessage[]> {
    if (!this.document) {
      throw new Error("Document not initialized");
    }

    const querySnapshot = await this.document
      .collection("messages")
      .orderBy("createdAt", "asc")
      .get()
      .catch((err) => {
        throw new Error(`Unknown response type: ${err.toString()}`);
      });

    const response: StoredMessage[] = [];
    querySnapshot.forEach((doc) => {
      const { type, data } = doc.data();
      response.push({ type, data });
    });
    return mapStoredMessagesToChatMessages(response);
  }

  /**
   * Method to add a new message to the Firestore collection. The message is
   * passed as a BaseMessage object.
   * @param message The message to be added as a BaseMessage object.
   */
  public async addMessage(message: BaseMessage) {
    const messages = mapChatMessagesToStoredMessages([message]);
    await this.upsertMessage(messages[0]);
  }

  /**
   * Upserts the session parent document, then appends the message to the
   * "messages" subcollection with a server-side timestamp.
   */
  private async upsertMessage(message: StoredMessage): Promise<void> {
    if (!this.document) {
      throw new Error("Document not initialized");
    }
    await this.document.set(
      {
        id: this.sessionId,
        user_id: this.userId,
      },
      { merge: true }
    );
    await this.document
      .collection("messages")
      .add({
        type: message.type,
        data: message.data,
        createdBy: this.userId,
        createdAt: FieldValue.serverTimestamp(),
      })
      .catch((err) => {
        throw new Error(`Unknown response type: ${err.toString()}`);
      });
  }

  /**
   * Method to delete all messages from the Firestore collection associated
   * with the current session, then the session document itself.
   */
  public async clear(): Promise<void> {
    if (!this.document) {
      throw new Error("Document not initialized");
    }
    const querySnapshot = await this.document
      .collection("messages")
      .get()
      .catch((err) => {
        throw new Error(`Unknown response type: ${err.toString()}`);
      });
    // Await every per-message delete so failures surface here instead of
    // becoming unhandled rejections (the original fired them inside a
    // forEach without awaiting), and so the parent document is only
    // removed after its subcollection is gone.
    await Promise.all(
      querySnapshot.docs.map((snapshot) =>
        snapshot.ref.delete().catch((err) => {
          throw new Error(`Unknown response type: ${err.toString()}`);
        })
      )
    );
    await this.document.delete().catch((err) => {
      throw new Error(`Unknown response type: ${err.toString()}`);
    });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/convex.ts
|
/* eslint-disable @typescript-eslint/no-explicit-any */
// eslint-disable-next-line import/no-extraneous-dependencies
import {
DocumentByInfo,
DocumentByName,
FieldPaths,
FunctionReference,
GenericActionCtx,
GenericDataModel,
NamedTableInfo,
TableNamesInDataModel,
IndexNames,
makeFunctionReference,
} from "convex/server";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Type that defines the config required to initialize the
 * ConvexChatMessageHistory class. At minimum it needs a sessionId
 * and an ActionCtx.
 */
export type ConvexChatMessageHistoryInput<
  DataModel extends GenericDataModel,
  TableName extends TableNamesInDataModel<DataModel> = "messages",
  IndexName extends IndexNames<
    NamedTableInfo<DataModel, TableName>
  > = "bySessionId",
  SessionIdFieldName extends FieldPaths<
    NamedTableInfo<DataModel, TableName>
  > = "sessionId",
  MessageTextFieldName extends FieldPaths<
    NamedTableInfo<DataModel, TableName>
  > = "message",
  InsertMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; document: object }
  > = any,
  LookupQuery extends FunctionReference<
    "query",
    "internal",
    { table: string; index: string; keyField: string; key: string },
    object[]
  > = any,
  DeleteManyMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; index: string; keyField: string; key: string }
  > = any
> = {
  /** Convex action context used to run the lookup/insert/delete functions. */
  readonly ctx: GenericActionCtx<DataModel>;
  /** Value of the session-id field identifying this conversation. */
  readonly sessionId: DocumentByName<DataModel, TableName>[SessionIdFieldName];
  /**
   * Defaults to "messages"
   */
  readonly table?: TableName;
  /**
   * Defaults to "bySessionId"
   */
  readonly index?: IndexName;
  /**
   * Defaults to "sessionId"
   */
  readonly sessionIdField?: SessionIdFieldName;
  /**
   * Defaults to "message"
   */
  readonly messageTextFieldName?: MessageTextFieldName;
  /**
   * Defaults to `internal.langchain.db.insert`
   */
  readonly insert?: InsertMutation;
  /**
   * Defaults to `internal.langchain.db.lookup`
   */
  readonly lookup?: LookupQuery;
  /**
   * Defaults to `internal.langchain.db.deleteMany`
   */
  readonly deleteMany?: DeleteManyMutation;
};
/**
 * Chat message history backed by a Convex table, queried/mutated through
 * internal Convex function references.
 *
 * NOTE(review): the type-parameter order here (SessionIdFieldName second)
 * differs from ConvexChatMessageHistoryInput (SessionIdFieldName fourth);
 * the constructor re-maps them explicitly, so this appears intentional.
 */
export class ConvexChatMessageHistory<
  DataModel extends GenericDataModel,
  SessionIdFieldName extends FieldPaths<
    NamedTableInfo<DataModel, TableName>
  > = "sessionId",
  TableName extends TableNamesInDataModel<DataModel> = "messages",
  IndexName extends IndexNames<
    NamedTableInfo<DataModel, TableName>
  > = "bySessionId",
  MessageTextFieldName extends FieldPaths<
    NamedTableInfo<DataModel, TableName>
  > = "message",
  InsertMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; document: object }
  > = any,
  LookupQuery extends FunctionReference<
    "query",
    "internal",
    { table: string; index: string; keyField: string; key: string },
    object[]
  > = any,
  DeleteManyMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; index: string; keyField: string; key: string }
  > = any
> extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "convex"];

  private readonly ctx: GenericActionCtx<DataModel>;

  private readonly sessionId: DocumentByInfo<
    NamedTableInfo<DataModel, TableName>
  >[SessionIdFieldName];

  private readonly table: TableName;

  private readonly index: IndexName;

  private readonly sessionIdField: SessionIdFieldName;

  private readonly messageTextFieldName: MessageTextFieldName;

  private readonly insert: InsertMutation;

  private readonly lookup: LookupQuery;

  private readonly deleteMany: DeleteManyMutation;

  constructor(
    config: ConvexChatMessageHistoryInput<
      DataModel,
      TableName,
      IndexName,
      SessionIdFieldName,
      MessageTextFieldName,
      InsertMutation,
      LookupQuery,
      DeleteManyMutation
    >
  ) {
    super();
    this.ctx = config.ctx;
    this.sessionId = config.sessionId;
    // Fall back to the conventional table/index/field names used by the
    // generated `langchain/db` Convex module.
    this.table = config.table ?? ("messages" as TableName);
    this.index = config.index ?? ("bySessionId" as IndexName);
    this.sessionIdField =
      config.sessionIdField ?? ("sessionId" as SessionIdFieldName);
    this.messageTextFieldName =
      config.messageTextFieldName ?? ("message" as MessageTextFieldName);
    this.insert =
      config.insert ?? (makeFunctionReference("langchain/db:insert") as any);
    this.lookup =
      config.lookup ?? (makeFunctionReference("langchain/db:lookup") as any);
    this.deleteMany =
      config.deleteMany ??
      (makeFunctionReference("langchain/db:deleteMany") as any);
  }

  /**
   * Fetches all documents for this session via the lookup query and maps
   * the stored-message field of each back into BaseMessage objects.
   */
  async getMessages(): Promise<BaseMessage[]> {
    const convexDocuments: any[] = await this.ctx.runQuery(this.lookup, {
      table: this.table,
      index: this.index,
      keyField: this.sessionIdField,
      key: this.sessionId,
    } as any);

    return mapStoredMessagesToChatMessages(
      convexDocuments.map((doc) => doc[this.messageTextFieldName])
    );
  }

  /**
   * Stores a message by running the insert mutation; inserts are issued in
   * chunks of PAGE_SIZE to bound concurrent requests.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    const messages = mapChatMessagesToStoredMessages([message]);
    // TODO: Remove chunking when Convex handles the concurrent requests correctly
    const PAGE_SIZE = 16;
    for (let i = 0; i < messages.length; i += PAGE_SIZE) {
      await Promise.all(
        // Note: this inner `message` shadows the method parameter.
        messages.slice(i, i + PAGE_SIZE).map((message) =>
          this.ctx.runMutation(this.insert, {
            table: this.table,
            document: {
              [this.sessionIdField]: this.sessionId,
              [this.messageTextFieldName]: message,
            },
          } as any)
        )
      );
    }
  }

  /** Deletes every document belonging to this session. */
  async clear(): Promise<void> {
    await this.ctx.runMutation(this.deleteMany, {
      table: this.table,
      index: this.index,
      keyField: this.sessionIdField,
      key: this.sessionId,
    } as any);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/postgres.ts
|
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import pg from "pg";
/**
 * Type definition for the input parameters required when instantiating a
 * PostgresChatMessageHistory object.
 */
export type PostgresChatMessageHistoryInput = {
  /**
   * Name of the table to use when storing and retrieving chat message
   */
  tableName?: string;
  /**
   * Session ID to use when storing and retrieving chat message history.
   */
  sessionId: string;
  /**
   * Configuration object for the Postgres pool. If provided the
   * PostgresChatMessageHistory object will create a new pool using
   * the provided configuration. Otherwise it will use the provided
   * pool.
   */
  poolConfig?: pg.PoolConfig;
  /**
   * Postgres pool to use. If provided the PostgresChatMessageHistory
   * object will use the provided pool. Otherwise it will create a
   * new pool using the provided configuration.
   */
  pool?: pg.Pool;
  /**
   * If true, the table name will be escaped. ('lAnGcHaIn' will be escaped to '"lAnGcHaIn"')
   */
  escapeTableName?: boolean;
};
/**
 * Shape of the JSONB `message` column as stored by this class: the
 * StoredMessage `data` fields flattened together with its `type`.
 */
export interface StoredPostgresMessageData {
  name: string | undefined;
  role: string | undefined;
  content: string;
  additional_kwargs?: Record<string, unknown>;
  type: string;
  tool_call_id: string | undefined;
}
/**
* Class for managing chat message history using a Postgres Database as a
* storage backend. Extends the BaseListChatMessageHistory class.
* @example
* ```typescript
* const chatHistory = new PostgresChatMessageHistory({
* tableName: "langchain_chat_histories",
* sessionId: "lc-example",
* pool: new pg.Pool({
* host: "127.0.0.1",
* port: 5432,
* user: "myuser",
* password: "ChangeMe",
* database: "api",
* }),
* });
* ```
*/
export class PostgresChatMessageHistory extends BaseListChatMessageHistory {
lc_namespace = ["langchain", "stores", "message", "postgres"];
pool: pg.Pool;
tableName = "langchain_chat_histories";
sessionId: string;
private initialized = false;
/**
* Creates a new PostgresChatMessageHistory.
* @param {PostgresChatMessageHistoryInput} fields The input fields for the PostgresChatMessageHistory.
* @param {string} fields.tableName The name of the table name to use. Defaults to `langchain_chat_histories`.
* @param {string} fields.sessionId The session ID to use when storing and retrieving chat message history.
* @param {pg.Pool} fields.pool The Postgres pool to use. If provided, the PostgresChatMessageHistory will use the provided pool.
* @param {pg.PoolConfig} fields.poolConfig The configuration object for the Postgres pool. If no pool is provided, the conig will be used to create a new pool.
* If `pool` is provided, it will be used as the Postgres pool even if `poolConfig` is also provided.
* @throws If neither `pool` nor `poolConfig` is provided.
*/
constructor(fields: PostgresChatMessageHistoryInput) {
super(fields);
const { tableName, sessionId, pool, poolConfig, escapeTableName } = fields;
// Ensure that either a client or config is provided
if (!pool && !poolConfig) {
throw new Error(
"PostgresChatMessageHistory requires either a pool instance or pool config"
);
}
this.pool = pool ?? new pg.Pool(poolConfig);
const _tableName = tableName || this.tableName;
this.tableName = escapeTableName
? pg.escapeIdentifier(_tableName)
: _tableName;
this.sessionId = sessionId;
}
/**
* Checks if the table has been created and creates it if it hasn't.
* @returns Promise that resolves when the table's existence is ensured.
*/
private async ensureTable(): Promise<void> {
if (this.initialized) return;
const query = `
CREATE TABLE IF NOT EXISTS ${this.tableName} (
id SERIAL PRIMARY KEY,
session_id VARCHAR(255) NOT NULL,
message JSONB NOT NULL
);`;
try {
await this.pool.query(query);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (e: any) {
// This error indicates that the table already exists
// Due to asynchronous nature of the code, it is possible that
// the table is created between the time we check if it exists
// and the time we try to create it. It can be safely ignored.
// If it's not this error, rethrow it.
if (!("code" in e) || e.code !== "23505") {
throw e;
}
}
this.initialized = true;
}
async addMessage(message: BaseMessage): Promise<void> {
await this.ensureTable();
const { data, type } = mapChatMessagesToStoredMessages([message])[0];
const query = `INSERT INTO ${this.tableName} (session_id, message) VALUES ($1, $2)`;
await this.pool.query(query, [this.sessionId, { ...data, type }]);
}
async getMessages(): Promise<BaseMessage[]> {
await this.ensureTable();
const query = `SELECT message FROM ${this.tableName} WHERE session_id = $1 ORDER BY id`;
const res = await this.pool.query(query, [this.sessionId]);
const storedMessages: StoredMessage[] = res.rows.map(
(row: { message: StoredPostgresMessageData }) => {
const { type, ...data } = row.message;
return { type, data };
}
);
return mapStoredMessagesToChatMessages(storedMessages);
}
/**
 * Removes all messages belonging to the current session.
 */
async clear(): Promise<void> {
  await this.ensureTable();
  await this.pool.query(
    `DELETE FROM ${this.tableName} WHERE session_id = $1`,
    [this.sessionId]
  );
}
/**
 * Closes every connection held by the underlying Postgres pool.
 * Call once when the history is no longer needed; the instance cannot
 * issue further queries afterwards.
 */
async end(): Promise<void> {
  await this.pool.end();
}
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/cloudflare_d1.ts
|
import { v4 } from "uuid";
import type { D1Database } from "@cloudflare/workers-types";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
StoredMessageData,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * @deprecated Install and import from "@langchain/cloudflare" instead.
 *
 * Type definition for the input parameters required when instantiating a
 * CloudflareD1MessageHistory object.
 */
export type CloudflareD1MessageHistoryInput = {
  /** Table used to store messages. Defaults to "langchain_chat_histories". */
  tableName?: string;
  /** Identifier grouping the messages of one conversation. */
  sessionId: string;
  /** Bound Cloudflare D1 database handle (e.g. `env.DB` in a Worker). */
  database?: D1Database;
};
/**
 * @deprecated Install and import from "@langchain/cloudflare" instead.
 *
 * Interface for the data transfer object used when selecting stored
 * messages from the Cloudflare D1 database.
 * Shape mirrors the columns created by `ensureTable`.
 */
interface selectStoredMessagesDTO {
  id: string;
  session_id: string;
  type: string;
  content: string;
  role: string | null;
  name: string | null;
  /** JSON-serialized additional kwargs; parsed on read. */
  additional_kwargs: string;
}
/**
 * @deprecated Install and import from "@langchain/cloudflare" instead.
 *
 * Class for storing and retrieving chat message history from a
 * Cloudflare D1 database. Extends the BaseListChatMessageHistory class.
 * @example
 * ```typescript
 * const memory = new BufferMemory({
 *   returnMessages: true,
 *   chatHistory: new CloudflareD1MessageHistory({
 *     tableName: "stored_message",
 *     sessionId: "example",
 *     database: env.DB,
 *   }),
 * });
 *
 * const chainInput = { input };
 *
 * const res = await memory.chatHistory.invoke(chainInput);
 * await memory.saveContext(chainInput, {
 *   output: res,
 * });
 * ```
 */
export class CloudflareD1MessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "cloudflare_d1"];

  /** Bound D1 database handle. */
  public database: D1Database;

  private tableName: string;

  private sessionId: string;

  /** Set once the table and its indexes have been created. */
  private tableInitialized: boolean;

  constructor(fields: CloudflareD1MessageHistoryInput) {
    super(fields);
    const { sessionId, database, tableName } = fields;
    if (database) {
      this.database = database;
    } else {
      throw new Error(
        "Either a client or config must be provided to CloudflareD1MessageHistory"
      );
    }
    this.tableName = tableName || "langchain_chat_histories";
    this.tableInitialized = false;
    this.sessionId = sessionId;
  }

  /**
   * Private method to ensure that the necessary table exists in the
   * Cloudflare D1 database before performing any operations. If the table
   * does not exist, it is created, along with indexes on id and
   * session_id.
   * @returns Promise that resolves to void.
   */
  private async ensureTable(): Promise<void> {
    if (this.tableInitialized) {
      return;
    }
    const query = `CREATE TABLE IF NOT EXISTS ${this.tableName} (id TEXT PRIMARY KEY, session_id TEXT, type TEXT, content TEXT, role TEXT, name TEXT, additional_kwargs TEXT);`;
    await this.database.prepare(query).bind().all();
    const idIndexQuery = `CREATE INDEX IF NOT EXISTS id_index ON ${this.tableName} (id);`;
    await this.database.prepare(idIndexQuery).bind().all();
    const sessionIdIndexQuery = `CREATE INDEX IF NOT EXISTS session_id_index ON ${this.tableName} (session_id);`;
    await this.database.prepare(sessionIdIndexQuery).bind().all();
    this.tableInitialized = true;
  }

  /**
   * Method to retrieve all messages from the Cloudflare D1 database for the
   * current session.
   * @returns Promise that resolves to an array of BaseMessage objects.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.ensureTable();
    const query = `SELECT * FROM ${this.tableName} WHERE session_id = ?`;
    const rawStoredMessages = await this.database
      .prepare(query)
      .bind(this.sessionId)
      .all();
    const storedMessagesObject =
      rawStoredMessages.results as unknown as selectStoredMessagesDTO[];
    const orderedMessages: StoredMessage[] = storedMessagesObject.map(
      (message) => {
        const data = {
          content: message.content,
          additional_kwargs: JSON.parse(message.additional_kwargs),
        } as StoredMessageData;
        // role and name columns are nullable; only copy them when present.
        if (message.role) {
          data.role = message.role;
        }
        if (message.name) {
          data.name = message.name;
        }
        return {
          type: message.type,
          data,
        };
      }
    );
    return mapStoredMessagesToChatMessages(orderedMessages);
  }

  /**
   * Method to add a new message to the Cloudflare D1 database for the current
   * session.
   * @param message The BaseMessage object to be added to the database.
   * @returns Promise that resolves to void.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureTable();
    const messageToAdd = mapChatMessagesToStoredMessages([message]);
    const query = `INSERT INTO ${this.tableName} (id, session_id, type, content, role, name, additional_kwargs) VALUES(?, ?, ?, ?, ?, ?, ?)`;
    const id = v4();
    await this.database
      .prepare(query)
      .bind(
        id,
        this.sessionId,
        messageToAdd[0].type || null,
        messageToAdd[0].data.content || null,
        messageToAdd[0].data.role || null,
        messageToAdd[0].data.name || null,
        JSON.stringify(messageToAdd[0].data.additional_kwargs)
      )
      .all();
  }

  /**
   * Method to delete all messages from the Cloudflare D1 database for the
   * current session.
   *
   * SQL identifiers (like table names) cannot be supplied as bound
   * parameters, so the table name is interpolated — consistent with every
   * other query in this class — and only the session id is bound.
   * (Previously the table name was bound as `DELETE FROM ?`, which is
   * invalid SQL and fails at statement preparation.)
   * @returns Promise that resolves to void.
   */
  async clear(): Promise<void> {
    await this.ensureTable();
    const query = `DELETE FROM ${this.tableName} WHERE session_id = ?`;
    await this.database.prepare(query).bind(this.sessionId).all();
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/zep_cloud.ts
|
import { Zep, ZepClient } from "@getzep/zep-cloud";
import { Memory, NotFoundError, RoleType } from "@getzep/zep-cloud/api";
import { BaseChatMessageHistory } from "@langchain/core/chat_history";
import {
AIMessage,
BaseMessage,
HumanMessage,
MessageType,
} from "@langchain/core/messages";
import {
condenseZepMemoryIntoHumanMessage,
zepMemoryToMessages,
} from "../../memory/zep_cloud.js";
/**
 * Maps a LangChain message type onto the corresponding Zep role type.
 * Any type without an explicit mapping falls back to "norole".
 */
export const getZepMessageRoleType = (role: MessageType): RoleType => {
  const roleMapping: Partial<Record<MessageType, RoleType>> = {
    human: "user",
    ai: "assistant",
    system: "system",
    function: "function",
    tool: "tool",
  };
  return roleMapping[role] ?? "norole";
};
/**
 * Interface defining the structure of the input data for the ZepMemory
 * class. It includes properties like humanPrefix, aiPrefix, memoryKey, sessionId, memoryType and apiKey.
 */
interface ZepMemoryInput {
  /** Zep session whose memory is read and written. */
  sessionId: string;
  /** Pre-configured Zep Cloud client. */
  client: ZepClient;
  /** Which Zep memory type to fetch (e.g. perpetual / message window). */
  memoryType: Zep.MemoryType;
  /** Name assigned to stored human messages. Defaults to "human". */
  humanPrefix?: string;
  /** Name assigned to stored AI messages. Defaults to "ai". */
  aiPrefix?: string;
  // Whether to return separate messages for chat history with a SystemMessage containing (facts and summary) or return a single HumanMessage with the entire memory context.
  // Defaults to false (return a single HumanMessage) in order to allow more flexibility with different models.
  separateMessages?: boolean;
}
/**
 * Class used to manage the memory of a chat session, including loading
 * and saving the chat history, and clearing the memory when needed. It
 * uses the ZepClient to interact with the Zep service for managing the
 * chat session's memory.
 *
 */
export class ZepCloudChatMessageHistory
  extends BaseChatMessageHistory
  implements ZepMemoryInput
{
  lc_namespace: string[] = [];

  sessionId: string;

  client: ZepClient;

  memoryType: Zep.MemoryType;

  humanPrefix = "human";

  aiPrefix = "ai";

  separateMessages = false;

  constructor(fields: ZepMemoryInput) {
    super();
    this.sessionId = fields.sessionId;
    this.memoryType = fields.memoryType;
    this.client = fields.client;
    if (fields.humanPrefix) {
      this.humanPrefix = fields.humanPrefix;
    }
    if (fields.aiPrefix) {
      this.aiPrefix = fields.aiPrefix;
    }
    if (fields.separateMessages) {
      this.separateMessages = fields.separateMessages;
    }
  }

  /**
   * Fetches this session's memory from Zep.
   * @returns The memory, or null when the session does not exist or the
   *          request fails (the error is logged, not rethrown).
   */
  private async getMemory(): Promise<Memory | null> {
    try {
      // Await inside the try block so a rejected request is handled by
      // the catch below; returning the bare promise would bypass it and
      // surface the rejection to the caller instead.
      return await this.client.memory.get(this.sessionId, {
        memoryType: this.memoryType,
      });
    } catch (error) {
      // eslint-disable-next-line no-instanceof/no-instanceof
      if (error instanceof NotFoundError) {
        console.warn(
          `Session ${this.sessionId} not found in Zep. Returning None`
        );
      } else {
        console.error("Error getting memory: ", error);
      }
      return null;
    }
  }

  /**
   * Returns the session's history, either as separate messages or
   * condensed into a single HumanMessage (see `separateMessages`).
   */
  async getMessages(): Promise<BaseMessage[]> {
    const memory = await this.getMemory();
    if (!memory) {
      return [];
    }
    return this.separateMessages
      ? zepMemoryToMessages(memory)
      : [condenseZepMemoryIntoHumanMessage(memory)];
  }

  /**
   * Stores an AI message in Zep.
   * @param message Message text.
   * @param metadata Optional metadata stored alongside the message.
   */
  async addAIChatMessage(
    message: string,
    metadata?: Record<string, unknown>
  ): Promise<void> {
    await this.addMessage(new AIMessage({ content: message }), metadata);
  }

  /**
   * Stores an arbitrary message in Zep, tagging it with the configured
   * human/AI prefix as its name.
   * @throws If the message content is null or a content-part list.
   */
  async addMessage(
    message: BaseMessage,
    metadata?: Record<string, unknown>
  ): Promise<void> {
    const messageToSave = message;
    if (message._getType() === "ai") {
      messageToSave.name = this.aiPrefix;
    } else if (message._getType() === "human") {
      messageToSave.name = this.humanPrefix;
    }
    if (message.content === null) {
      throw new Error("Message content cannot be null");
    }
    if (Array.isArray(message.content)) {
      throw new Error("Message content cannot be a list");
    }
    await this.client.memory.add(this.sessionId, {
      messages: [
        {
          content: message.content,
          role: message.name ?? message._getType(),
          roleType: getZepMessageRoleType(message._getType()),
          metadata,
        },
      ],
    });
  }

  /**
   * Stores a human message in Zep.
   * @param message Message text.
   * @param metadata Optional metadata stored alongside the message.
   */
  async addUserMessage(
    message: string,
    metadata?: Record<string, unknown>
  ): Promise<void> {
    // Pass metadata to addMessage (not the HumanMessage constructor),
    // mirroring addAIChatMessage; previously it was fed to the message
    // constructor and silently dropped from the Zep record.
    await this.addMessage(new HumanMessage({ content: message }), metadata);
  }

  clear(): Promise<void> {
    console.warn("Clearing memory", this.sessionId);
    return Promise.resolve(undefined);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/in_memory.ts
|
// Re-export the core in-memory implementation under its historical
// community name for backwards compatibility.
export { InMemoryChatMessageHistory as ChatMessageHistory } from "@langchain/core/chat_history";
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/astradb.ts
|
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import { DataAPIClient, Collection } from "@datastax/astra-db-ts";
/** Connection parameters for `AstraDBChatMessageHistory.initialize`. */
export interface AstraDBChatMessageHistoryInput {
  /** Astra DB application token. */
  token: string;
  /** Astra DB API endpoint URL. */
  endpoint: string;
  /** Collection used to store messages. */
  collectionName: string;
  /** Optional keyspace/namespace the collection lives in. */
  namespace?: string;
  /** Identifier grouping the messages of one conversation. */
  sessionId: string;
}
/** Constructor parameters when a Collection is already at hand. */
export interface AstraDBChatMessageHistoryProps {
  collection: Collection;
  sessionId: string;
}
/**
* Class for storing chat message history with Astra DB. It extends the
* BaseListChatMessageHistory class and provides methods to get, add, and
* clear messages.
* @example
*
* ```typescript
* const client = new AstraDB(
* process.env.ASTRA_DB_APPLICATION_TOKEN,
* process.env.ASTRA_DB_ENDPOINT,
* process.env.ASTRA_DB_NAMESPACE
* );
*
* const collection = await client.collection("test_chat");
*
* const chatHistory = new AstraDBChatMessageHistory({
* collection,
* sessionId: "YOUR_SESSION_ID",
* });
*
* const messages = await chatHistory.getMessages();
*
* await chatHistory.clear();
*/
export class AstraDBChatMessageHistory extends BaseListChatMessageHistory {
lc_namespace = ["langchain", "stores", "message", "astradb"];
private sessionId: string;
private collection: Collection;
constructor({ collection, sessionId }: AstraDBChatMessageHistoryProps) {
super();
this.sessionId = sessionId;
this.collection = collection;
}
/**
* async initializer function to return a new instance of AstraDBChatMessageHistory in a single step
* @param AstraDBChatMessageHistoryInput
* @returns Promise<AstraDBChatMessageHistory>
*
* @example
* const chatHistory = await AstraDBChatMessageHistory.initialize({
* token: process.env.ASTRA_DB_APPLICATION_TOKEN,
* endpoint: process.env.ASTRA_DB_ENDPOINT,
* namespace: process.env.ASTRA_DB_NAMESPACE,
* collectionName:"test_chat",
* sessionId: "YOUR_SESSION_ID"
* });
*/
static async initialize({
token,
endpoint,
collectionName,
namespace,
sessionId,
}: AstraDBChatMessageHistoryInput): Promise<AstraDBChatMessageHistory> {
const client = new DataAPIClient(token, { caller: ["langchainjs"] });
const db = client.db(endpoint, { namespace });
const collection = await db.collection(collectionName);
return new AstraDBChatMessageHistory({ collection, sessionId });
}
async getMessages(): Promise<BaseMessage[]> {
const docs = this.collection.find({
sessionId: this.sessionId,
});
const docsArray = await docs.toArray();
const sortedDocs = docsArray.sort((a, b) => a.timestamp - b.timestamp);
const storedMessages: StoredMessage[] = sortedDocs.map((doc) => ({
type: doc.type,
data: doc.data,
}));
return mapStoredMessagesToChatMessages(storedMessages);
}
async addMessage(message: BaseMessage): Promise<void> {
const messages = mapChatMessagesToStoredMessages([message]);
const { type, data } = messages[0];
await this.collection.insertOne({
sessionId: this.sessionId,
timestamp: Date.now(),
type,
data,
});
}
async clear(): Promise<void> {
await this.collection.deleteMany({
sessionId: this.sessionId,
});
}
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/mongodb.ts
|
import { Collection, Document as MongoDBDocument, ObjectId } from "mongodb";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/** @deprecated Install and import from the "@langchain/mongodb" integration package instead. */
export interface MongoDBChatMessageHistoryInput {
  /** MongoDB collection whose documents hold per-session message arrays. */
  collection: Collection<MongoDBDocument>;
  /** Hex string used as the session document's ObjectId. */
  sessionId: string;
}
/**
 * @deprecated Install and import from the "@langchain/mongodb" integration package instead.
 *
 * Chat message history stored as a single MongoDB document per session,
 * keyed by ObjectId(sessionId), with messages accumulated in an array.
 * @example
 * ```typescript
 * const chatHistory = new MongoDBChatMessageHistory({
 *   collection: myCollection,
 *   sessionId: 'unique-session-id',
 * });
 * const messages = await chatHistory.getMessages();
 * await chatHistory.clear();
 * ```
 */
export class MongoDBChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "mongodb"];

  private collection: Collection<MongoDBDocument>;

  private sessionId: string;

  constructor({ collection, sessionId }: MongoDBChatMessageHistoryInput) {
    super();
    this.collection = collection;
    this.sessionId = sessionId;
  }

  /** Loads all messages for the session (empty array if none yet). */
  async getMessages(): Promise<BaseMessage[]> {
    const sessionFilter = { _id: new ObjectId(this.sessionId) };
    const sessionDoc = await this.collection.findOne(sessionFilter);
    const storedMessages = sessionDoc?.messages || [];
    return mapStoredMessagesToChatMessages(storedMessages);
  }

  /** Appends one message, creating the session document on first write. */
  async addMessage(message: BaseMessage): Promise<void> {
    const storedMessages = mapChatMessagesToStoredMessages([message]);
    const sessionFilter = { _id: new ObjectId(this.sessionId) };
    const appendUpdate = {
      $push: { messages: { $each: storedMessages } },
    };
    await this.collection.updateOne(sessionFilter, appendUpdate, {
      upsert: true,
    });
  }

  /** Deletes the session document and all of its messages. */
  async clear(): Promise<void> {
    await this.collection.deleteOne({ _id: new ObjectId(this.sessionId) });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/file_system.ts
|
import { promises as fs } from "node:fs";
import { dirname } from "node:path";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
// Default location of the JSON file backing the history store.
export const FILE_HISTORY_DEFAULT_FILE_PATH = ".history/history.json";
/**
 * Represents a lightweight file chat session.
 */
export type FileChatSession = {
  id: string;
  context: Record<string, unknown>;
};
/**
 * Represents a stored chat session.
 */
export type StoredFileChatSession = FileChatSession & {
  messages: StoredMessage[];
};
/**
 * Type for the store of chat sessions.
 * Outer key is the user id, inner key the session id.
 */
export type FileChatStore = {
  [userId: string]: Record<string, StoredFileChatSession>;
};
/**
 * Type for the input to the `FileSystemChatMessageHistory` constructor.
 */
export interface FileSystemChatMessageHistoryInput {
  sessionId: string;
  userId?: string;
  filePath?: string;
}
// Module-level cache of the parsed store file, shared by every
// FileSystemChatMessageHistory instance in this process.
// NOTE(review): the cache is keyed by nothing — instances configured with
// different filePath values will share whichever file was loaded first.
// Confirm this is acceptable for the demo/dev use case.
let store: FileChatStore;
/**
 * Store chat message history using a local JSON file.
 * For demo and development purposes only.
 *
 * @example
 * ```typescript
 * const model = new ChatOpenAI({
 *   model: "gpt-3.5-turbo",
 *   temperature: 0,
 * });
 * const prompt = ChatPromptTemplate.fromMessages([
 *   [
 *     "system",
 *     "You are a helpful assistant. Answer all questions to the best of your ability.",
 *   ],
 *   ["placeholder", "chat_history"],
 *   ["human", "{input}"],
 * ]);
 *
 * const chain = prompt.pipe(model).pipe(new StringOutputParser());
 * const chainWithHistory = new RunnableWithMessageHistory({
 *   runnable: chain,
 *   inputMessagesKey: "input",
 *   historyMessagesKey: "chat_history",
 *   getMessageHistory: async (sessionId) => {
 *     const chatHistory = new FileSystemChatMessageHistory({
 *       sessionId: sessionId,
 *       userId: "userId",  // Optional
 *     })
 *     return chatHistory;
 *   },
 * });
 * await chainWithHistory.invoke(
 *   { input: "What did I just say my name was?" },
 *   { configurable: { sessionId: "session-id" } }
 * );
 * ```
 */
export class FileSystemChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "file"];

  private sessionId: string;

  // Empty string acts as the "anonymous" user bucket when no userId given.
  private userId: string;

  private filePath: string;

  constructor(chatHistoryInput: FileSystemChatMessageHistoryInput) {
    super();
    this.sessionId = chatHistoryInput.sessionId;
    this.userId = chatHistoryInput.userId ?? "";
    this.filePath = chatHistoryInput.filePath ?? FILE_HISTORY_DEFAULT_FILE_PATH;
  }

  /**
   * Loads the store file into the module-level cache on first use.
   * Subsequent calls are no-ops once the cache is populated.
   */
  private async init(): Promise<void> {
    if (store) {
      return;
    }
    try {
      store = await this.loadStore();
    } catch (error) {
      console.error("Error initializing FileSystemChatMessageHistory:", error);
      throw error;
    }
  }

  /**
   * Reads and parses the backing JSON file.
   * A missing file (ENOENT) yields an empty store; any other I/O or
   * parse failure is wrapped and rethrown.
   */
  protected async loadStore(): Promise<FileChatStore> {
    try {
      await fs.access(this.filePath, fs.constants.F_OK);
      const store = await fs.readFile(this.filePath, "utf-8");
      return JSON.parse(store) as FileChatStore;
    } catch (_error) {
      const error = _error as NodeJS.ErrnoException;
      if (error.code === "ENOENT") {
        return {};
      }
      throw new Error(
        `Error loading FileSystemChatMessageHistory store: ${error}`
      );
    }
  }

  /**
   * Serializes the entire cached store back to disk, creating the
   * parent directory if needed.
   */
  protected async saveStore(): Promise<void> {
    try {
      await fs.mkdir(dirname(this.filePath), { recursive: true });
      await fs.writeFile(this.filePath, JSON.stringify(store));
    } catch (error) {
      throw new Error(
        `Error saving FileSystemChatMessageHistory store: ${error}`
      );
    }
  }

  /** Returns the session's messages, or an empty list if none stored. */
  async getMessages(): Promise<BaseMessage[]> {
    await this.init();
    const messages = store[this.userId]?.[this.sessionId]?.messages ?? [];
    return mapStoredMessagesToChatMessages(messages);
  }

  /**
   * Appends one message and persists the whole store.
   * Note: re-reads, appends, and rewrites the full message list.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.init();
    const messages = await this.getMessages();
    messages.push(message);
    const storedMessages = mapChatMessagesToStoredMessages(messages);
    store[this.userId] ??= {};
    store[this.userId][this.sessionId] = {
      ...store[this.userId][this.sessionId],
      messages: storedMessages,
    };
    await this.saveStore();
  }

  /** Removes this session from the store and persists the change. */
  async clear(): Promise<void> {
    await this.init();
    if (store[this.userId]) {
      delete store[this.userId][this.sessionId];
    }
    await this.saveStore();
  }

  /** Returns the session's context object (empty if never set). */
  async getContext(): Promise<Record<string, unknown>> {
    await this.init();
    return store[this.userId]?.[this.sessionId]?.context ?? {};
  }

  /** Replaces the session's context object and persists the change. */
  async setContext(context: Record<string, unknown>): Promise<void> {
    await this.init();
    store[this.userId] ??= {};
    store[this.userId][this.sessionId] = {
      ...store[this.userId][this.sessionId],
      context,
    };
    await this.saveStore();
  }

  /** Removes every session for this user and persists the change. */
  async clearAllSessions() {
    await this.init();
    delete store[this.userId];
    await this.saveStore();
  }

  /**
   * Lists all sessions for this user.
   * NOTE(review): `session.id` is never populated by addMessage or
   * setContext, so the returned ids may be undefined — confirm whether
   * the id should be set to the session key on write.
   */
  async getAllSessions(): Promise<FileChatSession[]> {
    await this.init();
    const userSessions = store[this.userId]
      ? Object.values(store[this.userId]).map((session) => ({
          id: session.id,
          context: session.context,
        }))
      : [];
    return userSessions;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/dynamodb.ts
|
import {
DynamoDBClient,
DynamoDBClientConfig,
GetItemCommand,
GetItemCommandInput,
UpdateItemCommand,
UpdateItemCommandInput,
DeleteItemCommand,
DeleteItemCommandInput,
AttributeValue,
} from "@aws-sdk/client-dynamodb";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Interface defining the fields required to create an instance of
 * `DynamoDBChatMessageHistory`. It includes the DynamoDB table name,
 * session ID, partition key, sort key, message attribute name, and
 * DynamoDB client configuration.
 */
export interface DynamoDBChatMessageHistoryFields {
  tableName: string;
  sessionId: string;
  /** Partition key attribute name; defaults to "id". */
  partitionKey?: string;
  sortKey?: string;
  /** Attribute that holds the message list; defaults to "messages". */
  messageAttributeName?: string;
  config?: DynamoDBClientConfig;
  /** Fully-specified DynamoDB key; overrides partitionKey/sortKey when set. */
  key?: Record<string, AttributeValue>;
}
/**
 * Interface defining the structure of a chat message as it is stored in
 * DynamoDB (a map attribute with string-typed fields).
 */
interface DynamoDBSerializedChatMessage {
  M: {
    type: {
      S: string;
    };
    text: {
      S: string;
    };
    role?: {
      S: string;
    };
    /** JSON-serialized additional kwargs. */
    additional_kwargs?: {
      S: string;
    };
  };
}
/**
 * Class providing methods to interact with a DynamoDB table to store and
 * retrieve chat messages. It extends the `BaseListChatMessageHistory`
 * class. All messages of a session live in one item, under a single
 * list attribute.
 */
export class DynamoDBChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "dynamodb"];

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      "config.credentials.accessKeyId": "AWS_ACCESS_KEY_ID",
      // NOTE(review): "AWS_SECRETE_ACCESS_KEY" looks like a typo for
      // "AWS_SECRET_ACCESS_KEY" — confirm before changing, since
      // serialized configurations may rely on the current spelling.
      "config.credentials.secretAccessKey": "AWS_SECRETE_ACCESS_KEY",
      "config.credentials.sessionToken": "AWS_SESSION_TOKEN",
    };
  }

  private tableName: string;

  private sessionId: string;

  private client: DynamoDBClient;

  // Partition key attribute name; defaults to "id".
  private partitionKey = "id";

  private sortKey?: string;

  // Item attribute that holds the message list.
  private messageAttributeName = "messages";

  // Full DynamoDB key used for every Get/Update/Delete call.
  private dynamoKey: Record<string, AttributeValue> = {};

  /**
   * Transforms a `StoredMessage` into a `DynamoDBSerializedChatMessage`.
   * The `DynamoDBSerializedChatMessage` format is suitable for storing in DynamoDB.
   *
   * @param message - The `StoredMessage` to be transformed.
   * @returns The transformed `DynamoDBSerializedChatMessage`.
   */
  private createDynamoDBSerializedChatMessage(
    message: StoredMessage
  ): DynamoDBSerializedChatMessage {
    const {
      type,
      data: { content, role, additional_kwargs },
    } = message;
    // Only serialize kwargs when the object is present and non-empty;
    // otherwise store the literal "{}" so the attribute always exists.
    const isAdditionalKwargs =
      additional_kwargs && Object.keys(additional_kwargs).length;
    const dynamoSerializedMessage: DynamoDBSerializedChatMessage = {
      M: {
        type: {
          S: type,
        },
        text: {
          S: content,
        },
        additional_kwargs: isAdditionalKwargs
          ? { S: JSON.stringify(additional_kwargs) }
          : { S: "{}" },
      },
    };
    // role is optional in the stored form; omit the attribute entirely
    // when absent rather than storing an empty string.
    if (role) {
      dynamoSerializedMessage.M.role = { S: role };
    }
    return dynamoSerializedMessage;
  }

  constructor({
    tableName,
    sessionId,
    partitionKey,
    sortKey,
    messageAttributeName,
    config,
    key = {},
  }: DynamoDBChatMessageHistoryFields) {
    super();
    this.tableName = tableName;
    this.sessionId = sessionId;
    this.client = new DynamoDBClient(config ?? {});
    this.partitionKey = partitionKey ?? this.partitionKey;
    this.sortKey = sortKey;
    this.messageAttributeName =
      messageAttributeName ?? this.messageAttributeName;
    this.dynamoKey = key;
    // override dynamoKey with partition key and sort key when key not specified
    if (Object.keys(this.dynamoKey).length === 0) {
      this.dynamoKey[this.partitionKey] = { S: this.sessionId };
      if (this.sortKey) {
        // NOTE(review): the sort key NAME is also used as its VALUE here
        // ({ S: this.sortKey }); confirm this is intentional rather than
        // e.g. a session-derived value.
        this.dynamoKey[this.sortKey] = { S: this.sortKey };
      }
    }
  }

  /**
   * Retrieves all messages from the DynamoDB table and returns them as an
   * array of `BaseMessage` instances. Items with a malformed shape
   * (missing map, type, or text) are silently skipped.
   * @returns Array of stored messages
   */
  async getMessages(): Promise<BaseMessage[]> {
    try {
      const params: GetItemCommandInput = {
        TableName: this.tableName,
        Key: this.dynamoKey,
      };
      const response = await this.client.send(new GetItemCommand(params));
      const items = response.Item
        ? response.Item[this.messageAttributeName]?.L ?? []
        : [];
      const messages = items
        .filter(
          (
            item
          ): item is AttributeValue & { M: DynamoDBSerializedChatMessage } =>
            item.M !== undefined
        )
        .map((item) => {
          const data: {
            role?: string;
            content: string | undefined;
            additional_kwargs?: Record<string, unknown>;
          } = {
            role: item.M?.role?.S,
            content: item.M?.text.S,
            additional_kwargs: item.M?.additional_kwargs?.S
              ? JSON.parse(item.M?.additional_kwargs.S)
              : undefined,
          };
          return {
            type: item.M?.type.S,
            data,
          };
        })
        .filter(
          (x): x is StoredMessage =>
            x.type !== undefined && x.data.content !== undefined
        );
      return mapStoredMessagesToChatMessages(messages);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      throw new Error(`Error getting messages: ${error.message}`);
    }
  }

  /**
   * Adds a new message to the DynamoDB table by appending to the message
   * list attribute (creating it if the item/attribute does not exist).
   * @param message The message to be added to the DynamoDB table.
   */
  async addMessage(message: BaseMessage) {
    try {
      const messages = mapChatMessagesToStoredMessages([message]);
      const params: UpdateItemCommandInput = {
        TableName: this.tableName,
        Key: this.dynamoKey,
        ExpressionAttributeNames: {
          "#m": this.messageAttributeName,
        },
        ExpressionAttributeValues: {
          ":empty_list": {
            L: [],
          },
          ":m": {
            L: messages.map(this.createDynamoDBSerializedChatMessage),
          },
        },
        UpdateExpression:
          "SET #m = list_append(if_not_exists(#m, :empty_list), :m)",
      };
      await this.client.send(new UpdateItemCommand(params));
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      throw new Error(`Error adding message: ${error.message}`);
    }
  }

  /**
   * Adds new messages to the DynamoDB table in a single append operation.
   * @param messages The messages to be added to the DynamoDB table.
   */
  async addMessages(messages: BaseMessage[]): Promise<void> {
    try {
      const storedMessages = mapChatMessagesToStoredMessages(messages);
      const dynamoMessages = storedMessages.map(
        this.createDynamoDBSerializedChatMessage
      );
      const params: UpdateItemCommandInput = {
        TableName: this.tableName,
        Key: this.dynamoKey,
        ExpressionAttributeNames: {
          "#m": this.messageAttributeName,
        },
        ExpressionAttributeValues: {
          ":empty_list": {
            L: [],
          },
          ":m": {
            L: dynamoMessages,
          },
        },
        UpdateExpression:
          "SET #m = list_append(if_not_exists(#m, :empty_list), :m)",
      };
      await this.client.send(new UpdateItemCommand(params));
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      throw new Error(`Error adding messages: ${error.message}`);
    }
  }

  /**
   * Deletes all messages from the DynamoDB table by removing the entire
   * session item.
   */
  async clear(): Promise<void> {
    try {
      const params: DeleteItemCommandInput = {
        TableName: this.tableName,
        Key: this.dynamoKey,
      };
      await this.client.send(new DeleteItemCommand(params));
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      throw new Error(`Error clearing messages: ${error.message}`);
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/planetscale.ts
|
import {
Client as PlanetScaleClient,
Config as PlanetScaleConfig,
Connection as PlanetScaleConnection,
} from "@planetscale/database";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
StoredMessageData,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Type definition for the input parameters required when instantiating a
 * PlanetScaleChatMessageHistory object.
 */
export type PlanetScaleChatMessageHistoryInput = {
  /** Table used to store messages. Defaults to "langchain_chat_histories". */
  tableName?: string;
  /** Identifier grouping the messages of one conversation. */
  sessionId: string;
  /** Connection config; used when no client is supplied. */
  config?: PlanetScaleConfig;
  /** Pre-built PlanetScale client; takes precedence over config. */
  client?: PlanetScaleClient;
};
/**
 * Interface for the data transfer object used when selecting stored
 * messages from the PlanetScale database.
 * Shape mirrors the columns created by `ensureTable`.
 */
interface selectStoredMessagesDTO {
  id: string;
  session_id: string;
  type: string;
  content: string;
  role: string | null;
  name: string | null;
  /** JSON-serialized additional kwargs; parsed on read. */
  additional_kwargs: string;
}
/**
 * Class for storing and retrieving chat message history from a
 * PlanetScale database. Extends the BaseListChatMessageHistory class.
 * @example
 * ```typescript
 * const chatHistory = new PlanetScaleChatMessageHistory({
 *   tableName: "stored_message",
 *   sessionId: "lc-example",
 *   config: {
 *     url: "ADD_YOURS_HERE",
 *   },
 * });
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI(),
 *   memory: chatHistory,
 * });
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class PlanetScaleChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "planetscale"];

  get lc_secrets() {
    return {
      "config.host": "PLANETSCALE_HOST",
      "config.username": "PLANETSCALE_USERNAME",
      "config.password": "PLANETSCALE_PASSWORD",
      "config.url": "PLANETSCALE_DATABASE_URL",
    };
  }

  public client: PlanetScaleClient;

  private connection: PlanetScaleConnection;

  private tableName: string;

  private sessionId: string;

  // Set once the table and its default-id column have been ensured.
  private tableInitialized: boolean;

  constructor(fields: PlanetScaleChatMessageHistoryInput) {
    super(fields);
    const { sessionId, config, client, tableName } = fields;
    if (client) {
      this.client = client;
    } else if (config) {
      this.client = new PlanetScaleClient(config);
    } else {
      throw new Error(
        "Either a client or config must be provided to PlanetScaleChatMessageHistory"
      );
    }
    this.connection = this.client.connection();
    this.tableName = tableName || "langchain_chat_histories";
    this.tableInitialized = false;
    this.sessionId = sessionId;
  }

  /**
   * Lazily creates the backing table (and a UUID default for its id
   * column) the first time any operation runs on this instance.
   * @returns Promise that resolves to void.
   */
  private async ensureTable(): Promise<void> {
    if (this.tableInitialized) {
      return;
    }
    const createTableSql = `CREATE TABLE IF NOT EXISTS ${this.tableName} (id BINARY(16) PRIMARY KEY, session_id VARCHAR(255), type VARCHAR(255), content VARCHAR(255), role VARCHAR(255), name VARCHAR(255), additional_kwargs VARCHAR(255));`;
    await this.connection.execute(createTableSql);
    const defaultIdSql = `ALTER TABLE ${this.tableName} MODIFY id BINARY(16) DEFAULT (UUID_TO_BIN(UUID()));`;
    await this.connection.execute(defaultIdSql);
    this.tableInitialized = true;
  }

  /**
   * Fetches every stored message for the current session and converts
   * the rows back into BaseMessage instances.
   * @returns Promise that resolves to an array of BaseMessage objects.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.ensureTable();
    const result = await this.connection.execute(
      `SELECT * FROM ${this.tableName} WHERE session_id = :session_id`,
      { session_id: this.sessionId }
    );
    const rows = result.rows as unknown as selectStoredMessagesDTO[];
    const stored: StoredMessage[] = rows.map((row) => {
      const data = {
        content: row.content,
        additional_kwargs: JSON.parse(row.additional_kwargs),
      } as StoredMessageData;
      // role and name columns are nullable; only copy them when present.
      if (row.role) {
        data.role = row.role;
      }
      if (row.name) {
        data.name = row.name;
      }
      return { type: row.type, data };
    });
    return mapStoredMessagesToChatMessages(stored);
  }

  /**
   * Inserts one message row for the current session; kwargs are stored
   * JSON-serialized.
   * @param message The BaseMessage object to be added to the database.
   * @returns Promise that resolves to void.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureTable();
    const [stored] = mapChatMessagesToStoredMessages([message]);
    const insertSql = `INSERT INTO ${this.tableName} (session_id, type, content, role, name, additional_kwargs) VALUES (:session_id, :type, :content, :role, :name, :additional_kwargs)`;
    await this.connection.execute(insertSql, {
      session_id: this.sessionId,
      type: stored.type,
      content: stored.data.content,
      role: stored.data.role,
      name: stored.data.name,
      additional_kwargs: JSON.stringify(stored.data.additional_kwargs),
    });
  }

  /**
   * Deletes every message row belonging to the current session.
   * @returns Promise that resolves to void.
   */
  async clear(): Promise<void> {
    await this.ensureTable();
    await this.connection.execute(
      `DELETE FROM ${this.tableName} WHERE session_id = :session_id`,
      { session_id: this.sessionId }
    );
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/momento.ts
|
/* eslint-disable no-instanceof/no-instanceof */
import {
CacheDelete,
CacheListFetch,
CacheListPushBack,
ICacheClient,
InvalidArgumentError,
CollectionTtl,
} from "@gomomento/sdk-core";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import { ensureCacheExists } from "../../utils/momento.js";
/**
 * The settings to instantiate the Momento chat message history.
 */
export interface MomentoChatMessageHistoryProps {
  /**
   * The session ID to use to store the data.
   */
  sessionId: string;
  /**
   * The Momento cache client.
   */
  client: ICacheClient;
  /**
   * The name of the cache to use to store the data.
   */
  cacheName: string;
  /**
   * The time to live for the cache items in seconds.
   * If not specified, the cache client default is used.
   */
  sessionTtl?: number;
  /**
   * If true, ensure that the cache exists before returning.
   * If false, the cache is not checked for existence.
   * Defaults to true.
   */
  // Was typed as the literal `true`, which made the documented `false`
  // unassignable even though `fromProps` explicitly handles it.
  ensureCacheExists?: boolean;
}
/**
 * A class that stores chat message history using Momento Cache. Messages
 * for a session are kept in a Momento list keyed by the session ID, so
 * each conversation has its own ordered transcript.
 * @example
 * ```typescript
 * const chatHistory = await MomentoChatMessageHistory.fromProps({
 *   client: new CacheClient({
 *     configuration: Configurations.Laptop.v1(),
 *     credentialProvider: CredentialProvider.fromEnvironmentVariable({
 *       environmentVariableName: "MOMENTO_API_KEY",
 *     }),
 *     defaultTtlSeconds: 60 * 60 * 24,
 *   }),
 *   cacheName: "langchain",
 *   sessionId: new Date().toISOString(),
 *   sessionTtl: 300,
 * });
 *
 * const messages = await chatHistory.getMessages();
 * console.log({ messages });
 * ```
 */
export class MomentoChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "momento"];

  private readonly sessionId: string;

  private readonly client: ICacheClient;

  private readonly cacheName: string;

  private readonly sessionTtl: CollectionTtl;

  private constructor(props: MomentoChatMessageHistoryProps) {
    super();
    const { sessionId, client, cacheName, sessionTtl } = props;
    this.sessionId = sessionId;
    this.client = client;
    this.cacheName = cacheName;
    this.validateTtlSeconds(sessionTtl);
    // Fall back to the cache client's default TTL when none is given.
    this.sessionTtl =
      sessionTtl === undefined
        ? CollectionTtl.fromCacheTtl()
        : CollectionTtl.of(sessionTtl);
  }

  /**
   * Create a new chat message history backed by Momento.
   *
   * @param props The settings to instantiate the Momento chat message history;
   * see {@link MomentoChatMessageHistoryProps} for the individual fields.
   * @throws {InvalidArgumentError} If `props.sessionTtl` is not strictly positive.
   * @returns A new chat message history backed by Momento.
   */
  public static async fromProps(
    props: MomentoChatMessageHistoryProps
  ): Promise<MomentoChatMessageHistory> {
    const history = new MomentoChatMessageHistory(props);
    if (props.ensureCacheExists ?? true) {
      await ensureCacheExists(props.client, props.cacheName);
    }
    return history;
  }

  /**
   * Validate the user-specified TTL, if provided, is strictly positive.
   * @param ttlSeconds The TTL to validate.
   */
  private validateTtlSeconds(ttlSeconds?: number): void {
    if (ttlSeconds === undefined) {
      return;
    }
    if (ttlSeconds <= 0) {
      throw new InvalidArgumentError("ttlSeconds must be positive.");
    }
  }

  /**
   * Fetches messages from the cache.
   * @returns A Promise that resolves to an array of BaseMessage instances.
   */
  public async getMessages(): Promise<BaseMessage[]> {
    const response = await this.client.listFetch(
      this.cacheName,
      this.sessionId
    );
    if (response instanceof CacheListFetch.Error) {
      throw response.innerException();
    }
    // A Miss simply means the session has no messages yet.
    let stored: StoredMessage[] = [];
    if (response instanceof CacheListFetch.Hit) {
      stored = response.valueList().map((item) => JSON.parse(item));
    } else if (!(response instanceof CacheListFetch.Miss)) {
      throw new Error(`Unknown response type: ${response.toString()}`);
    }
    return mapStoredMessagesToChatMessages(stored);
  }

  /**
   * Appends a message to the session's list in the cache.
   * @param message The BaseMessage instance to add to the cache.
   * @returns A Promise that resolves when the message has been added.
   */
  public async addMessage(message: BaseMessage): Promise<void> {
    const [stored] = mapChatMessagesToStoredMessages([message]);
    const response = await this.client.listPushBack(
      this.cacheName,
      this.sessionId,
      JSON.stringify(stored),
      { ttl: this.sessionTtl }
    );
    if (response instanceof CacheListPushBack.Error) {
      throw response.innerException();
    }
    if (!(response instanceof CacheListPushBack.Success)) {
      throw new Error(`Unknown response type: ${response.toString()}`);
    }
  }

  /**
   * Deletes all messages for the session from the cache.
   * @returns A Promise that resolves when all messages have been deleted.
   */
  public async clear(): Promise<void> {
    const response = await this.client.delete(this.cacheName, this.sessionId);
    if (response instanceof CacheDelete.Error) {
      throw response.innerException();
    }
    if (!(response instanceof CacheDelete.Success)) {
      throw new Error(`Unknown response type: ${response.toString()}`);
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/cassandra.ts
|
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import {
Column,
CassandraTable,
CassandraClientArgs,
} from "../../utils/cassandra.js";
/**
 * Configuration for {@link CassandraChatMessageHistory}: the standard
 * Cassandra client arguments plus the keyspace/table used for storage and
 * the session ID that scopes reads and writes.
 */
export interface CassandraChatMessageHistoryOptions
  extends CassandraClientArgs {
  // Keyspace containing the message-history table.
  keyspace: string;
  // Table used to store messages.
  table: string;
  // Partition-key value isolating this conversation's rows.
  sessionId: string;
}
/**
 * Chat message history persisted in Cassandra. Each message is written as
 * a row keyed by the session ID (partition) and a millisecond timestamp
 * (clustering column), with the message stored as a (type, JSON data)
 * pair.
 * @example
 * ```typescript
 * const chatHistory = new CassandraChatMessageHistory({
 *   cloud: {
 *     secureConnectBundle: "<path to your secure bundle>",
 *   },
 *   credentials: {
 *     username: "token",
 *     password: "<your Cassandra access token>",
 *   },
 *   keyspace: "langchain",
 *   table: "message_history",
 *   sessionId: "<some unique session identifier>",
 * });
 * ```
 */
export class CassandraChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "cassandra"];

  private cassandraTable: CassandraTable;

  private sessionId: string;

  private options: CassandraChatMessageHistoryOptions;

  private colSessionId: Column;

  private colMessageTs: Column;

  private colMessageType: Column;

  private colData: Column;

  constructor(options: CassandraChatMessageHistoryOptions) {
    super();
    this.sessionId = options.sessionId;
    this.options = options;
    // Column layout: session_id partitions rows per conversation,
    // message_ts orders them within the partition.
    this.colSessionId = { name: "session_id", type: "text", partition: true };
    this.colMessageTs = { name: "message_ts", type: "timestamp" };
    this.colMessageType = { name: "message_type", type: "text" };
    this.colData = { name: "data", type: "text" };
  }

  /**
   * Method to get all the messages stored in the Cassandra database.
   * @returns Array of stored BaseMessage instances.
   */
  public async getMessages(): Promise<BaseMessage[]> {
    await this.ensureTable();
    const resultSet = await this.cassandraTable.select(
      [this.colMessageType, this.colData],
      [{ name: "session_id", value: this.sessionId }]
    );
    const stored: StoredMessage[] = resultSet.rows.map((row) => ({
      type: row.message_type,
      data: JSON.parse(row.data),
    }));
    return mapStoredMessagesToChatMessages(stored);
  }

  /**
   * Method to add a new message to the Cassandra database.
   * @param message The BaseMessage instance to add.
   * @returns A promise that resolves when the message has been added.
   */
  public async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureTable();
    const [{ type, data }] = mapChatMessagesToStoredMessages([message]);
    await this.cassandraTable.upsert(
      [[this.sessionId, type, Date.now(), JSON.stringify(data)]],
      [this.colSessionId, this.colMessageType, this.colMessageTs, this.colData]
    );
  }

  /**
   * Method to clear all the messages from the Cassandra database.
   * @returns A promise that resolves when all messages have been cleared.
   */
  public async clear(): Promise<void> {
    await this.ensureTable();
    await this.cassandraTable.delete({
      name: this.colSessionId.name,
      value: this.sessionId,
    });
  }

  /**
   * Lazily builds the CassandraTable handle on first use.
   * @returns Promise that resolves when the table handle exists.
   */
  private async ensureTable(): Promise<void> {
    if (this.cassandraTable) {
      return;
    }
    // NOTE(review): `await` on a constructor result is a no-op unless the
    // instance is thenable — preserved from the original as-is.
    this.cassandraTable = await new CassandraTable({
      ...this.options,
      primaryKey: [this.colSessionId, this.colMessageTs],
      nonKeyColumns: [this.colMessageType, this.colData],
    });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/xata.ts
|
import {
BaseClient,
BaseClientOptions,
GetTableSchemaResponse,
Schemas,
XataApiClient,
parseWorkspacesUrlParts,
} from "@xata.io/client";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
StoredMessageData,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
* An object type that represents the input for the XataChatMessageHistory
* class.
*/
export type XataChatMessageHistoryInput<XataClient> = {
sessionId: string;
config?: BaseClientOptions;
client?: XataClient;
table?: string;
createTable?: boolean;
apiKey?: string;
};
/**
 * An interface that represents the data transfer object for stored
 * messages — i.e. the shape of a row read back from the Xata table.
 */
interface storedMessagesDTO {
  id: string;
  sessionId: string;
  type: string;
  content: string;
  role?: string;
  name?: string;
  // JSON-serialized additional_kwargs; parsed before rebuilding the message.
  additionalKwargs: string;
}
// Column schema applied when this module auto-creates the Xata table
// (see XataChatMessageHistory.ensureTable).
const chatMemoryColumns: Schemas.Column[] = [
  { name: "sessionId", type: "string" },
  { name: "type", type: "string" },
  { name: "role", type: "string" },
  { name: "content", type: "text" },
  { name: "name", type: "string" },
  { name: "additionalKwargs", type: "text" },
];
/**
 * A class for managing chat message history using the Xata.io client. It
 * extends the BaseListChatMessageHistory class and provides methods to
 * get, add, and clear messages. Unless `createTable` is set to `false`, it
 * also lazily creates the backing table (with the schema in
 * `chatMemoryColumns`) before the first operation.
 * @example
 * ```typescript
 * const chatHistory = new XataChatMessageHistory({
 *   table: "messages",
 *   sessionId: new Date().toISOString(),
 *   client: new BaseClient({
 *     databaseURL: process.env.XATA_DB_URL,
 *     apiKey: process.env.XATA_API_KEY,
 *     branch: "main",
 *   }),
 *   apiKey: process.env.XATA_API_KEY,
 * });
 *
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI(),
 *   memory: new BufferMemory({ chatHistory }),
 * });
 *
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class XataChatMessageHistory<
  XataClient extends BaseClient
> extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "xata"];

  // Data-plane client used for reads and writes.
  public client: XataClient;

  private sessionId: string;

  private table: string;

  // True once ensureTable has verified/created the table for this instance.
  private tableInitialized: boolean;

  private createTable: boolean;

  // Control-plane client; only set when createTable is enabled.
  private apiClient: XataApiClient;

  constructor(fields: XataChatMessageHistoryInput<XataClient>) {
    super(fields);
    const { sessionId, config, client, table } = fields;
    this.sessionId = sessionId;
    this.table = table || "memory";
    if (client) {
      this.client = client;
    } else if (config) {
      this.client = new BaseClient(config) as XataClient;
    } else {
      throw new Error(
        "Either a client or a config must be provided to XataChatMessageHistoryInput"
      );
    }
    // Table auto-creation is on by default; it needs an API key because it
    // goes through the management API rather than the data-plane client.
    if (fields.createTable !== false) {
      this.createTable = true;
      const apiKey = fields.apiKey || fields.config?.apiKey;
      if (!apiKey) {
        throw new Error(
          "If createTable is set, an apiKey must be provided to XataChatMessageHistoryInput, either directly or through the config object"
        );
      }
      this.apiClient = new XataApiClient({ apiKey });
    } else {
      this.createTable = false;
    }
    this.tableInitialized = false;
  }

  /**
   * Retrieves all messages associated with the session ID, ordered by
   * creation time.
   * @returns A promise that resolves to an array of BaseMessage instances.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.ensureTable();
    const records = await this.client.db[this.table]
      .filter({ sessionId: this.sessionId })
      .sort("xata.createdAt", "asc")
      .getAll();
    const rawStoredMessages = records as unknown as storedMessagesDTO[];
    const orderedMessages: StoredMessage[] = rawStoredMessages.map(
      (message: storedMessagesDTO) => {
        // Rebuild the StoredMessage shape: kwargs are JSON-encoded in the
        // table, and role/name are only present for some message types.
        const data = {
          content: message.content,
          additional_kwargs: JSON.parse(message.additionalKwargs),
        } as StoredMessageData;
        if (message.role) {
          data.role = message.role;
        }
        if (message.name) {
          data.name = message.name;
        }
        return {
          type: message.type,
          data,
        };
      }
    );
    return mapStoredMessagesToChatMessages(orderedMessages);
  }

  /**
   * Adds a new message to the database.
   * @param message The BaseMessage instance to be added.
   * @returns A promise that resolves when the message has been added.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureTable();
    const messageToAdd = mapChatMessagesToStoredMessages([message]);
    await this.client.db[this.table].create({
      sessionId: this.sessionId,
      type: messageToAdd[0].type,
      content: messageToAdd[0].data.content,
      role: messageToAdd[0].data.role,
      name: messageToAdd[0].data.name,
      additionalKwargs: JSON.stringify(messageToAdd[0].data.additional_kwargs),
    });
  }

  /**
   * Deletes all messages associated with the session ID.
   * @returns A promise that resolves when the messages have been deleted.
   */
  async clear(): Promise<void> {
    await this.ensureTable();
    // Fetch just the ids for this session, then bulk-delete them.
    const records = await this.client.db[this.table]
      .select(["id"])
      .filter({ sessionId: this.sessionId })
      .getAll();
    const ids = records.map((m) => m.id);
    await this.client.db[this.table].delete(ids);
  }

  /**
   * Checks if the table exists and creates it if it doesn't. This method is
   * called before any operation on the table. No-op when `createTable` is
   * false or the table was already verified by this instance.
   * @returns A promise that resolves when the table has been ensured.
   */
  private async ensureTable(): Promise<void> {
    if (!this.createTable) {
      return;
    }
    if (this.tableInitialized) {
      return;
    }
    // Derive workspace/region/database from the configured database URL.
    // Assumes a URL of the form https://<host>/db/<database>; the
    // destructuring below skips the protocol and the empty segment.
    const { databaseURL, branch } = await this.client.getConfig();
    const [, , host, , database] = databaseURL.split("/");
    const urlParts = parseWorkspacesUrlParts(host);
    if (urlParts == null) {
      throw new Error("Invalid databaseURL");
    }
    const { workspace, region } = urlParts;
    const tableParams = {
      workspace,
      region,
      database,
      branch,
      table: this.table,
    };
    let schema: GetTableSchemaResponse | null = null;
    try {
      schema = await this.apiClient.tables.getTableSchema(tableParams);
    } catch (e) {
      // A failed schema lookup is treated as "table does not exist".
    }
    if (schema == null) {
      await this.apiClient.tables.createTable(tableParams);
      await this.apiClient.tables.setTableSchema({
        ...tableParams,
        schema: {
          columns: chatMemoryColumns,
        },
      });
    }
    // BUG FIX: the flag was never set, so every operation re-ran the schema
    // check/creation round-trip. Mark initialized so subsequent calls skip it.
    this.tableInitialized = true;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/ipfs_datastore.ts
|
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
type BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import * as cborg from "cborg";
import { type Datastore, Key } from "interface-datastore";
import all from "it-all";
/**
 * Input carrying only the session identifier.
 */
export interface IPFSDatastoreChatMessageHistoryInput {
  sessionId: string;
}
/**
 * Constructor props: the datastore to persist into plus the session ID
 * used as the key prefix.
 */
export interface IPFSDatastoreChatMessageHistoryProps {
  datastore: Datastore;
  sessionId: string;
}
/**
 * Chat message history stored in an IPFS datastore. Messages for a
 * session live under keys of the form `/<sessionId>/<index>` and are
 * CBOR-encoded.
 */
export class IPFSDatastoreChatMessageHistory extends BaseListChatMessageHistory {
  readonly lc_namespace = ["langchain", "stores", "message", "datastore"];

  readonly sessionId: string;

  private readonly datastore: Datastore;

  constructor({ datastore, sessionId }: IPFSDatastoreChatMessageHistoryProps) {
    super({ sessionId });
    this.datastore = datastore;
    this.sessionId = sessionId;
  }

  async getMessages(): Promise<BaseMessage[]> {
    const entries = await all(
      this.datastore.query({ prefix: `/${this.sessionId}` })
    );
    return mapStoredMessagesToChatMessages(
      entries.map((entry) => cborg.decode(entry.value))
    );
  }

  async addMessage(message: BaseMessage): Promise<void> {
    await this.addMessages([message]);
  }

  async addMessages(messages: BaseMessage[]): Promise<void> {
    // Count existing entries so new messages continue the index sequence.
    const existing = await all(
      this.datastore.queryKeys({ prefix: `/${this.sessionId}` })
    );
    const offset = existing.length;
    const pairs = mapChatMessagesToStoredMessages(messages).map(
      (stored, index) => ({
        key: new Key(`/${this.sessionId}/${index + offset}`),
        value: cborg.encode(stored),
      })
    );
    await all(this.datastore.putMany(pairs));
  }

  async clear(): Promise<void> {
    const keys = this.datastore.queryKeys({ prefix: `/${this.sessionId}` });
    await all(this.datastore.deleteMany(keys));
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/upstash_redis.ts
|
import { Redis, type RedisConfigNodejs } from "@upstash/redis";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
StoredMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Type definition for the input parameters required to initialize an
 * instance of the UpstashRedisChatMessageHistory class.
 */
export type UpstashRedisChatMessageHistoryInput = {
  // Redis key under which this conversation's messages are stored.
  sessionId: string;
  // Optional TTL in seconds, refreshed on every write; when unset this
  // class sets no expiry on the key.
  sessionTTL?: number;
  // Upstash REST connection options; used only when no client is given.
  config?: RedisConfigNodejs;
  // Pre-configured Upstash Redis client; takes precedence over config.
  client?: Redis;
};
/**
 * Chat message history stored in an Upstash Redis list. New messages are
 * LPUSHed, so the list holds the conversation newest-first and is
 * reversed on read.
 */
export class UpstashRedisChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "upstash_redis"];

  get lc_secrets() {
    return {
      "config.url": "UPSTASH_REDIS_REST_URL",
      "config.token": "UPSTASH_REDIS_REST_TOKEN",
    };
  }

  public client: Redis;

  private sessionId: string;

  private sessionTTL?: number;

  constructor(fields: UpstashRedisChatMessageHistoryInput) {
    super(fields);
    const { sessionId, sessionTTL, config, client } = fields;
    // An explicit client wins; otherwise build one from config.
    if (client) {
      this.client = client;
    } else if (config) {
      this.client = new Redis(config);
    } else {
      throw new Error(
        `Upstash Redis message stores require either a config object or a pre-configured client.`
      );
    }
    this.sessionId = sessionId;
    this.sessionTTL = sessionTTL;
  }

  /**
   * Retrieves the chat messages from the Redis database.
   * @returns An array of BaseMessage instances representing the chat history.
   */
  async getMessages(): Promise<BaseMessage[]> {
    const raw: StoredMessage[] = await this.client.lrange<StoredMessage>(
      this.sessionId,
      0,
      -1
    );
    // Stored newest-first; reverse to chronological order, then drop any
    // entries missing the expected fields.
    const usable = raw
      .reverse()
      .filter(
        (m): m is StoredMessage =>
          m.type !== undefined && m.data.content !== undefined
      );
    return mapStoredMessagesToChatMessages(usable);
  }

  /**
   * Adds a new message to the chat history in the Redis database.
   * @param message The message to be added to the chat history.
   * @returns Promise resolving to void.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    const [stored] = mapChatMessagesToStoredMessages([message]);
    await this.client.lpush(this.sessionId, JSON.stringify(stored));
    if (this.sessionTTL) {
      await this.client.expire(this.sessionId, this.sessionTTL);
    }
  }

  /**
   * Deletes all messages from the chat history in the Redis database.
   * @returns Promise resolving to void.
   */
  async clear(): Promise<void> {
    await this.client.del(this.sessionId);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/ioredis.ts
|
import { Redis, RedisOptions } from "ioredis";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * Type for the input parameter of the RedisChatMessageHistory
 * constructor. It includes fields for the session ID, session TTL, Redis
 * URL, Redis configuration, and Redis client.
 */
export type RedisChatMessageHistoryInput = {
  // Redis key under which this conversation's messages are stored.
  sessionId: string;
  // Optional TTL in seconds, refreshed on every write.
  sessionTTL?: number;
  // Redis connection URL; used when no client is given.
  url?: string;
  // ioredis options; used when neither client nor url is given.
  config?: RedisOptions;
  // Pre-configured ioredis client; takes precedence over url/config.
  client?: Redis;
};
/**
 * Chat message history stored in Redis via ioredis. Messages are LPUSHed
 * onto a list keyed by the session ID and reversed on read so they come
 * back in chronological order.
 * @example
 * ```typescript
 * const chatHistory = new RedisChatMessageHistory({
 *   sessionId: new Date().toISOString(),
 *   sessionTTL: 300,
 *   url: "redis://localhost:6379",
 * });
 *
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI({ temperature: 0 }),
 *   memory: { chatHistory },
 * });
 *
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class RedisChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "ioredis"];

  get lc_secrets() {
    return {
      url: "REDIS_URL",
      "config.username": "REDIS_USERNAME",
      "config.password": "REDIS_PASSWORD",
    };
  }

  public client: Redis;

  private sessionId: string;

  private sessionTTL?: number;

  constructor(fields: RedisChatMessageHistoryInput) {
    super(fields);
    const { sessionId, sessionTTL, url, config, client } = fields;
    // Precedence: explicit client, then URL, then options object (or defaults).
    if (client) {
      this.client = client;
    } else if (url) {
      this.client = new Redis(url);
    } else {
      this.client = new Redis(config ?? {});
    }
    this.sessionId = sessionId;
    this.sessionTTL = sessionTTL;
  }

  /**
   * Retrieves all messages from the chat history.
   * @returns Promise that resolves with an array of BaseMessage instances.
   */
  async getMessages(): Promise<BaseMessage[]> {
    const raw = await this.client.lrange(this.sessionId, 0, -1);
    // Stored newest-first; reverse to chronological order before decoding.
    const chronological = raw.reverse().map((entry) => JSON.parse(entry));
    return mapStoredMessagesToChatMessages(chronological);
  }

  /**
   * Adds a message to the chat history.
   * @param message The message to add to the chat history.
   * @returns Promise that resolves when the message has been added.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    const [stored] = mapChatMessagesToStoredMessages([message]);
    await this.client.lpush(this.sessionId, JSON.stringify(stored));
    if (this.sessionTTL) {
      await this.client.expire(this.sessionId, this.sessionTTL);
    }
  }

  /**
   * Clears all messages from the chat history.
   * @returns Promise that resolves when the chat history has been cleared.
   */
  async clear(): Promise<void> {
    await this.client.del(this.sessionId);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores
|
lc_public_repos/langchainjs/libs/langchain-community/src/stores/message/redis.ts
|
import {
createClient,
RedisClientOptions,
RedisClientType,
RedisModules,
RedisFunctions,
RedisScripts,
} from "redis";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
/**
 * @deprecated Install and import from the "@langchain/redis" integration package instead.
 * Type for the input to the `RedisChatMessageHistory` constructor.
 */
export type RedisChatMessageHistoryInput = {
  // Redis key under which this conversation's messages are stored.
  sessionId: string;
  // Optional TTL in seconds, refreshed on every write.
  sessionTTL?: number;
  // node-redis client options; used when no client is given.
  config?: RedisClientOptions;
  // Typing issues with createClient output: https://github.com/redis/node-redis/issues/1865
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  client?: any;
};
/**
 * @deprecated Install and import from the "@langchain/redis" integration package instead.
 * Chat message history stored via node-redis. Messages are LPUSHed onto a
 * list keyed by the session ID and reversed on read; the client is
 * connected lazily on first use.
 * @example
 * ```typescript
 * const chatHistory = new RedisChatMessageHistory({
 *   sessionId: new Date().toISOString(),
 *   sessionTTL: 300,
 * });
 *
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0 }),
 *   memory: { chatHistory },
 * });
 *
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class RedisChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "redis"];

  get lc_secrets() {
    return {
      "config.url": "REDIS_URL",
      "config.username": "REDIS_USERNAME",
      "config.password": "REDIS_PASSWORD",
    };
  }

  public client: RedisClientType<RedisModules, RedisFunctions, RedisScripts>;

  private sessionId: string;

  private sessionTTL?: number;

  constructor(fields: RedisChatMessageHistoryInput) {
    super(fields);
    const { sessionId, sessionTTL, config, client } = fields;
    this.client = (client ?? createClient(config ?? {})) as RedisClientType<
      RedisModules,
      RedisFunctions,
      RedisScripts
    >;
    this.sessionId = sessionId;
    this.sessionTTL = sessionTTL;
  }

  /**
   * Ensures the Redis client is ready to perform operations. If the client
   * is not ready, it attempts to connect to the Redis database.
   * @returns Promise resolving to true when the client is ready.
   */
  async ensureReadiness() {
    // Lazily connect on first use.
    if (this.client.isReady) {
      return true;
    }
    await this.client.connect();
    return true;
  }

  /**
   * Retrieves all chat messages from the Redis database for the current
   * session.
   * @returns Promise resolving to an array of `BaseMessage` instances.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.ensureReadiness();
    const raw = await this.client.lRange(this.sessionId, 0, -1);
    // Stored newest-first; reverse to chronological order before decoding.
    return mapStoredMessagesToChatMessages(
      raw.reverse().map((entry) => JSON.parse(entry))
    );
  }

  /**
   * Adds a new chat message to the Redis database for the current session.
   * @param message The `BaseMessage` instance to add.
   * @returns Promise resolving when the message has been added.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureReadiness();
    const [stored] = mapChatMessagesToStoredMessages([message]);
    await this.client.lPush(this.sessionId, JSON.stringify(stored));
    if (this.sessionTTL) {
      await this.client.expire(this.sessionId, this.sessionTTL);
    }
  }

  /**
   * Deletes all chat messages from the Redis database for the current
   * session.
   * @returns Promise resolving when the messages have been deleted.
   */
  async clear(): Promise<void> {
    await this.ensureReadiness();
    await this.client.del(this.sessionId);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/momento.ts
|
/* eslint-disable no-instanceof/no-instanceof */
import {
ICacheClient,
CacheGet,
CacheSet,
InvalidArgumentError,
} from "@gomomento/sdk-core";
import {
BaseCache,
deserializeStoredGeneration,
getCacheKey,
serializeGeneration,
} from "@langchain/core/caches";
import { Generation } from "@langchain/core/outputs";
import { ensureCacheExists } from "../utils/momento.js";
/**
 * The settings to instantiate the Momento standard cache.
 */
export interface MomentoCacheProps {
  /**
   * The Momento cache client.
   */
  client: ICacheClient;
  /**
   * The name of the cache to use to store the data.
   */
  cacheName: string;
  /**
   * The time to live for the cache items. If not specified,
   * the cache client default is used.
   */
  ttlSeconds?: number;
  /**
   * If true, ensure that the cache exists before returning.
   * If false, the cache is not checked for existence.
   * Defaults to true.
   */
  // Was typed as the literal `true`, which made the documented `false`
  // unassignable even though `fromProps` explicitly handles it.
  ensureCacheExists?: boolean;
}
/**
 * A cache that uses Momento as the backing store.
 * See https://gomomento.com.
 * @example
 * ```typescript
 * const cache = new MomentoCache({
 *   client: new CacheClient({
 *     configuration: Configurations.Laptop.v1(),
 *     credentialProvider: CredentialProvider.fromEnvironmentVariable({
 *       environmentVariableName: "MOMENTO_API_KEY",
 *     }),
 *     defaultTtlSeconds: 60 * 60 * 24, // Cache TTL set to 24 hours.
 *   }),
 *   cacheName: "langchain",
 * });
 * // Initialize the OpenAI model with Momento cache for caching responses
 * const model = new ChatOpenAI({
 *   cache,
 * });
 * await model.invoke("How are you today?");
 * const cachedValues = await cache.lookup("How are you today?", "llmKey");
 * ```
 */
export class MomentoCache extends BaseCache {
  private client: ICacheClient;

  private readonly cacheName: string;

  private readonly ttlSeconds?: number;

  private constructor(props: MomentoCacheProps) {
    super();
    this.client = props.client;
    this.cacheName = props.cacheName;
    this.validateTtlSeconds(props.ttlSeconds);
    this.ttlSeconds = props.ttlSeconds;
  }

  /**
   * Create a new standard cache backed by Momento.
   *
   * @param {MomentoCacheProps} props The settings to instantiate the cache.
   * @param {ICacheClient} props.client The Momento cache client.
   * @param {string} props.cacheName The name of the cache to use to store the data.
   * @param {number} props.ttlSeconds The time to live for the cache items. If not specified,
   * the cache client default is used.
   * @param {boolean} props.ensureCacheExists If true, ensure that the cache exists before returning.
   * If false, the cache is not checked for existence. Defaults to true.
   * @throws {@link InvalidArgumentError} if {@link props.ttlSeconds} is not strictly positive.
   * @returns The Momento-backed cache.
   */
  public static async fromProps(
    props: MomentoCacheProps
  ): Promise<MomentoCache> {
    const instance = new MomentoCache(props);
    // Default to checking/creating the cache unless explicitly disabled.
    if (props.ensureCacheExists ?? true) {
      await ensureCacheExists(props.client, props.cacheName);
    }
    return instance;
  }

  /**
   * Validate the user-specified TTL, if provided, is strictly positive.
   * @param ttlSeconds The TTL to validate.
   */
  private validateTtlSeconds(ttlSeconds?: number): void {
    if (ttlSeconds !== undefined && ttlSeconds <= 0) {
      throw new InvalidArgumentError("ttlSeconds must be positive.");
    }
  }

  /**
   * Lookup LLM generations in cache by prompt and associated LLM key.
   * @param prompt The prompt to lookup.
   * @param llmKey The LLM key to lookup.
   * @returns The generations associated with the prompt and LLM key, or null if not found.
   */
  public async lookup(
    prompt: string,
    llmKey: string
  ): Promise<Generation[] | null> {
    const key = getCacheKey(prompt, llmKey);
    const getResponse = await this.client.get(this.cacheName, key);
    if (getResponse instanceof CacheGet.Hit) {
      const parsedValue = JSON.parse(getResponse.valueString());
      // Treat anything that is not an array as a cache miss.
      if (!Array.isArray(parsedValue)) {
        return null;
      }
      // FIX: reuse the already-parsed array instead of re-parsing the raw
      // string a second time.
      return parsedValue.map(deserializeStoredGeneration);
    } else if (getResponse instanceof CacheGet.Miss) {
      return null;
    } else if (getResponse instanceof CacheGet.Error) {
      throw getResponse.innerException();
    } else {
      throw new Error(`Unknown response type: ${getResponse.toString()}`);
    }
  }

  /**
   * Update the cache with the given generations.
   *
   * Note this overwrites any existing generations for the given prompt and LLM key.
   *
   * @param prompt The prompt to update.
   * @param llmKey The LLM key to update.
   * @param value The generations to store.
   */
  public async update(
    prompt: string,
    llmKey: string,
    value: Generation[]
  ): Promise<void> {
    const key = getCacheKey(prompt, llmKey);
    const setResponse = await this.client.set(
      this.cacheName,
      key,
      JSON.stringify(value.map(serializeGeneration)),
      { ttl: this.ttlSeconds }
    );
    if (setResponse instanceof CacheSet.Success) {
      // pass
    } else if (setResponse instanceof CacheSet.Error) {
      throw setResponse.innerException();
    } else {
      throw new Error(`Unknown response type: ${setResponse.toString()}`);
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/cloudflare_kv.ts
|
import type { KVNamespace } from "@cloudflare/workers-types";
import {
BaseCache,
getCacheKey,
serializeGeneration,
deserializeStoredGeneration,
} from "@langchain/core/caches";
import { Generation } from "@langchain/core/outputs";
/**
* @deprecated Install and import from "@langchain/cloudflare" instead.
*
* Represents a specific implementation of a caching mechanism using Cloudflare KV
* as the underlying storage system. It extends the `BaseCache` class and
* overrides its methods to provide the Cloudflare KV-specific logic.
* @example
* ```typescript
* // Example of using OpenAI with Cloudflare KV as cache in a Cloudflare Worker
* const cache = new CloudflareKVCache(env.KV_NAMESPACE);
* const model = new ChatAnthropic({
* cache,
* });
* const response = await model.invoke("How are you today?");
* return new Response(JSON.stringify(response), {
* headers: { "content-type": "application/json" },
* });
*
* ```
*/
export class CloudflareKVCache extends BaseCache {
  private binding: KVNamespace;

  constructor(binding: KVNamespace) {
    super();
    this.binding = binding;
  }

  /**
   * Reads cached generations back out of Cloudflare KV. Generations are
   * stored one per index-suffixed cache key, so indices are walked upward
   * until the first missing key.
   * @param prompt The prompt used to construct the cache key.
   * @param llmKey The LLM key used to construct the cache key.
   * @returns An array of Generations if found, null otherwise.
   */
  public async lookup(prompt: string, llmKey: string) {
    const generations: Generation[] = [];
    for (let idx = 0; ; idx += 1) {
      const entry = await this.binding.get(
        getCacheKey(prompt, llmKey, String(idx))
      );
      if (!entry) {
        break;
      }
      generations.push(deserializeStoredGeneration(JSON.parse(entry)));
    }
    return generations.length > 0 ? generations : null;
  }

  /**
   * Writes the given generations to Cloudflare KV, one entry per generation
   * under an index-suffixed cache key derived from the prompt and LLM key.
   * @param prompt The prompt used to construct the cache key.
   * @param llmKey The LLM key used to construct the cache key.
   * @param value The generations to store.
   */
  public async update(prompt: string, llmKey: string, value: Generation[]) {
    let idx = 0;
    for (const generation of value) {
      await this.binding.put(
        getCacheKey(prompt, llmKey, String(idx)),
        JSON.stringify(serializeGeneration(generation))
      );
      idx += 1;
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/upstash_redis.ts
|
import { Redis, type RedisConfigNodejs } from "@upstash/redis";
import { Generation } from "@langchain/core/outputs";
import {
BaseCache,
deserializeStoredGeneration,
getCacheKey,
serializeGeneration,
} from "@langchain/core/caches";
import { StoredGeneration } from "@langchain/core/messages";
export type UpstashRedisCacheProps = {
  /**
   * The config to use to instantiate an Upstash Redis client.
   * Ignored when an existing `client` is provided.
   */
  config?: RedisConfigNodejs;
  /**
   * An existing Upstash Redis client. Takes precedence over `config`.
   */
  client?: Redis;
};
/**
* A cache that uses Upstash as the backing store.
* See https://docs.upstash.com/redis.
* @example
* ```typescript
* const cache = new UpstashRedisCache({
* config: {
* url: "UPSTASH_REDIS_REST_URL",
* token: "UPSTASH_REDIS_REST_TOKEN",
* },
* });
* // Initialize the OpenAI model with Upstash Redis cache for caching responses
* const model = new ChatOpenAI({
* cache,
* });
* await model.invoke("How are you today?");
* const cachedValues = await cache.lookup("How are you today?", "llmKey");
* ```
*/
export class UpstashRedisCache extends BaseCache {
  private redisClient: Redis;

  constructor(props: UpstashRedisCacheProps) {
    super();
    const { config, client } = props;
    // A pre-built client takes precedence; having neither is a hard error.
    if (client) {
      this.redisClient = client;
    } else if (config) {
      this.redisClient = new Redis(config);
    } else {
      throw new Error(
        `Upstash Redis caches require either a config object or a pre-configured client.`
      );
    }
  }

  /**
   * Lookup LLM generations in cache by prompt and associated LLM key.
   * Entries are stored one generation per index-suffixed key; reading stops
   * at the first missing index.
   */
  public async lookup(prompt: string, llmKey: string) {
    const generations: Generation[] = [];
    for (let idx = 0; ; idx += 1) {
      const stored = await this.redisClient.get<StoredGeneration | null>(
        getCacheKey(prompt, llmKey, String(idx))
      );
      if (!stored) {
        break;
      }
      generations.push(deserializeStoredGeneration(stored));
    }
    return generations.length > 0 ? generations : null;
  }

  /**
   * Update the cache with the given generations.
   *
   * Note this overwrites any existing generations for the given prompt and LLM key.
   */
  public async update(prompt: string, llmKey: string, value: Generation[]) {
    let idx = 0;
    for (const generation of value) {
      await this.redisClient.set(
        getCacheKey(prompt, llmKey, String(idx)),
        JSON.stringify(serializeGeneration(generation))
      );
      idx += 1;
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/ioredis.ts
|
import { Redis } from "ioredis";
import {
BaseCache,
getCacheKey,
serializeGeneration,
deserializeStoredGeneration,
} from "@langchain/core/caches";
import { Generation } from "@langchain/core/outputs";
/**
* Cache LLM results using Redis.
* @example
* ```typescript
* const model = new ChatOpenAI({
* cache: new RedisCache(new Redis(), { ttl: 60 }),
* });
*
* // Invoke the model with a prompt
* const response = await model.invoke("Do something random!");
* console.log(response);
*
* // Remember to disconnect the Redis client when done
* await redisClient.disconnect();
* ```
*/
export class RedisCache extends BaseCache {
  protected redisClient: Redis;

  // Optional time-to-live (seconds) applied to every cache entry on write.
  protected ttl?: number;

  constructor(
    redisClient: Redis,
    config?: {
      ttl?: number;
    }
  ) {
    super();
    this.redisClient = redisClient;
    this.ttl = config?.ttl;
  }

  /**
   * Retrieves cached generations for a prompt and LLM key. Entries are
   * stored one per index-suffixed key; reading stops at the first miss.
   * @param prompt The prompt used to find the data.
   * @param llmKey The LLM key used to find the data.
   * @returns The corresponding data as an array of Generation objects, or null if not found.
   */
  public async lookup(prompt: string, llmKey: string) {
    const generations: Generation[] = [];
    for (let idx = 0; ; idx += 1) {
      const raw = await this.redisClient.get(
        getCacheKey(prompt, llmKey, String(idx))
      );
      if (!raw) {
        break;
      }
      generations.push(deserializeStoredGeneration(JSON.parse(raw)));
    }
    return generations.length > 0 ? generations : null;
  }

  /**
   * Stores the given generations, one per index-suffixed key, applying the
   * configured TTL when one was provided.
   * @param prompt The prompt used to store the data.
   * @param llmKey The LLM key used to store the data.
   * @param value The data to be stored, represented as an array of Generation objects.
   */
  public async update(prompt: string, llmKey: string, value: Generation[]) {
    for (let i = 0; i < value.length; i += 1) {
      const key = getCacheKey(prompt, llmKey, String(i));
      const payload = JSON.stringify(serializeGeneration(value[i]));
      // ioredis takes the expiry as the "EX" flag variant of SET.
      if (this.ttl === undefined) {
        await this.redisClient.set(key, payload);
      } else {
        await this.redisClient.set(key, payload, "EX", this.ttl);
      }
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/tests/upstash_redis.int.test.ts
|
/* eslint-disable no-process-env */
import { ChatOpenAI } from "@langchain/openai";
import { UpstashRedisCache } from "../upstash_redis.js";
/**
 * Regression test: the `lookup` method previously assumed cached values were
 * typed as strings, whereas Upstash actually returns already-parsed JSON
 * objects. This test guards against re-introducing that parsing bug.
 */
test.skip("UpstashRedisCache does not parse non string cached values", async () => {
  // Fail fast with a clear message when required credentials are absent.
  if (
    !process.env.UPSTASH_REDIS_REST_URL ||
    !process.env.UPSTASH_REDIS_REST_TOKEN ||
    !process.env.OPENAI_API_KEY
  ) {
    throw new Error(
      "Missing Upstash Redis REST URL // REST TOKEN or OpenAI API key"
    );
  }
  const upstashRedisCache = new UpstashRedisCache({
    config: {
      url: process.env.UPSTASH_REDIS_REST_URL,
      token: process.env.UPSTASH_REDIS_REST_TOKEN,
    },
  });
  const chat = new ChatOpenAI({
    temperature: 0,
    cache: upstashRedisCache,
    maxTokens: 10,
  });
  const prompt = "is the sky blue";
  // Second invocation should be served from the cache and match the first.
  const result1 = await chat.invoke(prompt);
  const result2 = await chat.invoke(prompt);
  expect(result1).toEqual(result2);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/tests/ioredis.test.ts
|
import { test, expect, jest } from "@jest/globals";
import { insecureHash } from "@langchain/core/utils/hash";
import { RedisCache } from "../ioredis.js";
// Hash helper expected to match how getCacheKey derives keys from
// "prompt_llmKey_index" — TODO confirm against @langchain/core's getCacheKey.
const sha1 = (str: string) => insecureHash(str);

test("RedisCache", async () => {
  // Minimal Redis stub: only `get` is exercised by `lookup`. It returns a
  // stored generation for index 0 and a miss (null) for every other key.
  const redis = {
    get: jest.fn(async (key: string) => {
      if (key === sha1("foo_bar_0")) {
        return JSON.stringify({ text: "baz" });
      }
      return null;
    }),
  };
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const cache = new RedisCache(redis as any);
  expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/tests/ioredis.int.test.ts
|
import { Redis } from "ioredis";
import { test, expect } from "@jest/globals";
import { OpenAI, ChatOpenAI } from "@langchain/openai";
import { RedisCache } from "../ioredis.js";
// Shared ioredis client, connected once for the whole suite.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let client: any;

describe("Test RedisCache", () => {
  beforeAll(async () => {
    // Requires a local Redis instance on the default port.
    client = new Redis("redis://localhost:6379");
  });

  afterAll(async () => {
    await client.disconnect();
  });

  test("RedisCache with an LLM", async () => {
    const cache = new RedisCache(client, {
      ttl: 60,
    });
    const model = new OpenAI({ cache });
    // Identical prompts should hit the cache, yielding identical outputs.
    const response1 = await model.invoke("What is something random?");
    const response2 = await model.invoke("What is something random?");
    expect(response1).toEqual(response2);
  });

  test("RedisCache with a chat model", async () => {
    const cache = new RedisCache(client, {
      ttl: 60,
    });
    const model = new ChatOpenAI({ cache });
    const response1 = await model.invoke("What is something random?");
    const response2 = await model.invoke("What is something random?");
    expect(response1).not.toBeUndefined();
    // Compare serialized forms since chat responses are message objects.
    expect(JSON.stringify(response1)).toEqual(JSON.stringify(response2));
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/tests/upstash_redis.test.ts
|
import { test, expect, jest } from "@jest/globals";
import { insecureHash } from "@langchain/core/utils/hash";
import { StoredGeneration } from "@langchain/core/messages";
import { UpstashRedisCache } from "../upstash_redis.js";
// Hash helper expected to match how getCacheKey derives keys from
// "prompt_llmKey_index" — TODO confirm against @langchain/core's getCacheKey.
const sha1 = (str: string) => insecureHash(str);

test("UpstashRedisCache", async () => {
  // Minimal Upstash stub: unlike ioredis, `get` returns an already-parsed
  // object (StoredGeneration), not a JSON string.
  const redis = {
    get: jest.fn(async (key: string): Promise<StoredGeneration | null> => {
      if (key === sha1("foo_bar_0")) {
        return { text: "baz" };
      }
      return null;
    }),
  };
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const cache = new UpstashRedisCache({ client: redis as any });
  expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches
|
lc_public_repos/langchainjs/libs/langchain-community/src/caches/tests/momento.test.ts
|
import { expect } from "@jest/globals";
import {
ICacheClient,
IMomentoCache,
CacheDelete,
CacheGet,
CacheIncrement,
CacheKeyExists,
CacheKeysExist,
CacheSet,
CacheSetIfNotExists,
CacheSetFetch,
CacheSetAddElements,
CacheSetAddElement,
CacheSetRemoveElements,
CacheSetRemoveElement,
CacheListFetch,
CacheListLength,
CacheListPushFront,
CacheListPushBack,
CacheListConcatenateBack,
CacheListConcatenateFront,
CacheListPopBack,
CacheListPopFront,
CacheListRemoveValue,
CacheListRetain,
CacheDictionarySetField,
CacheDictionarySetFields,
CacheDictionaryGetField,
CacheDictionaryGetFields,
CacheDictionaryFetch,
CacheDictionaryLength,
CacheDictionaryIncrement,
CacheDictionaryRemoveField,
CacheDictionaryRemoveFields,
CacheSortedSetFetch,
CacheSortedSetPutElement,
CacheSortedSetPutElements,
CacheSortedSetGetRank,
CacheSortedSetGetScore,
CacheSortedSetGetScores,
CacheSortedSetLength,
CacheSortedSetLengthByScore,
CacheSortedSetIncrementScore,
CacheSortedSetRemoveElement,
CacheItemGetType,
CacheItemGetTtl,
CreateCache,
ListCaches,
DeleteCache,
CacheFlush,
CacheUpdateTtl,
CacheIncreaseTtl,
CacheDecreaseTtl,
} from "@gomomento/sdk-core";
import { Generation } from "@langchain/core/outputs";
import { MomentoCache } from "../momento.js";
/**
 * In-memory stand-in for a Momento ICacheClient. Only `get`, `set`, and
 * `createCache` are functional (backed by a Map); the rest of the interface
 * throws, since MomentoCache does not exercise it in these tests.
 */
class MockClient implements ICacheClient {
  // Backing store keyed by cache key; cache name is ignored.
  private _cache: Map<string, string>;
  constructor() {
    this._cache = new Map();
  }
  cache(): IMomentoCache {
    throw new Error("Method not implemented.");
  }
  // Returns a Hit wrapping the stored bytes, or a Miss when absent.
  public async get(_: string, key: string): Promise<CacheGet.Response> {
    if (this._cache.has(key)) {
      return new CacheGet.Hit(new TextEncoder().encode(this._cache.get(key)))
    } else {
      return new CacheGet.Miss();
    }
  }
  // Stores the value unconditionally and always reports success.
  public async set(
    _: string,
    key: string,
    value: string
  ): Promise<CacheSet.Response> {
    this._cache.set(key, value);
    return new CacheSet.Success();
  }
  // Always succeeds so MomentoCache.fromProps's existence check passes.
  public async createCache(): Promise<CreateCache.Response> {
    return new CreateCache.Success();
  }
  // --- Remaining ICacheClient surface: unimplemented stubs. ---
  deleteCache(): Promise<DeleteCache.Response> {
    throw new Error("Method not implemented.");
  }
  listCaches(): Promise<ListCaches.Response> {
    throw new Error("Method not implemented.");
  }
  flushCache(): Promise<CacheFlush.Response> {
    throw new Error("Method not implemented.");
  }
  ping(): Promise<void> {
    throw new Error("Method not implemented.");
  }
  delete(): Promise<CacheDelete.Response> {
    throw new Error("Method not implemented.");
  }
  increment(): Promise<CacheIncrement.Response> {
    throw new Error("Method not implemented.");
  }
  keyExists(): Promise<CacheKeyExists.Response> {
    throw new Error("Method not implemented.");
  }
  keysExist(): Promise<CacheKeysExist.Response> {
    throw new Error("Method not implemented.");
  }
  setIfNotExists(): Promise<CacheSetIfNotExists.Response> {
    throw new Error("Method not implemented.");
  }
  setFetch(): Promise<CacheSetFetch.Response> {
    throw new Error("Method not implemented.");
  }
  setAddElement(): Promise<CacheSetAddElement.Response> {
    throw new Error("Method not implemented.");
  }
  setAddElements(): Promise<CacheSetAddElements.Response> {
    throw new Error("Method not implemented.");
  }
  setRemoveElement(): Promise<CacheSetRemoveElement.Response> {
    throw new Error("Method not implemented.");
  }
  setRemoveElements(): Promise<CacheSetRemoveElements.Response> {
    throw new Error("Method not implemented.");
  }
  listFetch(): Promise<CacheListFetch.Response> {
    throw new Error("Method not implemented.");
  }
  listLength(): Promise<CacheListLength.Response> {
    throw new Error("Method not implemented.");
  }
  listPushFront(): Promise<CacheListPushFront.Response> {
    throw new Error("Method not implemented.");
  }
  listPushBack(): Promise<CacheListPushBack.Response> {
    throw new Error("Method not implemented.");
  }
  listConcatenateBack(): Promise<CacheListConcatenateBack.Response> {
    throw new Error("Method not implemented.");
  }
  listConcatenateFront(): Promise<CacheListConcatenateFront.Response> {
    throw new Error("Method not implemented.");
  }
  listPopBack(): Promise<CacheListPopBack.Response> {
    throw new Error("Method not implemented.");
  }
  listPopFront(): Promise<CacheListPopFront.Response> {
    throw new Error("Method not implemented.");
  }
  listRemoveValue(): Promise<CacheListRemoveValue.Response> {
    throw new Error("Method not implemented.");
  }
  listRetain(): Promise<CacheListRetain.Response> {
    throw new Error("Method not implemented.");
  }
  dictionarySetField(): Promise<CacheDictionarySetField.Response> {
    throw new Error("Method not implemented.");
  }
  dictionarySetFields(): Promise<CacheDictionarySetFields.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryGetField(): Promise<CacheDictionaryGetField.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryGetFields(): Promise<CacheDictionaryGetFields.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryFetch(): Promise<CacheDictionaryFetch.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryIncrement(): Promise<CacheDictionaryIncrement.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryLength(): Promise<CacheDictionaryLength.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryRemoveField(): Promise<CacheDictionaryRemoveField.Response> {
    throw new Error("Method not implemented.");
  }
  dictionaryRemoveFields(): Promise<CacheDictionaryRemoveFields.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetFetchByRank(): Promise<CacheSortedSetFetch.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetFetchByScore(): Promise<CacheSortedSetFetch.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetPutElement(): Promise<CacheSortedSetPutElement.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetPutElements(): Promise<CacheSortedSetPutElements.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetGetRank(): Promise<CacheSortedSetGetRank.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetGetScore(): Promise<CacheSortedSetGetScore.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetGetScores(): Promise<CacheSortedSetGetScores.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetIncrementScore(): Promise<CacheSortedSetIncrementScore.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetLength(): Promise<CacheSortedSetLength.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetLengthByScore(): Promise<CacheSortedSetLengthByScore.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetRemoveElement(): Promise<CacheSortedSetRemoveElement.Response> {
    throw new Error("Method not implemented.");
  }
  sortedSetRemoveElements(): Promise<CacheSortedSetRemoveElement.Response> {
    throw new Error("Method not implemented.");
  }
  itemGetType(): Promise<CacheItemGetType.Response> {
    throw new Error("Method not implemented.");
  }
  itemGetTtl(): Promise<CacheItemGetTtl.Response> {
    throw new Error("Method not implemented.");
  }
  updateTtl(): Promise<CacheUpdateTtl.Response> {
    throw new Error("Method not implemented.");
  }
  increaseTtl(): Promise<CacheIncreaseTtl.Response> {
    throw new Error("Method not implemented.");
  }
  decreaseTtl(): Promise<CacheDecreaseTtl.Response> {
    throw new Error("Method not implemented.");
  }
}
describe("MomentoCache", () => {
  it("should return null on a cache miss", async () => {
    const client = new MockClient();
    const cache = await MomentoCache.fromProps({
      client,
      cacheName: "test-cache",
    });
    // Nothing stored yet, so lookup must miss.
    expect(await cache.lookup("prompt", "llm-key")).toBeNull();
  });

  it("should get a stored value", async () => {
    const client = new MockClient();
    const cache = await MomentoCache.fromProps({
      client,
      cacheName: "test-cache",
    });
    const generations: Generation[] = [{ text: "foo" }];
    // Round trip: what was written under (prompt, llm-key) comes back intact.
    await cache.update("prompt", "llm-key", generations);
    expect(await cache.lookup("prompt", "llm-key")).toStrictEqual(generations);
  });

  it("should work with multiple generations", async () => {
    const client = new MockClient();
    const cache = await MomentoCache.fromProps({
      client,
      cacheName: "test-cache",
    });
    const generations: Generation[] = [
      { text: "foo" },
      { text: "bar" },
      { text: "baz" },
    ];
    // Order and count of generations must be preserved through the cache.
    await cache.update("prompt", "llm-key", generations);
    expect(await cache.lookup("prompt", "llm-key")).toStrictEqual(generations);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/replicate.ts
|
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import type ReplicateInstance from "replicate";
/**
* Interface defining the structure of the input data for the Replicate
* class. It includes details about the model to be used, any additional
* input parameters, and the API key for the Replicate service.
*/
export interface ReplicateInput {
  /** Model identifier in the form `owner/model_name:version`. */
  model: `${string}/${string}:${string}`;
  /** Additional model-specific inputs merged into every request. */
  input?: {
    // different models accept different inputs
    [key: string]: string | number | boolean;
  };
  /** Replicate API key; falls back to env vars when omitted. */
  apiKey?: string;
  /** The key used to pass prompts to the model. */
  promptKey?: string;
}
/**
* Class responsible for managing the interaction with the Replicate API.
* It handles the API key and model details, makes the actual API calls,
* and converts the API response into a format usable by the rest of the
* LangChain framework.
* @example
* ```typescript
* const model = new Replicate({
* model: "replicate/flan-t5-xl:3ae0799123a1fe11f8c89fd99632f843fc5f7a761630160521c4253149754523",
* });
*
* const res = await model.invoke(
* "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
* );
* console.log({ res });
* ```
*/
export class Replicate extends LLM implements ReplicateInput {
  static lc_name() {
    return "Replicate";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "REPLICATE_API_TOKEN",
    };
  }

  lc_serializable = true;

  model: ReplicateInput["model"];

  input: ReplicateInput["input"];

  apiKey: string;

  promptKey?: string;

  constructor(fields: ReplicateInput & BaseLLMParams) {
    super(fields);

    // Accept both the legacy and current env var names for the API token.
    const apiKey =
      fields?.apiKey ??
      getEnvironmentVariable("REPLICATE_API_KEY") ?? // previous environment variable for backwards compatibility
      getEnvironmentVariable("REPLICATE_API_TOKEN"); // current environment variable, matching the Python library

    if (!apiKey) {
      throw new Error(
        "Please set the REPLICATE_API_TOKEN environment variable"
      );
    }

    this.apiKey = apiKey;
    this.model = fields.model;
    this.input = fields.input ?? {};
    this.promptKey = fields.promptKey;
  }

  _llmType() {
    return "replicate";
  }

  /**
   * Run the model once and return its textual output.
   * @ignore
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const replicate = await this._prepareReplicate();
    const input = await this._getReplicateInput(replicate, prompt);

    const output = await this.caller.callWithOptions(
      { signal: options.signal },
      () =>
        replicate.run(this.model, {
          input,
        })
    );

    if (typeof output === "string") {
      return output;
    } else if (Array.isArray(output)) {
      return output.join("");
    } else {
      // Note this is a little odd, but the output format is not consistent
      // across models, so it makes some amount of sense.
      return String(output);
    }
  }

  /**
   * Stream the model's output token-by-token, yielding one GenerationChunk
   * per "output" event and a final empty chunk when the stream is done.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const replicate = await this._prepareReplicate();
    const input = await this._getReplicateInput(replicate, prompt);

    const stream = await this.caller.callWithOptions(
      { signal: options?.signal },
      async () =>
        replicate.stream(this.model, {
          input,
        })
    );
    for await (const chunk of stream) {
      if (chunk.event === "output") {
        // Guard against missing data so GenerationChunk always gets a string.
        yield new GenerationChunk({
          text: chunk.data ?? "",
          generationInfo: chunk,
        });
        await runManager?.handleLLMNewToken(chunk.data ?? "");
      }

      // stream is done
      if (chunk.event === "done")
        yield new GenerationChunk({
          text: "",
          generationInfo: { finished: true },
        });
    }
  }

  /**
   * Dynamically import the `replicate` package, with a helpful error when it
   * is not installed.
   * @ignore
   */
  static async imports(): Promise<{
    Replicate: typeof ReplicateInstance;
  }> {
    try {
      const { default: Replicate } = await import("replicate");
      return { Replicate };
    } catch (e) {
      throw new Error(
        "Please install replicate as a dependency with, e.g. `yarn add replicate`"
      );
    }
  }

  /** Build an authenticated Replicate client instance. */
  private async _prepareReplicate(): Promise<ReplicateInstance> {
    const imports = await Replicate.imports();

    return new imports.Replicate({
      userAgent: "langchain",
      auth: this.apiKey,
    });
  }

  /**
   * Build the request input, resolving (and memoizing) which input key the
   * model expects the prompt under by inspecting its OpenAPI schema.
   */
  private async _getReplicateInput(
    replicate: ReplicateInstance,
    prompt: string
  ) {
    if (this.promptKey === undefined) {
      const [modelString, versionString] = this.model.split(":");
      const version = await replicate.models.versions.get(
        modelString.split("/")[0],
        modelString.split("/")[1],
        versionString
      );
      const openapiSchema = version.openapi_schema;
      const inputProperties: { "x-order": number | undefined }[] =
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (openapiSchema as any)?.components?.schemas?.Input?.properties;
      if (inputProperties === undefined) {
        this.promptKey = "prompt";
      } else {
        // Pick the property with the lowest x-order as the prompt key.
        const sortedInputProperties = Object.entries(inputProperties).sort(
          ([_keyA, valueA], [_keyB, valueB]) => {
            const orderA = valueA["x-order"] || 0;
            const orderB = valueB["x-order"] || 0;
            return orderA - orderB;
          }
        );
        // Optional chaining: an Input schema with zero properties previously
        // threw a TypeError here instead of falling back to "prompt".
        this.promptKey = sortedInputProperties[0]?.[0] ?? "prompt";
      }
    }
    return {
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      [this.promptKey!]: prompt,
      ...this.input,
    };
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/hf.ts
|
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { GenerationChunk } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* Interface defining the parameters for configuring the Hugging Face
* model for text generation.
*/
export interface HFInput {
  /** Model to use */
  model: string;
  /** Custom inference endpoint URL to use */
  endpointUrl?: string;
  /** Sampling temperature to use */
  temperature?: number;
  /**
   * Maximum number of tokens to generate in the completion.
   */
  maxTokens?: number;
  /**
   * The model will stop generating text when one of the strings in the list is generated.
   */
  stopSequences?: string[];
  /** Total probability mass of tokens to consider at each step */
  topP?: number;
  /** Integer to define the top tokens considered within the sample operation to create new text. */
  topK?: number;
  /** Penalizes repeated tokens according to frequency */
  frequencyPenalty?: number;
  /** API key to use. Falls back to the HUGGINGFACEHUB_API_KEY env var when omitted. */
  apiKey?: string;
  /**
   * Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
   */
  includeCredentials?: string | boolean;
}
/**
* Class implementing the Large Language Model (LLM) interface using the
* Hugging Face Inference API for text generation.
* @example
* ```typescript
* const model = new HuggingFaceInference({
* model: "gpt2",
* temperature: 0.7,
* maxTokens: 50,
* });
*
* const res = await model.invoke(
* "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
* );
* console.log({ res });
* ```
*/
export class HuggingFaceInference extends LLM implements HFInput {
  lc_serializable = true;

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "HUGGINGFACEHUB_API_KEY",
    };
  }

  // Defaults below are overridden field-by-field in the constructor.
  model = "gpt2";

  temperature: number | undefined = undefined;

  maxTokens: number | undefined = undefined;

  stopSequences: string[] | undefined = undefined;

  topP: number | undefined = undefined;

  topK: number | undefined = undefined;

  frequencyPenalty: number | undefined = undefined;

  apiKey: string | undefined = undefined;

  endpointUrl: string | undefined = undefined;

  includeCredentials: string | boolean | undefined = undefined;

  constructor(fields?: Partial<HFInput> & BaseLLMParams) {
    super(fields ?? {});

    this.model = fields?.model ?? this.model;
    this.temperature = fields?.temperature ?? this.temperature;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.stopSequences = fields?.stopSequences ?? this.stopSequences;
    this.topP = fields?.topP ?? this.topP;
    this.topK = fields?.topK ?? this.topK;
    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
    // Explicit field wins over the environment variable.
    this.apiKey =
      fields?.apiKey ?? getEnvironmentVariable("HUGGINGFACEHUB_API_KEY");
    this.endpointUrl = fields?.endpointUrl;
    this.includeCredentials = fields?.includeCredentials;

    // An API key is required — fail at construction time, not on first call.
    if (!this.apiKey) {
      throw new Error(
        `Please set an API key for HuggingFace Hub in the environment variable "HUGGINGFACEHUB_API_KEY" or in the apiKey field of the HuggingFaceInference constructor.`
      );
    }
  }

  _llmType() {
    return "hf";
  }

  /**
   * Map this instance's configuration onto the parameter names the
   * Hugging Face text-generation API expects.
   */
  invocationParams(options?: this["ParsedCallOptions"]) {
    return {
      model: this.model,
      parameters: {
        // make it behave similar to openai, returning only the generated text
        return_full_text: false,
        temperature: this.temperature,
        max_new_tokens: this.maxTokens,
        stop: options?.stop ?? this.stopSequences,
        top_p: this.topP,
        top_k: this.topK,
        repetition_penalty: this.frequencyPenalty,
      },
    };
  }

  /**
   * Stream tokens from the HF inference API, yielding one GenerationChunk
   * per token and a final empty "finished" chunk.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const hfi = await this._prepareHFInference();
    const stream = await this.caller.call(async () =>
      hfi.textGenerationStream({
        ...this.invocationParams(options),
        inputs: prompt,
      })
    );
    for await (const chunk of stream) {
      const token = chunk.token.text;
      yield new GenerationChunk({ text: token, generationInfo: chunk });
      await runManager?.handleLLMNewToken(token ?? "");

      // stream is done
      if (chunk.generated_text)
        yield new GenerationChunk({
          text: "",
          generationInfo: { finished: true },
        });
    }
  }

  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const hfi = await this._prepareHFInference();
    const args = { ...this.invocationParams(options), inputs: prompt };
    const res = await this.caller.callWithOptions(
      { signal: options.signal },
      hfi.textGeneration.bind(hfi),
      args
    );
    return res.generated_text;
  }

  /**
   * Build an HfInference client, routed to the custom endpoint when one is
   * configured.
   * @ignore
   */
  private async _prepareHFInference() {
    const { HfInference } = await HuggingFaceInference.imports();
    const hfi = new HfInference(this.apiKey, {
      includeCredentials: this.includeCredentials,
    });
    return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi;
  }

  /**
   * Dynamically import @huggingface/inference with a helpful install hint.
   * @ignore
   */
  static async imports(): Promise<{
    HfInference: typeof import("@huggingface/inference").HfInference;
  }> {
    try {
      const { HfInference } = await import("@huggingface/inference");
      return { HfInference };
    } catch (e) {
      throw new Error(
        "Please install huggingface as a dependency with, e.g. `yarn add @huggingface/inference`"
      );
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/llama_cpp.ts
|
/* eslint-disable import/no-extraneous-dependencies */
import {
LlamaModel,
LlamaContext,
LlamaChatSession,
LlamaJsonSchemaGrammar,
LlamaGrammar,
getLlama,
GbnfJsonSchema,
} from "node-llama-cpp";
import {
LLM,
type BaseLLMCallOptions,
type BaseLLMParams,
} from "@langchain/core/language_models/llms";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import {
LlamaBaseCppInputs,
createLlamaModel,
createLlamaContext,
createLlamaSession,
createLlamaJsonSchemaGrammar,
createCustomGrammar,
} from "../utils/llama_cpp.js";
/**
* Note that the modelPath is the only required parameter. For testing you
* can set this in the environment variable `LLAMA_PATH`.
*/
/** Constructor inputs: the base llama.cpp options plus standard LLM params. */
export interface LlamaCppInputs extends LlamaBaseCppInputs, BaseLLMParams {}

export interface LlamaCppCallOptions extends BaseLLMCallOptions {
  /** The maximum number of tokens the response should contain. */
  maxTokens?: number;
  /** A function called when matching the provided token array */
  onToken?: (tokens: number[]) => void;
}
/**
* To use this model you need to have the `node-llama-cpp` module installed.
* This can be installed using `npm install -S node-llama-cpp` and the minimum
* version supported in version 2.0.0.
* This also requires that have a locally built version of Llama2 installed.
*/
/**
 * To use this model you need to have the `node-llama-cpp` module installed.
 * This can be installed using `npm install -S node-llama-cpp` and the minimum
 * version supported in version 2.0.0.
 * This also requires that have a locally built version of Llama2 installed.
 *
 * Instances must be created via the static `initialize()` factory, which
 * loads the model/context/session; the constructor alone only copies options.
 */
export class LlamaCpp extends LLM<LlamaCppCallOptions> {
  lc_serializable = true;

  // NOTE(review): this static is read in _streamResponseChunks below but is
  // never assigned anywhere visible here, so it is presumably undefined when
  // a replacement context is created mid-stream — confirm intended.
  static inputs: LlamaCppInputs;

  maxTokens?: number;

  temperature?: number;

  topK?: number;

  topP?: number;

  trimWhitespaceSuffix?: boolean;

  // Handles populated by initialize(); unset on a bare-constructed instance.
  _model: LlamaModel;

  _context: LlamaContext;

  _session: LlamaChatSession;

  // Optional grammars constraining generation; _jsonSchema wins over _gbnf.
  _jsonSchema: LlamaJsonSchemaGrammar<GbnfJsonSchema> | undefined;

  _gbnf: LlamaGrammar | undefined;

  static lc_name() {
    return "LlamaCpp";
  }

  public constructor(inputs: LlamaCppInputs) {
    super(inputs);
    this.maxTokens = inputs?.maxTokens;
    this.temperature = inputs?.temperature;
    this.topK = inputs?.topK;
    this.topP = inputs?.topP;
    this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix;
  }

  /**
   * Initializes the llama_cpp model for usage.
   * @param inputs - the inputs passed onto the model.
   * @returns A Promise that resolves to the LlamaCpp type class.
   */
  public static async initialize(inputs: LlamaCppInputs): Promise<LlamaCpp> {
    const instance = new LlamaCpp(inputs);
    const llama = await getLlama();
    instance._model = await createLlamaModel(inputs, llama);
    instance._context = await createLlamaContext(instance._model, inputs);
    instance._jsonSchema = await createLlamaJsonSchemaGrammar(
      inputs?.jsonSchema,
      llama
    );
    instance._gbnf = await createCustomGrammar(inputs?.gbnf, llama);
    instance._session = createLlamaSession(instance._context);
    return instance;
  }

  _llmType() {
    return "llama_cpp";
  }

  /**
   * Runs a single prompt through the chat session, optionally constrained by
   * a JSON-schema or GBNF grammar.
   * @ignore
   */
  async _call(
    prompt: string,
    options?: this["ParsedCallOptions"]
  ): Promise<string> {
    try {
      // Prefer the JSON-schema grammar, then a custom GBNF grammar, else none.
      let promptGrammer;
      if (this._jsonSchema !== undefined) {
        promptGrammer = this._jsonSchema;
      } else if (this._gbnf !== undefined) {
        promptGrammer = this._gbnf;
      } else {
        promptGrammer = undefined;
      }
      const promptOptions = {
        grammar: promptGrammer,
        onToken: options?.onToken,
        maxTokens: this?.maxTokens,
        temperature: this?.temperature,
        topK: this?.topK,
        topP: this?.topP,
        trimWhitespaceSuffix: this?.trimWhitespaceSuffix,
      };
      const completion = await this._session.prompt(prompt, promptOptions);
      if (this._jsonSchema !== undefined && completion !== undefined) {
        // NOTE(review): when a jsonSchema grammar is active, the parsed object
        // is returned through an unsafe cast — callers actually receive an
        // object typed as string. Confirm downstream expectations.
        return this._jsonSchema.parse(completion) as unknown as string;
      }
      return completion;
    } catch (e) {
      // NOTE(review): the original error is discarded here, which hides the
      // underlying cause — consider chaining it.
      throw new Error("Error getting prompt completion.");
    }
  }

  async *_streamResponseChunks(
    prompt: string,
    _options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const promptOptions = {
      temperature: this?.temperature,
      maxTokens: this?.maxTokens,
      topK: this?.topK,
      topP: this?.topP,
    };
    // When the current context has no free sequences, replace it. See the
    // NOTE on `static inputs` above: this argument may be undefined.
    if (this._context.sequencesLeft === 0) {
      this._context = await createLlamaContext(this._model, LlamaCpp.inputs);
    }
    const sequence = this._context.getSequence();
    const tokens = this._model.tokenize(prompt);
    const stream = await this.caller.call(async () =>
      sequence.evaluate(tokens, promptOptions)
    );
    // Detokenize each chunk and report it to the run manager as it arrives.
    for await (const chunk of stream) {
      yield new GenerationChunk({
        text: this._model.detokenize([chunk]),
        generationInfo: {},
      });
      await runManager?.handleLLMNewToken(
        this._model.detokenize([chunk]) ?? ""
      );
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/raycast.ts
|
import { AI, environment } from "@raycast/api";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
/**
* The input parameters for the RaycastAI class, which extends the BaseLLMParams interface.
*/
export interface RaycastAIInput extends BaseLLMParams {
  /** Raycast model to use; required — the constructor throws when omitted. */
  model?: AI.Model;
  /** Sampling temperature ("creativity"); defaults to 0.5 when omitted. */
  creativity?: number;
  /** Maximum API requests per minute; defaults to 10 when omitted. */
  rateLimitPerMinute?: number;
}
/** Resolves after the given number of milliseconds. */
const wait = (ms: number) =>
  new Promise((resolve) => setTimeout(resolve, ms));
/**
* The RaycastAI class, which extends the LLM class and implements the RaycastAIInput interface.
*/
/**
 * The RaycastAI class, which extends the LLM class and implements the RaycastAIInput interface.
 */
export class RaycastAI extends LLM implements RaycastAIInput {
  lc_serializable = true;

  /**
   * The model to use for generating text.
   */
  model: AI.Model;

  /**
   * The creativity parameter, also known as the "temperature".
   */
  creativity: number;

  /**
   * The rate limit for API calls, in requests per minute.
   */
  rateLimitPerMinute: number;

  /**
   * The timestamp of the last API call, used to enforce the rate limit.
   */
  private lastCallTimestamp = 0;

  /**
   * Creates a new instance of the RaycastAI class.
   * @param {RaycastAIInput} fields The input parameters for the RaycastAI class.
   * @throws {Error} If the Raycast AI environment is not accessible.
   */
  constructor(fields: RaycastAIInput) {
    super(fields ?? {});
    if (!environment.canAccess(AI)) {
      throw new Error("Raycast AI environment is not accessible.");
    }
    if (fields.model === undefined) {
      throw new Error(`You must provide a "model" field in your params.`);
    }
    this.model = fields.model;
    this.creativity = fields.creativity ?? 0.5;
    this.rateLimitPerMinute = fields.rateLimitPerMinute ?? 10;
  }

  /**
   * Returns the type of the LLM, which is "raycast_ai".
   * @return {string} The type of the LLM.
   * @ignore
   */
  _llmType() {
    return "raycast_ai";
  }

  /**
   * Calls AI.ask with the given prompt and returns the generated text.
   * @param {string} prompt The prompt to generate text from.
   * @return {Promise<string>} A Promise that resolves to the generated text.
   * @ignore
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const response = await this.caller.call(async () => {
      // Rate limit calls to Raycast AI
      const now = Date.now();
      const timeSinceLastCall = now - this.lastCallTimestamp;
      const timeToWait =
        (60 / this.rateLimitPerMinute) * 1000 - timeSinceLastCall;
      if (timeToWait > 0) {
        await wait(timeToWait);
      }
      // Bug fix: record when this request is dispatched so the next call is
      // throttled relative to it. Previously this field was never written
      // (always 0), so the rate limit was never actually enforced.
      this.lastCallTimestamp = Date.now();
      return await AI.ask(prompt, {
        model: this.model,
        creativity: this.creativity,
        signal: options.signal,
      });
    });
    // Since Raycast AI returns the response directly, no need for output transformation
    return response;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/yandex.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct";
/** @deprecated Prefer @langchain/yandex */
export interface YandexGPTInputs extends BaseLLMParams {
/**
* What sampling temperature to use.
* Should be a double number between 0 (inclusive) and 1 (inclusive).
*/
temperature?: number;
/**
* Maximum limit on the total number of tokens
* used for both the input prompt and the generated response.
*/
maxTokens?: number;
/** Model name to use. */
model?: string;
/**
* Yandex Cloud Api Key for service account
* with the `ai.languageModels.user` role.
*/
apiKey?: string;
/**
* Yandex Cloud IAM token for service account
* with the `ai.languageModels.user` role.
*/
iamToken?: string;
}
/** @deprecated Prefer @langchain/yandex */
/** @deprecated Prefer @langchain/yandex */
export class YandexGPT extends LLM implements YandexGPTInputs {
  lc_serializable = true;

  static lc_name() {
    return "Yandex GPT";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "YC_API_KEY",
      iamToken: "YC_IAM_TOKEN",
    };
  }

  /** Sampling temperature in [0, 1]. */
  temperature = 0.6;

  /** Combined prompt + completion token budget. */
  maxTokens = 1700;

  /** Yandex model name. */
  model = "general";

  apiKey?: string;

  iamToken?: string;

  /**
   * Builds a YandexGPT client. Credentials come from the fields or from the
   * YC_API_KEY / YC_IAM_TOKEN environment variables; at least one is required.
   */
  constructor(fields?: YandexGPTInputs) {
    super(fields ?? {});
    const key = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
    const token = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
    if (key === undefined && token === undefined) {
      throw new Error(
        "Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field."
      );
    }
    this.apiKey = key;
    this.iamToken = token;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.temperature = fields?.temperature ?? this.temperature;
    this.model = fields?.model ?? this.model;
  }

  _llmType() {
    return "yandexgpt";
  }

  /**
   * POSTs the prompt to the YandexGPT instruct endpoint and returns the
   * first alternative's text.
   * @ignore
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    // Hit the `generate` endpoint on the `large` model
    return this.caller.callWithOptions({ signal: options.signal }, async () => {
      // An API key takes precedence over an IAM token when both are present.
      const authorization =
        this.apiKey !== undefined
          ? `Api-Key ${this.apiKey}`
          : `Bearer ${this.iamToken}`;
      const headers = {
        "Content-Type": "application/json",
        Authorization: authorization,
      };
      const bodyData = {
        model: this.model,
        generationOptions: {
          temperature: this.temperature,
          maxTokens: this.maxTokens,
        },
        requestText: prompt,
      };
      try {
        const response = await fetch(apiUrl, {
          method: "POST",
          headers,
          body: JSON.stringify(bodyData),
        });
        if (!response.ok) {
          throw new Error(
            `Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`
          );
        }
        const responseData = await response.json();
        return responseData.result.alternatives[0].text;
      } catch (error) {
        throw new Error(`Failed to fetch ${apiUrl} from YandexGPT ${error}`);
      }
    });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/ai21.ts
|
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* Type definition for AI21 penalty data.
*/
/**
 * Type definition for AI21 penalty data.
 * Field semantics mirror the AI21 completion API's penalty object —
 * confirm details against the AI21 API documentation.
 */
export type AI21PenaltyData = {
  /** Penalty magnitude; the library default is 0 (see AI21.getDefaultAI21PenaltyData). */
  scale: number;
  applyToWhitespaces: boolean;
  applyToPunctuations: boolean;
  applyToNumbers: boolean;
  applyToStopwords: boolean;
  applyToEmojis: boolean;
};

/**
 * Interface for AI21 input parameters.
 */
export interface AI21Input extends BaseLLMParams {
  /** AI21 API key; falls back to the AI21_API_KEY environment variable. */
  ai21ApiKey?: string;
  /** Model to call; defaults to "j2-jumbo-instruct". */
  model?: string;
  temperature?: number;
  minTokens?: number;
  maxTokens?: number;
  topP?: number;
  presencePenalty?: AI21PenaltyData;
  countPenalty?: AI21PenaltyData;
  frequencyPenalty?: AI21PenaltyData;
  numResults?: number;
  logitBias?: Record<string, number>;
  /** Default stop sequences; cannot be combined with a call-time `stop`. */
  stop?: string[];
  /** Optional custom base URL for the AI21 API. */
  baseUrl?: string;
}
/**
* Class representing the AI21 language model. It extends the LLM (Large
* Language Model) class, providing a standard interface for interacting
* with the AI21 language model.
*/
/**
 * Class representing the AI21 language model. It extends the LLM (Large
 * Language Model) class, providing a standard interface for interacting
 * with the AI21 language model.
 */
export class AI21 extends LLM implements AI21Input {
  lc_serializable = true;

  model = "j2-jumbo-instruct";

  temperature = 0.7;

  maxTokens = 1024;

  minTokens = 0;

  topP = 1;

  presencePenalty = AI21.getDefaultAI21PenaltyData();

  countPenalty = AI21.getDefaultAI21PenaltyData();

  frequencyPenalty = AI21.getDefaultAI21PenaltyData();

  numResults = 1;

  logitBias?: Record<string, number>;

  ai21ApiKey?: string;

  stop?: string[];

  baseUrl?: string;

  constructor(fields?: AI21Input) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    this.temperature = fields?.temperature ?? this.temperature;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.minTokens = fields?.minTokens ?? this.minTokens;
    this.topP = fields?.topP ?? this.topP;
    this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;
    this.countPenalty = fields?.countPenalty ?? this.countPenalty;
    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
    this.numResults = fields?.numResults ?? this.numResults;
    this.logitBias = fields?.logitBias;
    this.ai21ApiKey =
      fields?.ai21ApiKey ?? getEnvironmentVariable("AI21_API_KEY");
    this.stop = fields?.stop;
    this.baseUrl = fields?.baseUrl;
  }

  /**
   * Method to validate the environment. It checks if the AI21 API key is
   * set. If not, it throws an error.
   */
  validateEnvironment() {
    if (!this.ai21ApiKey) {
      throw new Error(
        `No AI21 API key found. Please set it as "AI21_API_KEY" in your environment variables.`
      );
    }
  }

  /**
   * Static method to get the default penalty data for AI21.
   * @returns AI21PenaltyData
   */
  static getDefaultAI21PenaltyData(): AI21PenaltyData {
    return {
      scale: 0,
      applyToWhitespaces: true,
      applyToPunctuations: true,
      applyToNumbers: true,
      applyToStopwords: true,
      applyToEmojis: true,
    };
  }

  /** Get the type of LLM. */
  _llmType() {
    return "ai21";
  }

  /** Get the default parameters for calling AI21 API. */
  get defaultParams() {
    return {
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      minTokens: this.minTokens,
      topP: this.topP,
      presencePenalty: this.presencePenalty,
      countPenalty: this.countPenalty,
      frequencyPenalty: this.frequencyPenalty,
      numResults: this.numResults,
      logitBias: this.logitBias,
    };
  }

  /** Get the identifying parameters for this LLM. */
  get identifyingParams() {
    return { ...this.defaultParams, model: this.model };
  }

  /** Call out to AI21's complete endpoint.
   Args:
     prompt: The prompt to pass into the model.
     stop: Optional list of stop words to use when generating.
   Returns:
     The string generated by the model.
   Example:
     let response = ai21._call("Tell me a joke.");
  */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    let stop = options?.stop;
    this.validateEnvironment();
    if (this.stop && stop && this.stop.length > 0 && stop.length > 0) {
      throw new Error("`stop` found in both the input and default params.");
    }
    stop = this.stop ?? stop ?? [];
    // Bug fix: `??` binds tighter than `?:`, so the previous expression
    // `this.baseUrl ?? cond ? a : b` parsed as `(this.baseUrl ?? cond) ? a : b`
    // — any caller-supplied baseUrl was ignored and (being truthy) always
    // selected the experimental URL. Parenthesize so baseUrl takes precedence.
    const baseUrl =
      this.baseUrl ??
      (this.model === "j1-grande-instruct"
        ? "https://api.ai21.com/studio/v1/experimental"
        : "https://api.ai21.com/studio/v1");
    const url = `${baseUrl}/${this.model}/complete`;
    const headers = {
      Authorization: `Bearer ${this.ai21ApiKey}`,
      "Content-Type": "application/json",
    };
    const data = { prompt, stopSequences: stop, ...this.defaultParams };
    const responseData = await this.caller.callWithOptions({}, async () => {
      const response = await fetch(url, {
        method: "POST",
        headers,
        body: JSON.stringify(data),
        signal: options.signal,
      });
      if (!response.ok) {
        const error = new Error(
          `AI21 call failed with status code ${response.status}`
        );
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).response = response;
        throw error;
      }
      return response.json();
    });
    if (
      !responseData.completions ||
      responseData.completions.length === 0 ||
      !responseData.completions[0].data
    ) {
      throw new Error("No completions found in response");
    }
    return responseData.completions[0].data.text ?? "";
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/arcjet.ts
|
import {
LLM,
BaseLLM,
type BaseLLMCallOptions,
} from "@langchain/core/language_models/llms";
import type { ArcjetSensitiveInfoType, RedactOptions } from "@arcjet/redact";
/** Detector callback: maps a window of tokens to the entity type detected at each position (or undefined). */
type DetectSensitiveInfoEntities<T> = (
  tokens: string[]
) => Array<ArcjetSensitiveInfoType | T | undefined>;

/**
 * Entity names valid for a given detector: the built-in Arcjet entity types,
 * plus any custom entity strings a supplied detector can return.
 */
type ValidEntities<Detect> = Array<
  undefined extends Detect
    ? ArcjetSensitiveInfoType
    : Detect extends DetectSensitiveInfoEntities<infer CustomEntities>
    ? ArcjetSensitiveInfoType | CustomEntities
    : never
>;

export type { ArcjetSensitiveInfoType, RedactOptions };

/** Options for {@link ArcjetRedact}. */
export interface ArcjetRedactOptions<Detect> extends BaseLLMCallOptions {
  /** The LLM that receives the redacted prompt. */
  llm: BaseLLM;
  /** Entity types to redact; must be non-empty when provided (constructor throws otherwise). */
  entities?: ValidEntities<Detect>;
  /** Window size passed through to @arcjet/redact — TODO confirm exact semantics against its docs. */
  contextWindowSize?: number;
  /** Optional custom detector for additional entity types. */
  detect?: Detect;
  /** Optional custom replacement generator for redacted entities. */
  replace?: (entity: ValidEntities<Detect>[number]) => string | undefined;
}
/**
 * An LLM wrapper that redacts sensitive entities from the prompt before
 * invoking an underlying LLM, then restores the original values in its output.
 */
export class ArcjetRedact<
  Detect extends DetectSensitiveInfoEntities<CustomEntities> | undefined,
  CustomEntities extends string
> extends LLM {
  static lc_name() {
    return "ArcjetRedact";
  }

  llm: BaseLLM;

  entities?: ValidEntities<Detect>;

  contextWindowSize?: number;

  detect?: Detect;

  replace?: (entity: ValidEntities<Detect>[number]) => string | undefined;

  constructor(options: ArcjetRedactOptions<Detect>) {
    super(options);
    const { llm, entities, contextWindowSize, detect, replace } = options;
    // An explicitly-empty entity list would redact nothing; reject it.
    if (entities && entities.length === 0) {
      throw new Error("no entities configured for redaction");
    }
    this.llm = llm;
    this.entities = entities;
    this.contextWindowSize = contextWindowSize;
    this.detect = detect;
    this.replace = replace;
  }

  _llmType() {
    return "arcjet_redact";
  }

  /** Redact the input, forward it to the wrapped LLM, then unredact the result. */
  async _call(input: string, options?: BaseLLMCallOptions): Promise<string> {
    const redactionOptions: RedactOptions<Detect> = {
      entities: this.entities,
      contextWindowSize: this.contextWindowSize,
      detect: this.detect,
      replace: this.replace,
    };
    const { redact } = await import("@arcjet/redact");
    const [redactedInput, unredact] = await redact(input, redactionOptions);
    // Invoke the underlying LLM with the prompt and options
    const llmOutput = await this.llm.invoke(redactedInput, options);
    return unredact(llmOutput);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/gradient_ai.ts
|
import { Gradient } from "@gradientai/nodejs-sdk";
import {
type BaseLLMCallOptions,
type BaseLLMParams,
LLM,
} from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* The GradientLLMParams interface defines the input parameters for
* the GradientLLM class.
*/
export interface GradientLLMParams extends BaseLLMParams {
/**
* Gradient AI Access Token.
* Provide Access Token if you do not wish to automatically pull from env.
*/
gradientAccessKey?: string;
/**
* Gradient Workspace Id.
* Provide workspace id if you do not wish to automatically pull from env.
*/
workspaceId?: string;
/**
* Parameters accepted by the Gradient npm package.
*/
inferenceParameters?: Record<string, unknown>;
/**
* Gradient AI Model Slug.
*/
modelSlug?: string;
/**
* Gradient Adapter ID for custom fine tuned models.
*/
adapterId?: string;
}
/**
* The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
* This requires your Gradient AI Access Token which is autoloaded if not specified.
*/
/**
 * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
 * This requires your Gradient AI Access Token which is autoloaded if not specified.
 */
export class GradientLLM extends LLM<BaseLLMCallOptions> {
  static lc_name() {
    return "GradientLLM";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      gradientAccessKey: "GRADIENT_ACCESS_TOKEN",
      workspaceId: "GRADIENT_WORKSPACE_ID",
    };
  }

  modelSlug = "llama2-7b-chat";

  adapterId?: string;

  gradientAccessKey?: string;

  workspaceId?: string;

  inferenceParameters?: Record<string, unknown>;

  lc_serializable = true;

  // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  model: any;

  constructor(fields: GradientLLMParams) {
    super(fields);
    this.modelSlug = fields?.modelSlug ?? this.modelSlug;
    this.adapterId = fields?.adapterId;
    this.gradientAccessKey =
      fields?.gradientAccessKey ??
      getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
    this.workspaceId =
      fields?.workspaceId ?? getEnvironmentVariable("GRADIENT_WORKSPACE_ID");
    this.inferenceParameters = fields.inferenceParameters;
    if (!this.gradientAccessKey) {
      throw new Error("Missing Gradient AI Access Token");
    }
    if (!this.workspaceId) {
      throw new Error("Missing Gradient AI Workspace ID");
    }
  }

  _llmType() {
    return "gradient_ai";
  }

  /**
   * Calls the Gradient AI endpoint and retrieves the result.
   * @param {string} prompt The input prompt.
   * @returns {Promise<string>} A promise that resolves to the generated string.
   */
  /** @ignore */
  async _call(
    prompt: string,
    _options: this["ParsedCallOptions"]
  ): Promise<string> {
    await this.setModel();
    // GradientLLM does not export the CompleteResponse type. Once it does, we can use it here.
    interface CompleteResponse {
      finishReason: string;
      generatedOutput: string;
    }
    const request = { query: prompt, ...this.inferenceParameters };
    const response = (await this.caller.call(async () =>
      this.model.complete(request)
    )) as CompleteResponse;
    return response.generatedOutput;
  }

  /** Lazily resolves the Gradient model handle (fine-tuned adapter or base model). */
  async setModel() {
    if (this.model) return;
    const gradient = new Gradient({
      accessToken: this.gradientAccessKey,
      workspaceId: this.workspaceId,
    });
    this.model = this.adapterId
      ? await gradient.getModelAdapter({ modelAdapterId: this.adapterId })
      : await gradient.getBaseModel({ baseModelSlug: this.modelSlug });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/aleph_alpha.ts
|
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* Interface for the input parameters specific to the Aleph Alpha LLM.
*/
export interface AlephAlphaInput extends BaseLLMParams {
model: string;
maximum_tokens: number;
minimum_tokens?: number;
echo?: boolean;
temperature?: number;
top_k?: number;
top_p?: number;
presence_penalty?: number;
frequency_penalty?: number;
sequence_penalty?: number;
sequence_penalty_min_length?: number;
repetition_penalties_include_prompt?: boolean;
repetition_penalties_include_completion?: boolean;
use_multiplicative_presence_penalty?: boolean;
use_multiplicative_frequency_penalty?: boolean;
use_multiplicative_sequence_penalty?: boolean;
penalty_bias?: string;
penalty_exceptions?: string[];
penalty_exceptions_include_stop_sequences?: boolean;
best_of?: number;
n?: number;
logit_bias?: object;
log_probs?: number;
tokens?: boolean;
raw_completion: boolean;
disable_optimizations?: boolean;
completion_bias_inclusion?: string[];
completion_bias_inclusion_first_token_only: boolean;
completion_bias_exclusion?: string[];
completion_bias_exclusion_first_token_only: boolean;
contextual_control_threshold?: number;
control_log_additive: boolean;
stop?: string[];
aleph_alpha_api_key?: string;
base_url: string;
}
/**
* Specific implementation of a Large Language Model (LLM) designed to
* interact with the Aleph Alpha API. It extends the base LLM class and
* includes a variety of parameters for customizing the behavior of the
* Aleph Alpha model.
*/
/**
 * Specific implementation of a Large Language Model (LLM) designed to
 * interact with the Aleph Alpha API. It extends the base LLM class and
 * includes a variety of parameters for customizing the behavior of the
 * Aleph Alpha model.
 */
export class AlephAlpha extends LLM implements AlephAlphaInput {
  lc_serializable = true;
  model = "luminous-base";
  maximum_tokens = 64;
  minimum_tokens = 0;
  // NOTE(review): `echo` is declared here and on the input type, but is never
  // copied from `fields` nor included in defaultParams, so it is never sent
  // to the API — confirm whether it should be wired through.
  echo: boolean;
  temperature = 0.0;
  top_k: number;
  top_p = 0.0;
  presence_penalty?: number;
  frequency_penalty?: number;
  sequence_penalty?: number;
  sequence_penalty_min_length?: number;
  repetition_penalties_include_prompt?: boolean;
  repetition_penalties_include_completion?: boolean;
  use_multiplicative_presence_penalty?: boolean;
  use_multiplicative_frequency_penalty?: boolean;
  use_multiplicative_sequence_penalty?: boolean;
  penalty_bias?: string;
  penalty_exceptions?: string[];
  penalty_exceptions_include_stop_sequences?: boolean;
  best_of?: number;
  n?: number;
  logit_bias?: object;
  log_probs?: number;
  tokens?: boolean;
  raw_completion: boolean;
  disable_optimizations?: boolean;
  completion_bias_inclusion?: string[];
  completion_bias_inclusion_first_token_only: boolean;
  completion_bias_exclusion?: string[];
  completion_bias_exclusion_first_token_only: boolean;
  contextual_control_threshold?: number;
  control_log_additive: boolean;
  aleph_alpha_api_key? = getEnvironmentVariable("ALEPH_ALPHA_API_KEY");
  stop?: string[];
  base_url = "https://api.aleph-alpha.com/complete";

  /**
   * Copies every supported option from `fields`, keeping the class-level
   * default when a field is not supplied.
   */
  constructor(fields: Partial<AlephAlpha>) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    this.temperature = fields?.temperature ?? this.temperature;
    this.maximum_tokens = fields?.maximum_tokens ?? this.maximum_tokens;
    this.minimum_tokens = fields?.minimum_tokens ?? this.minimum_tokens;
    this.top_k = fields?.top_k ?? this.top_k;
    this.top_p = fields?.top_p ?? this.top_p;
    this.presence_penalty = fields?.presence_penalty ?? this.presence_penalty;
    this.frequency_penalty =
      fields?.frequency_penalty ?? this.frequency_penalty;
    this.sequence_penalty = fields?.sequence_penalty ?? this.sequence_penalty;
    this.sequence_penalty_min_length =
      fields?.sequence_penalty_min_length ?? this.sequence_penalty_min_length;
    this.repetition_penalties_include_prompt =
      fields?.repetition_penalties_include_prompt ??
      this.repetition_penalties_include_prompt;
    this.repetition_penalties_include_completion =
      fields?.repetition_penalties_include_completion ??
      this.repetition_penalties_include_completion;
    this.use_multiplicative_presence_penalty =
      fields?.use_multiplicative_presence_penalty ??
      this.use_multiplicative_presence_penalty;
    this.use_multiplicative_frequency_penalty =
      fields?.use_multiplicative_frequency_penalty ??
      this.use_multiplicative_frequency_penalty;
    this.use_multiplicative_sequence_penalty =
      fields?.use_multiplicative_sequence_penalty ??
      this.use_multiplicative_sequence_penalty;
    this.penalty_bias = fields?.penalty_bias ?? this.penalty_bias;
    this.penalty_exceptions =
      fields?.penalty_exceptions ?? this.penalty_exceptions;
    this.penalty_exceptions_include_stop_sequences =
      fields?.penalty_exceptions_include_stop_sequences ??
      this.penalty_exceptions_include_stop_sequences;
    this.best_of = fields?.best_of ?? this.best_of;
    this.n = fields?.n ?? this.n;
    this.logit_bias = fields?.logit_bias ?? this.logit_bias;
    this.log_probs = fields?.log_probs ?? this.log_probs;
    this.tokens = fields?.tokens ?? this.tokens;
    this.raw_completion = fields?.raw_completion ?? this.raw_completion;
    this.disable_optimizations =
      fields?.disable_optimizations ?? this.disable_optimizations;
    this.completion_bias_inclusion =
      fields?.completion_bias_inclusion ?? this.completion_bias_inclusion;
    this.completion_bias_inclusion_first_token_only =
      fields?.completion_bias_inclusion_first_token_only ??
      this.completion_bias_inclusion_first_token_only;
    this.completion_bias_exclusion =
      fields?.completion_bias_exclusion ?? this.completion_bias_exclusion;
    this.completion_bias_exclusion_first_token_only =
      fields?.completion_bias_exclusion_first_token_only ??
      this.completion_bias_exclusion_first_token_only;
    this.contextual_control_threshold =
      fields?.contextual_control_threshold ?? this.contextual_control_threshold;
    this.control_log_additive =
      fields?.control_log_additive ?? this.control_log_additive;
    this.aleph_alpha_api_key =
      fields?.aleph_alpha_api_key ?? this.aleph_alpha_api_key;
    // Bug fix: `base_url` was previously never copied from `fields`, so a
    // caller-supplied endpoint was silently ignored and the default
    // https://api.aleph-alpha.com/complete was always used.
    this.base_url = fields?.base_url ?? this.base_url;
    this.stop = fields?.stop ?? this.stop;
  }

  /**
   * Validates the environment by ensuring the necessary Aleph Alpha API key
   * is available. Throws an error if the API key is missing.
   */
  validateEnvironment() {
    if (!this.aleph_alpha_api_key) {
      throw new Error(
        "Aleph Alpha API Key is missing in environment variables."
      );
    }
  }

  /** Get the default parameters for calling Aleph Alpha API. */
  get defaultParams() {
    return {
      model: this.model,
      temperature: this.temperature,
      maximum_tokens: this.maximum_tokens,
      minimum_tokens: this.minimum_tokens,
      top_k: this.top_k,
      top_p: this.top_p,
      presence_penalty: this.presence_penalty,
      frequency_penalty: this.frequency_penalty,
      sequence_penalty: this.sequence_penalty,
      sequence_penalty_min_length: this.sequence_penalty_min_length,
      repetition_penalties_include_prompt:
        this.repetition_penalties_include_prompt,
      repetition_penalties_include_completion:
        this.repetition_penalties_include_completion,
      use_multiplicative_presence_penalty:
        this.use_multiplicative_presence_penalty,
      use_multiplicative_frequency_penalty:
        this.use_multiplicative_frequency_penalty,
      use_multiplicative_sequence_penalty:
        this.use_multiplicative_sequence_penalty,
      penalty_bias: this.penalty_bias,
      penalty_exceptions: this.penalty_exceptions,
      penalty_exceptions_include_stop_sequences:
        this.penalty_exceptions_include_stop_sequences,
      best_of: this.best_of,
      n: this.n,
      logit_bias: this.logit_bias,
      log_probs: this.log_probs,
      tokens: this.tokens,
      raw_completion: this.raw_completion,
      disable_optimizations: this.disable_optimizations,
      completion_bias_inclusion: this.completion_bias_inclusion,
      completion_bias_inclusion_first_token_only:
        this.completion_bias_inclusion_first_token_only,
      completion_bias_exclusion: this.completion_bias_exclusion,
      completion_bias_exclusion_first_token_only:
        this.completion_bias_exclusion_first_token_only,
      contextual_control_threshold: this.contextual_control_threshold,
      control_log_additive: this.control_log_additive,
    };
  }

  /** Get the identifying parameters for this LLM. */
  get identifyingParams() {
    return { ...this.defaultParams };
  }

  /** Get the type of LLM. */
  _llmType(): string {
    return "aleph_alpha";
  }

  /**
   * POSTs the prompt plus all configured parameters to the Aleph Alpha
   * completion endpoint and returns the first completion's text.
   * @throws when `stop` is present in both the call options and defaults,
   *   when the HTTP request fails, or when the response has no completions.
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    let stop = options?.stop;
    this.validateEnvironment();
    if (this.stop && stop && this.stop.length > 0 && stop.length > 0) {
      throw new Error("`stop` found in both the input and default params.");
    }
    stop = this.stop ?? stop ?? [];
    const headers = {
      Authorization: `Bearer ${this.aleph_alpha_api_key}`,
      "Content-Type": "application/json",
      Accept: "application/json",
    };
    const data = { prompt, stop_sequences: stop, ...this.defaultParams };
    const responseData = await this.caller.call(async () => {
      const response = await fetch(this.base_url, {
        method: "POST",
        headers,
        body: JSON.stringify(data),
        signal: options.signal,
      });
      if (!response.ok) {
        // consume the response body to release the connection
        // https://undici.nodejs.org/#/?id=garbage-collection
        const text = await response.text();
        const error = new Error(
          `Aleph Alpha call failed with status ${response.status} and body ${text}`
        );
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).response = response;
        throw error;
      }
      return response.json();
    });
    if (
      !responseData.completions ||
      responseData.completions.length === 0 ||
      !responseData.completions[0].completion
    ) {
      throw new Error("No completions found in response");
    }
    return responseData.completions[0].completion ?? "";
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/togetherai.ts
|
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
LLM,
type BaseLLMCallOptions,
type BaseLLMParams,
} from "@langchain/core/language_models/llms";
import { GenerationChunk } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { convertEventStreamToIterableReadableDataStream } from "../utils/event_source_parse.js";
/** Shape of a (non-streaming) TogetherAI inference API response. */
interface TogetherAIInferenceResult {
  object: string;
  status: string;
  prompt: Array<string>;
  model: string;
  model_owner: string;
  tags: object;
  num_returns: number;
  /** Echo of the arguments the request was made with. */
  args: {
    model: string;
    prompt: string;
    temperature: number;
    top_p: number;
    top_k: number;
    max_tokens: number;
    stop: string[];
  };
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  subjobs: Array<any>;
  /** Generated completions returned by the API. */
  output: {
    choices: Array<{
      finish_reason: string;
      index: number;
      text: string;
    }>;
    raw_compute_time: number;
    result_type: string;
  };
}
/**
 * Inputs for the TogetherAI LLM. An API key is required and is read from the
 * `apiKey` field or the `TOGETHER_AI_API_KEY` environment variable; a model
 * name must also be provided via `model` or `modelName`.
 */
export interface TogetherAIInputs extends BaseLLMParams {
/**
* The API key to use for the TogetherAI API.
* @default {process.env.TOGETHER_AI_API_KEY}
*/
apiKey?: string;
/**
* The name of the model to query.
* Alias for `model`
*/
modelName?: string;
/**
* The name of the model to query.
*/
model?: string;
/**
* A decimal number that determines the degree of randomness in the response.
* A value of 1 will always yield the same output.
* A temperature less than 1 favors more correctness and is appropriate for question answering or summarization.
* A value greater than 1 introduces more randomness in the output.
* @default {0.7}
*/
temperature?: number;
/**
* Whether or not to stream tokens as they are generated.
* @default {false}
*/
streaming?: boolean;
/**
* The `topP` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities.
* It specifies a probability threshold, below which all less likely tokens are filtered out.
* This technique helps to maintain diversity and generate more fluent and natural-sounding text.
* @default {0.7}
*/
topP?: number;
/**
* The `topK` parameter is used to limit the number of choices for the next predicted word or token.
* It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence.
* This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.
* @default {50}
*/
topK?: number;
/**
* A number that controls the diversity of generated text by reducing the likelihood of repeated sequences.
* Higher values decrease repetition.
* @default {1}
*/
repetitionPenalty?: number;
/**
* An integer that specifies how many top token log probabilities are included in the response for each token generation step.
*/
logprobs?: number;
/**
* Run an LLM-based input-output safeguard model on top of any model.
*/
safetyModel?: string;
/**
* Limit the number of tokens generated.
*/
maxTokens?: number;
/**
* A list of tokens at which the generation should stop.
*/
stop?: string[];
}
/**
 * Call-time options for the TogetherAI LLM. Each picked field overrides the
 * corresponding constructor input for a single invocation.
 */
export interface TogetherAICallOptions
  extends BaseLLMCallOptions,
    Pick<
      TogetherAIInputs,
      | "modelName"
      | "model"
      | "temperature"
      | "topP"
      | "topK"
      | "repetitionPenalty"
      | "logprobs"
      | "safetyModel"
      | "maxTokens"
      | "stop"
    > {}
/**
 * LLM wrapper for the Together AI inference API
 * (`https://api.together.xyz/inference`).
 *
 * The API key is taken from the `apiKey` input or the
 * `TOGETHER_AI_API_KEY` environment variable; a model name (via `model`
 * or the `modelName` alias) is required.
 */
export class TogetherAI extends LLM<TogetherAICallOptions> {
  lc_serializable = true;

  static inputs: TogetherAIInputs;

  /** Sampling temperature; default 0.7. */
  temperature = 0.7;

  /** Nucleus-sampling probability threshold; default 0.7. */
  topP = 0.7;

  /** Number of highest-probability tokens considered per step; default 50. */
  topK = 50;

  /** Alias for `model`; the two fields are kept in sync. */
  modelName: string;

  model: string;

  /** Whether tokens are streamed as they are generated; default false. */
  streaming = false;

  repetitionPenalty = 1;

  logprobs?: number;

  maxTokens?: number;

  /** Optional LLM-based safeguard model run on top of the main model. */
  safetyModel?: string;

  stop?: string[];

  private apiKey: string;

  private inferenceAPIUrl = "https://api.together.xyz/inference";

  static lc_name() {
    return "TogetherAI";
  }

  /**
   * @throws If no API key is available or no model name was provided.
   */
  constructor(inputs: TogetherAIInputs) {
    super(inputs);
    const apiKey =
      inputs.apiKey ?? getEnvironmentVariable("TOGETHER_AI_API_KEY");
    if (!apiKey) {
      throw new Error("TOGETHER_AI_API_KEY not found.");
    }
    if (!inputs.model && !inputs.modelName) {
      throw new Error("Model name is required for TogetherAI.");
    }
    this.apiKey = apiKey;
    this.temperature = inputs?.temperature ?? this.temperature;
    this.topK = inputs?.topK ?? this.topK;
    this.topP = inputs?.topP ?? this.topP;
    // `model` takes precedence over the deprecated `modelName` alias.
    this.modelName = inputs.model ?? inputs.modelName ?? "";
    this.model = this.modelName;
    this.streaming = inputs.streaming ?? this.streaming;
    this.repetitionPenalty = inputs.repetitionPenalty ?? this.repetitionPenalty;
    this.logprobs = inputs.logprobs;
    this.safetyModel = inputs.safetyModel;
    this.maxTokens = inputs.maxTokens;
    this.stop = inputs.stop;
  }

  _llmType() {
    return "together_ai";
  }

  private constructHeaders() {
    return {
      accept: "application/json",
      "content-type": "application/json",
      Authorization: `Bearer ${this.apiKey}`,
    };
  }

  /**
   * Builds the JSON request payload for the inference endpoint.
   *
   * Per-call options take precedence over the instance defaults.
   * (Previously the order was `this.x ?? options.x`; since the instance
   * fields always hold a value, call options could never take effect.)
   *
   * @param streamTokens When provided, overrides `this.streaming` so the
   *   streaming code path can always request server-side token streaming.
   */
  private constructBody(
    prompt: string,
    options?: this["ParsedCallOptions"],
    streamTokens?: boolean
  ) {
    const body = {
      model: options?.model ?? options?.modelName ?? this.model,
      prompt,
      temperature: options?.temperature ?? this.temperature,
      top_k: options?.topK ?? this.topK,
      top_p: options?.topP ?? this.topP,
      repetition_penalty: options?.repetitionPenalty ?? this.repetitionPenalty,
      logprobs: options?.logprobs ?? this.logprobs,
      stream_tokens: streamTokens ?? this.streaming,
      safety_model: options?.safetyModel ?? this.safetyModel,
      max_tokens: options?.maxTokens ?? this.maxTokens,
      stop: options?.stop ?? this.stop,
    };
    return body;
  }

  /**
   * POSTs the prompt to the inference endpoint through the retrying caller.
   * @throws With the serialized error payload on any non-200 response.
   */
  async completionWithRetry(
    prompt: string,
    options?: this["ParsedCallOptions"]
  ) {
    return this.caller.call(async () => {
      const fetchResponse = await fetch(this.inferenceAPIUrl, {
        method: "POST",
        headers: {
          ...this.constructHeaders(),
        },
        body: JSON.stringify(this.constructBody(prompt, options)),
      });
      if (fetchResponse.status === 200) {
        return fetchResponse.json();
      }
      const errorResponse = await fetchResponse.json();
      throw new Error(
        `Error getting prompt completion from Together AI. ${JSON.stringify(
          errorResponse,
          null,
          2
        )}`
      );
    });
  }

  /** @ignore */
  async _call(
    prompt: string,
    options?: this["ParsedCallOptions"]
  ): Promise<string> {
    const response: TogetherAIInferenceResult = await this.completionWithRetry(
      prompt,
      options
    );
    const outputText = response.output.choices[0].text;
    return outputText ?? "";
  }

  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const fetchResponse = await fetch(this.inferenceAPIUrl, {
      method: "POST",
      headers: {
        ...this.constructHeaders(),
      },
      // Always request token streaming on this code path, regardless of
      // the instance-level `streaming` flag.
      body: JSON.stringify(this.constructBody(prompt, options, true)),
    });
    // Was `!== 200 ?? !fetchResponse.body`: the left side is a boolean and
    // never nullish, so the missing-body check was dead. `||` is intended.
    if (fetchResponse.status !== 200 || !fetchResponse.body) {
      const errorResponse = await fetchResponse.json();
      throw new Error(
        `Error getting prompt completion from Together AI. ${JSON.stringify(
          errorResponse,
          null,
          2
        )}`
      );
    }
    const stream = convertEventStreamToIterableReadableDataStream(
      fetchResponse.body
    );
    for await (const chunk of stream) {
      // The SSE stream terminates with a literal "[DONE]" sentinel.
      if (chunk !== "[DONE]") {
        const parsedChunk = JSON.parse(chunk);
        const generationChunk = new GenerationChunk({
          text: parsedChunk.choices[0].text ?? "",
        });
        yield generationChunk;
        // eslint-disable-next-line no-void
        void runManager?.handleLLMNewToken(generationChunk.text ?? "");
      }
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/cloudflare_workersai.ts
|
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import { convertEventStreamToIterableReadableDataStream } from "../utils/event_source_parse.js";
/**
* @deprecated Install and import from "@langchain/cloudflare" instead.
*
* Interface for CloudflareWorkersAI input parameters.
*/
export interface CloudflareWorkersAIInput {
  /** Cloudflare account ID; falls back to the CLOUDFLARE_ACCOUNT_ID env var. */
  cloudflareAccountId?: string;
  /** Cloudflare API token; falls back to the CLOUDFLARE_API_TOKEN env var. */
  cloudflareApiToken?: string;
  /** Workers AI model name; defaults to "@cf/meta/llama-2-7b-chat-int8". */
  model?: string;
  /** Override for the Workers AI REST endpoint; a trailing "/" is stripped. */
  baseUrl?: string;
  /** Whether to stream tokens as they are generated; defaults to false. */
  streaming?: boolean;
}
/**
* @deprecated Install and import from "@langchain/cloudflare" instead.
*
* Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
* Language Model) class, providing a standard interface for interacting
* with the CloudflareWorkersAI language model.
*/
export class CloudflareWorkersAI
  extends LLM
  implements CloudflareWorkersAIInput
{
  model = "@cf/meta/llama-2-7b-chat-int8";

  cloudflareAccountId?: string;

  cloudflareApiToken?: string;

  baseUrl: string;

  streaming = false;

  static lc_name() {
    return "CloudflareWorkersAI";
  }

  lc_serializable = true;

  constructor(fields?: CloudflareWorkersAIInput & BaseLLMParams) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    this.streaming = fields?.streaming ?? this.streaming;
    this.cloudflareAccountId =
      fields?.cloudflareAccountId ??
      getEnvironmentVariable("CLOUDFLARE_ACCOUNT_ID");
    this.cloudflareApiToken =
      fields?.cloudflareApiToken ??
      getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
    // Note: if no account ID is available, the template below interpolates
    // the literal string "undefined" into the default URL; validateEnvironment
    // relies on that to detect the missing ID at call time.
    this.baseUrl =
      fields?.baseUrl ??
      `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
    if (this.baseUrl.endsWith("/")) {
      this.baseUrl = this.baseUrl.slice(0, -1);
    }
  }

  /**
   * Method to validate the environment.
   *
   * The API token is always required (it is sent as the bearer token on
   * every request). The account ID is only required when it was used to
   * build the default endpoint URL; a caller-supplied `baseUrl` does not
   * need it.
   *
   * The previous implementation guarded both checks behind
   * `this.baseUrl === undefined`, but the constructor always assigns
   * `baseUrl`, so neither check could ever fire.
   */
  validateEnvironment() {
    if (!this.cloudflareApiToken) {
      throw new Error(
        `No Cloudflare API key found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_KEY" in your environment variables.`
      );
    }
    if (
      !this.cloudflareAccountId &&
      this.baseUrl.includes("/accounts/undefined/")
    ) {
      throw new Error(
        `No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`
      );
    }
  }

  /** Get the identifying parameters for this LLM. */
  get identifyingParams() {
    return { model: this.model };
  }

  /**
   * Get the parameters used to invoke the model
   */
  invocationParams() {
    return {
      model: this.model,
    };
  }

  /** Get the type of LLM. */
  _llmType() {
    return "cloudflare";
  }

  /**
   * POSTs the prompt to `{baseUrl}/{model}` through the retrying caller.
   * @param stream Whether to ask the API for a server-sent-event stream.
   * @throws When the HTTP response is not OK; the Response object is
   *   attached to the error as `error.response`.
   */
  async _request(
    prompt: string,
    options: this["ParsedCallOptions"],
    stream?: boolean
  ) {
    this.validateEnvironment();
    const url = `${this.baseUrl}/${this.model}`;
    const headers = {
      Authorization: `Bearer ${this.cloudflareApiToken}`,
      "Content-Type": "application/json",
    };
    const data = { prompt, stream };
    return this.caller.call(async () => {
      const response = await fetch(url, {
        method: "POST",
        headers,
        body: JSON.stringify(data),
        signal: options.signal,
      });
      if (!response.ok) {
        const error = new Error(
          `Cloudflare LLM call failed with status code ${response.status}`
        );
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).response = response;
        throw error;
      }
      return response;
    });
  }

  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const response = await this._request(prompt, options, true);
    if (!response.body) {
      throw new Error("Empty response from Cloudflare. Please try again.");
    }
    const stream = convertEventStreamToIterableReadableDataStream(
      response.body
    );
    for await (const chunk of stream) {
      // The SSE stream terminates with a literal "[DONE]" sentinel.
      if (chunk !== "[DONE]") {
        const parsedChunk = JSON.parse(chunk);
        const generationChunk = new GenerationChunk({
          text: parsedChunk.response,
        });
        yield generationChunk;
        // eslint-disable-next-line no-void
        void runManager?.handleLLMNewToken(generationChunk.text ?? "");
      }
    }
  }

  /** Call out to CloudflareWorkersAI's complete endpoint.
   Args:
     prompt: The prompt to pass into the model.
   Returns:
     The string generated by the model.
   Example:
     let response = CloudflareWorkersAI.call("Tell me a joke.");
  */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    if (!this.streaming) {
      const response = await this._request(prompt, options);
      const responseData = await response.json();
      return responseData.result.response;
    } else {
      // In streaming mode, aggregate the chunks into one final string.
      const stream = this._streamResponseChunks(prompt, options, runManager);
      let finalResult: GenerationChunk | undefined;
      for await (const chunk of stream) {
        if (finalResult === undefined) {
          finalResult = chunk;
        } else {
          finalResult = finalResult.concat(chunk);
        }
      }
      return finalResult?.text ?? "";
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/deepinfra.ts
|
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
// DeepInfra's OpenAI-compatible text-completions endpoint.
export const DEEPINFRA_API_BASE =
  "https://api.deepinfra.com/v1/openai/completions";
// Model used when none is specified.
export const DEFAULT_MODEL_NAME = "mistralai/Mixtral-8x22B-Instruct-v0.1";
// Environment variable consulted for the API key.
export const ENV_VARIABLE = "DEEPINFRA_API_TOKEN";
/** Constructor inputs for the DeepInfra LLM wrapper. */
export interface DeepInfraLLMParams extends BaseLLMParams {
  /** DeepInfra API key; falls back to the DEEPINFRA_API_TOKEN env var. */
  apiKey?: string;
  /** Model name; defaults to DEFAULT_MODEL_NAME. */
  model?: string;
  /** Maximum number of tokens to generate. */
  maxTokens?: number;
  /** Sampling temperature. */
  temperature?: number;
}
export class DeepInfraLLM extends LLM implements DeepInfraLLMParams {
static lc_name() {
return "DeepInfraLLM";
}
lc_serializable = true;
apiKey?: string;
model?: string;
maxTokens?: number;
temperature?: number;
constructor(fields: Partial<DeepInfraLLMParams> = {}) {
super(fields);
this.apiKey = fields.apiKey ?? getEnvironmentVariable(ENV_VARIABLE);
this.model = fields.model ?? DEFAULT_MODEL_NAME;
this.maxTokens = fields.maxTokens;
this.temperature = fields.temperature;
}
_llmType(): string {
return "DeepInfra";
}
async _call(
prompt: string,
options: this["ParsedCallOptions"]
): Promise<string> {
const body = {
temperature: this.temperature,
max_tokens: this.maxTokens,
...options,
prompt,
model: this.model,
};
const response = await this.caller.call(() =>
fetch(DEEPINFRA_API_BASE, {
method: "POST",
headers: {
Authorization: `Bearer ${this.apiKey}`,
"Content-Type": "application/json",
},
body: JSON.stringify(body),
}).then((res) => res.json())
);
return response as string;
}
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/sagemaker_endpoint.ts
|
import {
InvokeEndpointCommand,
InvokeEndpointWithResponseStreamCommand,
SageMakerRuntimeClient,
SageMakerRuntimeClientConfig,
} from "@aws-sdk/client-sagemaker-runtime";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import {
type BaseLLMCallOptions,
type BaseLLMParams,
LLM,
} from "@langchain/core/language_models/llms";
/**
* A handler class to transform input from LLM to a format that SageMaker
* endpoint expects. Similarily, the class also handles transforming output from
* the SageMaker endpoint to a format that LLM class expects.
*
* Example:
* ```
* class ContentHandler implements ContentHandlerBase<string, string> {
* contentType = "application/json"
* accepts = "application/json"
*
* transformInput(prompt: string, modelKwargs: Record<string, unknown>) {
* const inputString = JSON.stringify({
* prompt,
* ...modelKwargs
* })
* return Buffer.from(inputString)
* }
*
* transformOutput(output: Uint8Array) {
* const responseJson = JSON.parse(Buffer.from(output).toString("utf-8"))
* return responseJson[0].generated_text
* }
*
* }
* ```
*/
export abstract class BaseSageMakerContentHandler<InputType, OutputType> {
  /** MIME type of the request payload sent to the endpoint (ContentType). */
  contentType = "text/plain";
  /** MIME type the endpoint is asked to respond with (Accept header). */
  accepts = "text/plain";
  /**
   * Transforms the prompt and model arguments into a specific format for sending to SageMaker.
   * @param {InputType} prompt The prompt to be transformed.
   * @param {Record<string, unknown>} modelKwargs Additional arguments.
   * @returns {Promise<Uint8Array>} A promise that resolves to the formatted data for sending.
   */
  abstract transformInput(
    prompt: InputType,
    modelKwargs: Record<string, unknown>
  ): Promise<Uint8Array>;
  /**
   * Transforms SageMaker output into a desired format.
   * @param {Uint8Array} output The raw output from SageMaker.
   * @returns {Promise<OutputType>} A promise that resolves to the transformed data.
   */
  abstract transformOutput(output: Uint8Array): Promise<OutputType>;
}
/** Content handler specialization for plain-string prompts and completions. */
export type SageMakerLLMContentHandler = BaseSageMakerContentHandler<
  string,
  string
>;
/**
* The SageMakerEndpointInput interface defines the input parameters for
* the SageMakerEndpoint class, which includes the endpoint name, client
* options for the SageMaker client, the content handler, and optional
* keyword arguments for the model and the endpoint.
*/
export interface SageMakerEndpointInput extends BaseLLMParams {
  /**
   * The name of the endpoint from the deployed SageMaker model. Must be unique
   * within an AWS Region.
   */
  endpointName: string;
  /**
   * Options passed to the SageMaker client. Must include a "region".
   */
  clientOptions: SageMakerRuntimeClientConfig;
  /**
   * Key word arguments to pass to the model.
   */
  modelKwargs?: Record<string, unknown>;
  /**
   * Optional attributes passed to the InvokeEndpointCommand
   */
  endpointKwargs?: Record<string, unknown>;
  /**
   * The content handler class that provides an input and output transform
   * functions to handle formats between LLM and the endpoint.
   */
  contentHandler: SageMakerLLMContentHandler;
  /**
   * Whether to invoke the endpoint via InvokeEndpointWithResponseStream
   * and stream tokens back. Defaults to false.
   */
  streaming?: boolean;
}
/**
* The SageMakerEndpoint class is used to interact with SageMaker
* Inference Endpoint models. It uses the AWS client for authentication,
* which automatically loads credentials.
* If a specific credential profile is to be used, the name of the profile
* from the ~/.aws/credentials file must be passed. The credentials or
* roles used should have the required policies to access the SageMaker
* endpoint.
*/
export class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
  lc_serializable = true;
  static lc_name() {
    return "SageMakerEndpoint";
  }
  // Maps nested credential fields to env-var names for secret redaction
  // during serialization.
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      "clientOptions.credentials.accessKeyId": "AWS_ACCESS_KEY_ID",
      "clientOptions.credentials.secretAccessKey": "AWS_SECRET_ACCESS_KEY",
      "clientOptions.credentials.sessionToken": "AWS_SESSION_TOKEN",
    };
  }
  endpointName: string;
  modelKwargs?: Record<string, unknown>;
  endpointKwargs?: Record<string, unknown>;
  client: SageMakerRuntimeClient;
  contentHandler: SageMakerLLMContentHandler;
  streaming: boolean;
  // Validates the required fields (region, endpointName, contentHandler)
  // eagerly so misconfiguration fails at construction, not at call time.
  constructor(fields: SageMakerEndpointInput) {
    super(fields);
    if (!fields.clientOptions.region) {
      throw new Error(
        `Please pass a "clientOptions" object with a "region" field to the constructor`
      );
    }
    const endpointName = fields?.endpointName;
    if (!endpointName) {
      throw new Error(`Please pass an "endpointName" field to the constructor`);
    }
    const contentHandler = fields?.contentHandler;
    if (!contentHandler) {
      throw new Error(
        `Please pass a "contentHandler" field to the constructor`
      );
    }
    this.endpointName = fields.endpointName;
    this.contentHandler = fields.contentHandler;
    this.endpointKwargs = fields.endpointKwargs;
    this.modelKwargs = fields.modelKwargs;
    this.streaming = fields.streaming ?? false;
    this.client = new SageMakerRuntimeClient(fields.clientOptions);
  }
  _llmType() {
    return "sagemaker_endpoint";
  }
  /**
   * Calls the SageMaker endpoint and retrieves the result.
   * @param {string} prompt The input prompt.
   * @param {this["ParsedCallOptions"]} options Parsed call options.
   * @param {CallbackManagerForLLMRun} runManager Optional run manager.
   * @returns {Promise<string>} A promise that resolves to the generated string.
   */
  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    // Dispatch on the instance-level flag set at construction time.
    return this.streaming
      ? await this.streamingCall(prompt, options, runManager)
      : await this.noStreamingCall(prompt, options);
  }
  // Streams the response and concatenates all chunk texts into one string.
  private async streamingCall(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    const chunks = [];
    for await (const chunk of this._streamResponseChunks(
      prompt,
      options,
      runManager
    )) {
      chunks.push(chunk.text);
    }
    return chunks.join("");
  }
  // Single InvokeEndpoint round trip; the content handler converts the
  // prompt to bytes and the response bytes back to a string.
  private async noStreamingCall(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const body = await this.contentHandler.transformInput(
      prompt,
      this.modelKwargs ?? {}
    );
    const { contentType, accepts } = this.contentHandler;
    const response = await this.caller.call(() =>
      this.client.send(
        new InvokeEndpointCommand({
          EndpointName: this.endpointName,
          Body: body,
          ContentType: contentType,
          Accept: accepts,
          ...this.endpointKwargs,
        }),
        { abortSignal: options.signal }
      )
    );
    if (response.Body === undefined) {
      throw new Error("Inference result missing Body");
    }
    return this.contentHandler.transformOutput(response.Body);
  }
  /**
   * Streams response chunks from the SageMaker endpoint.
   * @param {string} prompt The input prompt.
   * @param {this["ParsedCallOptions"]} options Parsed call options.
   * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const body = await this.contentHandler.transformInput(
      prompt,
      this.modelKwargs ?? {}
    );
    const { contentType, accepts } = this.contentHandler;
    const stream = await this.caller.call(() =>
      this.client.send(
        new InvokeEndpointWithResponseStreamCommand({
          EndpointName: this.endpointName,
          Body: body,
          ContentType: contentType,
          Accept: accepts,
          ...this.endpointKwargs,
        }),
        { abortSignal: options.signal }
      )
    );
    if (!stream.Body) {
      throw new Error("Inference result missing Body");
    }
    // The response stream interleaves payload parts with possible error
    // events; surface either error variant as a thrown exception.
    for await (const chunk of stream.Body) {
      if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
        const text = await this.contentHandler.transformOutput(
          chunk.PayloadPart.Bytes
        );
        yield new GenerationChunk({
          text,
          generationInfo: {
            ...chunk,
            // Drop the bulky raw response from generationInfo.
            response: undefined,
          },
        });
        await runManager?.handleLLMNewToken(text);
      } else if (chunk.InternalStreamFailure) {
        throw new Error(chunk.InternalStreamFailure.message);
      } else if (chunk.ModelStreamError) {
        throw new Error(chunk.ModelStreamError.message);
      }
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/layerup_security.ts
|
import {
LLM,
BaseLLM,
type BaseLLMCallOptions,
} from "@langchain/core/language_models/llms";
import {
GuardrailResponse,
LayerupSecurity as LayerupSecuritySDK,
LLMMessage,
} from "@layerup/layerup-security";
/** Options for wrapping an existing LLM with Layerup Security guardrails. */
export interface LayerupSecurityOptions extends BaseLLMCallOptions {
  /** The underlying LLM that actually generates completions (required). */
  llm: BaseLLM;
  /** Layerup API key (required at construction time despite being optional here). */
  layerupApiKey?: string;
  /** Override for the Layerup API base URL. */
  layerupApiBaseUrl?: string;
  /** Guardrail identifiers executed against the prompt before invoking the LLM. */
  promptGuardrails?: string[];
  /** Guardrail identifiers executed against the LLM response. */
  responseGuardrails?: string[];
  /** When true, the prompt is masked via the SDK's maskPrompt before the LLM call and the result unmasked afterwards. */
  mask?: boolean;
  /** Arbitrary metadata forwarded with every Layerup API call. */
  metadata?: Record<string, unknown>;
  /** Invoked when a prompt guardrail is violated; must return the message to reply with. */
  handlePromptGuardrailViolation?: (violation: GuardrailResponse) => LLMMessage;
  /** Invoked when a response guardrail is violated; must return the message to reply with. */
  handleResponseGuardrailViolation?: (
    violation: GuardrailResponse
  ) => LLMMessage;
}
/**
 * Fallback handler for guardrail violations: reply with the guardrail's
 * canned response when one is configured, otherwise fail loudly so the
 * violation cannot be silently ignored.
 */
function defaultGuardrailViolationHandler(
  violation: GuardrailResponse
): LLMMessage {
  const canned = violation.canned_response;
  if (canned) {
    return canned;
  }
  const offender = violation.offending_guardrail;
  const guardrailName = offender ? `Guardrail ${offender}` : "A guardrail";
  throw new Error(
    `${guardrailName} was violated without a proper guardrail violation handler.`
  );
}
/**
 * LLM wrapper that runs Layerup Security guardrails around another LLM:
 * optionally masks the prompt, executes prompt guardrails, invokes the
 * wrapped LLM, then executes response guardrails on the result.
 */
export class LayerupSecurity extends LLM {
  static lc_name() {
    return "LayerupSecurity";
  }
  lc_serializable = true;
  llm: BaseLLM;
  layerupApiKey: string;
  layerupApiBaseUrl = "https://api.uselayerup.com/v1";
  promptGuardrails: string[] = [];
  responseGuardrails: string[] = [];
  mask = false;
  metadata: Record<string, unknown> = {};
  handlePromptGuardrailViolation: (violation: GuardrailResponse) => LLMMessage =
    defaultGuardrailViolationHandler;
  handleResponseGuardrailViolation: (
    violation: GuardrailResponse
  ) => LLMMessage = defaultGuardrailViolationHandler;
  private layerup: LayerupSecuritySDK;
  // Both an LLM to wrap and an API key are mandatory; all other options
  // fall back to the field defaults above.
  constructor(options: LayerupSecurityOptions) {
    super(options);
    if (!options.llm) {
      throw new Error("Layerup Security requires an LLM to be provided.");
    } else if (!options.layerupApiKey) {
      throw new Error("Layerup Security requires an API key to be provided.");
    }
    this.llm = options.llm;
    this.layerupApiKey = options.layerupApiKey;
    this.layerupApiBaseUrl =
      options.layerupApiBaseUrl || this.layerupApiBaseUrl;
    this.promptGuardrails = options.promptGuardrails || this.promptGuardrails;
    this.responseGuardrails =
      options.responseGuardrails || this.responseGuardrails;
    this.mask = options.mask || this.mask;
    this.metadata = options.metadata || this.metadata;
    this.handlePromptGuardrailViolation =
      options.handlePromptGuardrailViolation ||
      this.handlePromptGuardrailViolation;
    this.handleResponseGuardrailViolation =
      options.handleResponseGuardrailViolation ||
      this.handleResponseGuardrailViolation;
    this.layerup = new LayerupSecuritySDK({
      apiKey: this.layerupApiKey,
      baseURL: this.layerupApiBaseUrl,
    });
  }
  _llmType() {
    return "layerup_security";
  }
  async _call(input: string, options?: BaseLLMCallOptions): Promise<string> {
    // Since LangChain LLMs only support string inputs, we will wrap each call to Layerup in a single-message
    // array of messages, then extract the string element when we need to access it.
    let messages: LLMMessage[] = [
      {
        role: "user",
        content: input,
      },
    ];
    let unmaskResponse;
    if (this.mask) {
      // maskPrompt returns the masked messages plus a function that
      // restores the masked content in the final response.
      [messages, unmaskResponse] = await this.layerup.maskPrompt(
        messages,
        this.metadata
      );
    }
    if (this.promptGuardrails.length > 0) {
      const securityResponse = await this.layerup.executeGuardrails(
        this.promptGuardrails,
        messages,
        input,
        this.metadata
      );
      // If there is a guardrail violation, extract the canned response and reply with that instead
      if (!securityResponse.all_safe) {
        const replacedResponse: LLMMessage =
          this.handlePromptGuardrailViolation(securityResponse);
        return replacedResponse.content as string;
      }
    }
    // Invoke the underlying LLM with the prompt and options
    let result = await this.llm.invoke(messages[0].content as string, options);
    if (this.mask && unmaskResponse) {
      result = unmaskResponse(result);
    }
    // Add to messages array for response guardrail handler
    messages.push({
      role: "assistant",
      content: result,
    });
    if (this.responseGuardrails.length > 0) {
      const securityResponse = await this.layerup.executeGuardrails(
        this.responseGuardrails,
        messages,
        result,
        this.metadata
      );
      // If there is a guardrail violation, extract the canned response and reply with that instead
      if (!securityResponse.all_safe) {
        const replacedResponse: LLMMessage =
          this.handleResponseGuardrailViolation(securityResponse);
        return replacedResponse.content as string;
      }
    }
    return result;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/writer.ts
|
import { Writer as WriterClient } from "@writerai/writer-sdk";
import { type BaseLLMParams, LLM } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* Interface for the input parameters specific to the Writer model.
*/
export interface WriterInput extends BaseLLMParams {
  /** Writer API key; falls back to the WRITER_API_KEY env var. */
  apiKey?: string;
  /** Writer organization ID; falls back to the WRITER_ORG_ID env var. */
  orgId?: string | number;
  /** Model to use; defaults to "palmyra-instruct". */
  model?: string;
  /** Sampling temperature to use */
  temperature?: number;
  /** Minimum number of tokens to generate. */
  minTokens?: number;
  /** Maximum number of tokens to generate in the completion. */
  maxTokens?: number;
  /** Generates this many completions server-side and returns the "best". */
  bestOf?: number;
  /** Penalizes repeated tokens according to frequency. */
  frequencyPenalty?: number;
  /** Number of top token log probabilities to include in the response — NOTE(review): numeric count, not a boolean flag; confirm against the Writer API. */
  logprobs?: number;
  /** Number of completions to generate. */
  n?: number;
  /** Penalizes repeated tokens regardless of frequency. */
  presencePenalty?: number;
  /** Total probability mass of tokens to consider at each step. */
  topP?: number;
}
/**
* Class representing a Writer Large Language Model (LLM). It interacts
* with the Writer API to generate text completions.
*/
export class Writer extends LLM implements WriterInput {
static lc_name() {
return "Writer";
}
get lc_secrets(): { [key: string]: string } | undefined {
return {
apiKey: "WRITER_API_KEY",
orgId: "WRITER_ORG_ID",
};
}
get lc_aliases(): { [key: string]: string } | undefined {
return {
apiKey: "writer_api_key",
orgId: "writer_org_id",
};
}
lc_serializable = true;
apiKey: string;
orgId: number;
model = "palmyra-instruct";
temperature?: number;
minTokens?: number;
maxTokens?: number;
bestOf?: number;
frequencyPenalty?: number;
logprobs?: number;
n?: number;
presencePenalty?: number;
topP?: number;
constructor(fields?: WriterInput) {
super(fields ?? {});
const apiKey = fields?.apiKey ?? getEnvironmentVariable("WRITER_API_KEY");
const orgId = fields?.orgId ?? getEnvironmentVariable("WRITER_ORG_ID");
if (!apiKey) {
throw new Error(
"Please set the WRITER_API_KEY environment variable or pass it to the constructor as the apiKey field."
);
}
if (!orgId) {
throw new Error(
"Please set the WRITER_ORG_ID environment variable or pass it to the constructor as the orgId field."
);
}
this.apiKey = apiKey;
this.orgId = typeof orgId === "string" ? parseInt(orgId, 10) : orgId;
this.model = fields?.model ?? this.model;
this.temperature = fields?.temperature ?? this.temperature;
this.minTokens = fields?.minTokens ?? this.minTokens;
this.maxTokens = fields?.maxTokens ?? this.maxTokens;
this.bestOf = fields?.bestOf ?? this.bestOf;
this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
this.logprobs = fields?.logprobs ?? this.logprobs;
this.n = fields?.n ?? this.n;
this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;
this.topP = fields?.topP ?? this.topP;
}
_llmType() {
return "writer";
}
/** @ignore */
async _call(
prompt: string,
options: this["ParsedCallOptions"]
): Promise<string> {
const sdk = new WriterClient({
security: {
apiKey: this.apiKey,
},
organizationId: this.orgId,
});
return this.caller.callWithOptions({ signal: options.signal }, async () => {
try {
const res = await sdk.completions.create({
completionRequest: {
prompt,
stop: options.stop,
temperature: this.temperature,
minTokens: this.minTokens,
maxTokens: this.maxTokens,
bestOf: this.bestOf,
n: this.n,
frequencyPenalty: this.frequencyPenalty,
logprobs: this.logprobs,
presencePenalty: this.presencePenalty,
topP: this.topP,
},
modelId: this.model,
});
return (
res.completionResponse?.choices?.[0].text ?? "No completion found."
);
} catch (e) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(e as any).response = (e as any).rawResponse;
throw e;
}
});
}
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/ollama.ts
|
import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import type { StringWithAutocomplete } from "@langchain/core/utils/types";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { createOllamaGenerateStream, OllamaInput } from "../utils/ollama.js";
export { type OllamaInput };
export interface OllamaCallOptions extends BaseLanguageModelCallOptions {
  /** Images forwarded verbatim in the generate request for multimodal models — presumably base64-encoded; confirm against the Ollama API docs. */
  images?: string[];
}
/**
* @deprecated Ollama LLM has moved to the `@langchain/ollama` package. Please install it using `npm install @langchain/ollama` and import it from there.
*
* Class that represents the Ollama language model. It extends the base
* LLM class and implements the OllamaInput interface.
* @example
* ```typescript
* const ollama = new Ollama({
* baseUrl: "http://api.example.com",
* model: "llama2",
* });
*
* // Streaming translation from English to German
* const stream = await ollama.stream(
* `Translate "I love programming" into German.`
* );
*
* const chunks = [];
* for await (const chunk of stream) {
* chunks.push(chunk);
* }
*
* console.log(chunks.join(""));
* ```
*/
export class Ollama extends LLM<OllamaCallOptions> implements OllamaInput {
static lc_name() {
return "Ollama";
}
lc_serializable = true;
model = "llama2";
baseUrl = "http://localhost:11434";
keepAlive = "5m";
embeddingOnly?: boolean;
f16KV?: boolean;
frequencyPenalty?: number;
headers?: Record<string, string>;
logitsAll?: boolean;
lowVram?: boolean;
mainGpu?: number;
mirostat?: number;
mirostatEta?: number;
mirostatTau?: number;
numBatch?: number;
numCtx?: number;
numGpu?: number;
numGqa?: number;
numKeep?: number;
numPredict?: number;
numThread?: number;
penalizeNewline?: boolean;
presencePenalty?: number;
repeatLastN?: number;
repeatPenalty?: number;
ropeFrequencyBase?: number;
ropeFrequencyScale?: number;
temperature?: number;
stop?: string[];
tfsZ?: number;
topK?: number;
topP?: number;
typicalP?: number;
useMLock?: boolean;
useMMap?: boolean;
vocabOnly?: boolean;
format?: StringWithAutocomplete<"json">;
constructor(fields: OllamaInput & BaseLLMParams) {
super(fields);
this.model = fields.model ?? this.model;
this.baseUrl = fields.baseUrl?.endsWith("/")
? fields.baseUrl.slice(0, -1)
: fields.baseUrl ?? this.baseUrl;
this.keepAlive = fields.keepAlive ?? this.keepAlive;
this.headers = fields.headers ?? this.headers;
this.embeddingOnly = fields.embeddingOnly;
this.f16KV = fields.f16KV;
this.frequencyPenalty = fields.frequencyPenalty;
this.logitsAll = fields.logitsAll;
this.lowVram = fields.lowVram;
this.mainGpu = fields.mainGpu;
this.mirostat = fields.mirostat;
this.mirostatEta = fields.mirostatEta;
this.mirostatTau = fields.mirostatTau;
this.numBatch = fields.numBatch;
this.numCtx = fields.numCtx;
this.numGpu = fields.numGpu;
this.numGqa = fields.numGqa;
this.numKeep = fields.numKeep;
this.numPredict = fields.numPredict;
this.numThread = fields.numThread;
this.penalizeNewline = fields.penalizeNewline;
this.presencePenalty = fields.presencePenalty;
this.repeatLastN = fields.repeatLastN;
this.repeatPenalty = fields.repeatPenalty;
this.ropeFrequencyBase = fields.ropeFrequencyBase;
this.ropeFrequencyScale = fields.ropeFrequencyScale;
this.temperature = fields.temperature;
this.stop = fields.stop;
this.tfsZ = fields.tfsZ;
this.topK = fields.topK;
this.topP = fields.topP;
this.typicalP = fields.typicalP;
this.useMLock = fields.useMLock;
this.useMMap = fields.useMMap;
this.vocabOnly = fields.vocabOnly;
this.format = fields.format;
}
_llmType() {
return "ollama";
}
invocationParams(options?: this["ParsedCallOptions"]) {
return {
model: this.model,
format: this.format,
keep_alive: this.keepAlive,
images: options?.images,
options: {
embedding_only: this.embeddingOnly,
f16_kv: this.f16KV,
frequency_penalty: this.frequencyPenalty,
logits_all: this.logitsAll,
low_vram: this.lowVram,
main_gpu: this.mainGpu,
mirostat: this.mirostat,
mirostat_eta: this.mirostatEta,
mirostat_tau: this.mirostatTau,
num_batch: this.numBatch,
num_ctx: this.numCtx,
num_gpu: this.numGpu,
num_gqa: this.numGqa,
num_keep: this.numKeep,
num_predict: this.numPredict,
num_thread: this.numThread,
penalize_newline: this.penalizeNewline,
presence_penalty: this.presencePenalty,
repeat_last_n: this.repeatLastN,
repeat_penalty: this.repeatPenalty,
rope_frequency_base: this.ropeFrequencyBase,
rope_frequency_scale: this.ropeFrequencyScale,
temperature: this.temperature,
stop: options?.stop ?? this.stop,
tfs_z: this.tfsZ,
top_k: this.topK,
top_p: this.topP,
typical_p: this.typicalP,
use_mlock: this.useMLock,
use_mmap: this.useMMap,
vocab_only: this.vocabOnly,
},
};
}
/**
 * Streams generation chunks from the Ollama generate endpoint.
 * Intermediate chunks carry the generated text; the final (`done`) chunk
 * carries only run statistics reported by Ollama.
 */
async *_streamResponseChunks(
  prompt: string,
  options: this["ParsedCallOptions"],
  runManager?: CallbackManagerForLLMRun
): AsyncGenerator<GenerationChunk> {
  // this.caller wraps the initial request with the configured
  // retry/concurrency policy.
  const stream = await this.caller.call(async () =>
    createOllamaGenerateStream(
      this.baseUrl,
      { ...this.invocationParams(options), prompt },
      {
        ...options,
        headers: this.headers,
      }
    )
  );
  for await (const chunk of stream) {
    if (!chunk.done) {
      // Pass the rest of the payload through as generationInfo, clearing
      // `response` so the text is not duplicated there.
      yield new GenerationChunk({
        text: chunk.response,
        generationInfo: {
          ...chunk,
          response: undefined,
        },
      });
      await runManager?.handleLLMNewToken(chunk.response ?? "");
    } else {
      // Final chunk: no text, only timing/token statistics.
      yield new GenerationChunk({
        text: "",
        generationInfo: {
          model: chunk.model,
          total_duration: chunk.total_duration,
          load_duration: chunk.load_duration,
          prompt_eval_count: chunk.prompt_eval_count,
          prompt_eval_duration: chunk.prompt_eval_duration,
          eval_count: chunk.eval_count,
          eval_duration: chunk.eval_duration,
        },
      });
    }
  }
}
/**
 * Runs a full (non-streaming) completion by draining the streaming
 * generator and concatenating every emitted chunk's text.
 * @ignore
 */
async _call(
  prompt: string,
  options: this["ParsedCallOptions"],
  runManager?: CallbackManagerForLLMRun
): Promise<string> {
  let output = "";
  const stream = this._streamResponseChunks(prompt, options, runManager);
  for await (const chunk of stream) {
    output += chunk.text;
  }
  return output;
}
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/fireworks.ts
|
import {
type OpenAIClient,
type OpenAICallOptions,
type OpenAIInput,
type OpenAICoreRequestOptions,
OpenAI,
} from "@langchain/openai";
import type { BaseLLMParams } from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
type FireworksUnsupportedArgs =
| "frequencyPenalty"
| "presencePenalty"
| "bestOf"
| "logitBias";
type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
export type FireworksCallOptions = Partial<
Omit<OpenAICallOptions, FireworksUnsupportedCallOptions>
>;
/**
* Wrapper around Fireworks API for large language models
*
* Fireworks API is compatible to the OpenAI API with some limitations described in
* https://readme.fireworks.ai/docs/openai-compatibility.
*
* To use, you should have the `openai` package installed and
* the `FIREWORKS_API_KEY` environment variable set.
*/
/**
 * Wrapper around the Fireworks API for large language models.
 *
 * Fireworks exposes an OpenAI-compatible API with a few restrictions
 * (single string prompt only; no frequency/presence penalty, best_of,
 * or logit bias) — see
 * https://readme.fireworks.ai/docs/openai-compatibility.
 *
 * Requires the `openai` package and either the `FIREWORKS_API_KEY`
 * environment variable or an explicit `fireworksApiKey`.
 */
export class Fireworks extends OpenAI<FireworksCallOptions> {
  lc_serializable = true;

  /** The resolved Fireworks API key (also readable via lc_secrets). */
  fireworksApiKey?: string;

  static lc_name() {
    return "Fireworks";
  }

  _llmType() {
    return "fireworks";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      fireworksApiKey: "FIREWORKS_API_KEY",
    };
  }

  constructor(
    fields?: Partial<
      Omit<OpenAIInput, "openAIApiKey" | FireworksUnsupportedArgs>
    > &
      BaseLLMParams & { fireworksApiKey?: string }
  ) {
    const apiKey =
      fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
    if (!apiKey) {
      throw new Error(
        `Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`
      );
    }
    super({
      ...fields,
      openAIApiKey: apiKey,
      modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
      configuration: {
        baseURL: "https://api.fireworks.ai/inference/v1",
      },
    });
    this.fireworksApiKey = apiKey;
  }

  toJSON() {
    const serialized = super.toJSON();
    // Strip secrets and endpoint configuration from the serialized form.
    if (
      "kwargs" in serialized &&
      typeof serialized.kwargs === "object" &&
      serialized.kwargs != null
    ) {
      delete serialized.kwargs.openai_api_key;
      delete serialized.kwargs.configuration;
    }
    return serialized;
  }

  async completionWithRetry(
    request: OpenAIClient.CompletionCreateParamsStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<AsyncIterable<OpenAIClient.Completion>>;

  async completionWithRetry(
    request: OpenAIClient.CompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<OpenAIClient.Completions.Completion>;

  /**
   * Calls the Fireworks API with retry logic in case of failures.
   * Normalizes the request to Fireworks' restrictions before delegating
   * to the OpenAI implementation.
   * @param request The request to send to the Fireworks API.
   * @param options Optional configuration for the API call.
   * @returns The response from the Fireworks API.
   */
  async completionWithRetry(
    request:
      | OpenAIClient.CompletionCreateParamsStreaming
      | OpenAIClient.CompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<
    AsyncIterable<OpenAIClient.Completion> | OpenAIClient.Completions.Completion
  > {
    // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
    if (Array.isArray(request.prompt)) {
      if (request.prompt.length > 1) {
        throw new Error("Multiple prompts are not supported by Fireworks");
      }
      const [singlePrompt] = request.prompt;
      if (typeof singlePrompt !== "string") {
        throw new Error("Only string prompts are supported by Fireworks");
      }
      request.prompt = singlePrompt;
    }
    // Fireworks rejects these OpenAI parameters, so strip them.
    delete request.frequency_penalty;
    delete request.presence_penalty;
    delete request.best_of;
    delete request.logit_bias;
    // The branch looks redundant, but narrowing on `stream` lets
    // TypeScript pick the matching overload of the parent method.
    if (request.stream === true) {
      return super.completionWithRetry(request, options);
    }
    return super.completionWithRetry(request, options);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/watsonx_ai.ts
|
import {
type BaseLLMCallOptions,
type BaseLLMParams,
LLM,
} from "@langchain/core/language_models/llms";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
* The WatsonxAIParams interface defines the input parameters for
* the WatsonxAI class.
*/
/** @deprecated Please use newer implementation @langchain/community/llms/ibm instead */
export interface WatsonxAIParams extends BaseLLMParams {
  /**
   * WatsonX AI Complete Endpoint.
   * Can be used if you want a fully custom endpoint.
   */
  endpoint?: string;
  /**
   * IBM Cloud Compute Region.
   * eg. us-south, us-east, etc.
   */
  region?: string;
  /**
   * WatsonX AI Version.
   * Date representing the WatsonX AI Version.
   * eg. 2023-05-29
   */
  version?: string;
  /**
   * IBM Cloud API Key.
   * Provide the key if you do not wish to automatically pull it from
   * the IBM_CLOUD_API_KEY environment variable.
   */
  ibmCloudApiKey?: string;
  /**
   * WatsonX AI Project ID.
   * Provide the ID if you do not wish to automatically pull it from
   * the WATSONX_PROJECT_ID environment variable.
   */
  projectId?: string;
  /**
   * Parameters accepted by the WatsonX AI Endpoint.
   */
  modelParameters?: Record<string, unknown>;
  /**
   * WatsonX AI Model ID.
   */
  modelId?: string;
}
/** Builds the watsonx text-generation endpoint URL for a region and API version. */
const endpointConstructor = (region: string, version: string): string => {
  return `https://${region}.ml.cloud.ibm.com/ml/v1-beta/generation/text?version=${version}`;
};
/**
* The WatsonxAI class is used to interact with Watsonx AI
* Inference Endpoint models. It uses IBM Cloud for authentication.
* This requires your IBM Cloud API Key which is autoloaded if not specified.
*/
/**
 * The WatsonxAI class is used to interact with Watsonx AI
 * Inference Endpoint models. It uses IBM Cloud for authentication.
 * This requires your IBM Cloud API Key which is autoloaded if not specified.
 */
export class WatsonxAI extends LLM<BaseLLMCallOptions> {
  lc_serializable = true;
  static lc_name() {
    return "WatsonxAI";
  }
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      ibmCloudApiKey: "IBM_CLOUD_API_KEY",
      projectId: "WATSONX_PROJECT_ID",
    };
  }
  // Fully-resolved generation endpoint URL (custom, or built from region+version).
  endpoint: string;
  region = "us-south";
  version = "2023-05-29";
  modelId = "meta-llama/llama-2-70b-chat";
  // NOTE(review): declared but never assigned in this class; model parameters
  // are carried by `modelParameters` below.
  modelKwargs?: Record<string, unknown>;
  ibmCloudApiKey?: string;
  // Cached IAM bearer token and its absolute expiry (ms since epoch).
  ibmCloudToken?: string;
  ibmCloudTokenExpiresAt?: number;
  projectId?: string;
  modelParameters?: Record<string, unknown>;
  constructor(fields: WatsonxAIParams) {
    super(fields);
    this.region = fields?.region ?? this.region;
    this.version = fields?.version ?? this.version;
    this.modelId = fields?.modelId ?? this.modelId;
    this.ibmCloudApiKey =
      fields?.ibmCloudApiKey ?? getEnvironmentVariable("IBM_CLOUD_API_KEY");
    this.projectId =
      fields?.projectId ?? getEnvironmentVariable("WATSONX_PROJECT_ID");
    // A fully custom endpoint takes precedence over region/version.
    this.endpoint =
      fields?.endpoint ?? endpointConstructor(this.region, this.version);
    this.modelParameters = fields.modelParameters;
    if (!this.ibmCloudApiKey) {
      throw new Error("Missing IBM Cloud API Key");
    }
    if (!this.projectId) {
      throw new Error("Missing WatsonX AI Project ID");
    }
  }
  _llmType() {
    return "watsonx_ai";
  }
  /**
   * Calls the WatsonX AI endpoint and retrieves the result.
   * @param {string} prompt The input prompt.
   * @returns {Promise<string>} A promise that resolves to the generated string.
   */
  /** @ignore */
  async _call(
    prompt: string,
    _options: this["ParsedCallOptions"]
  ): Promise<string> {
    interface WatsonxAIResponse {
      results: {
        generated_text: string;
        generated_token_count: number;
        input_token_count: number;
      }[];
      errors: {
        code: string;
        message: string;
      }[];
    }
    // this.caller applies the configured retry/concurrency policy.
    const response = (await this.caller.call(async () =>
      fetch(this.endpoint, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Accept: "application/json",
          // A fresh or cached IAM token is resolved per request.
          Authorization: `Bearer ${await this.generateToken()}`,
        },
        body: JSON.stringify({
          project_id: this.projectId,
          model_id: this.modelId,
          input: prompt,
          parameters: this.modelParameters,
        }),
      }).then((res) => res.json())
    )) as WatsonxAIResponse;
    /**
     * Handle Errors for invalid requests.
     */
    if (response.errors) {
      throw new Error(response.errors[0].message);
    }
    return response.results[0].generated_text;
  }
  /**
   * Exchanges the IBM Cloud API key for an IAM bearer token, caching it
   * until its reported expiry. Returns the cached token when still valid.
   */
  async generateToken(): Promise<string> {
    if (this.ibmCloudToken && this.ibmCloudTokenExpiresAt) {
      if (this.ibmCloudTokenExpiresAt > Date.now()) {
        return this.ibmCloudToken;
      }
    }
    interface TokenResponse {
      access_token: string;
      expiration: number;
    }
    const urlTokenParams = new URLSearchParams();
    urlTokenParams.append(
      "grant_type",
      "urn:ibm:params:oauth:grant-type:apikey"
    );
    urlTokenParams.append("apikey", this.ibmCloudApiKey as string);
    const data = (await fetch("https://iam.cloud.ibm.com/identity/token", {
      method: "POST",
      headers: {
        "Content-Type": "application/x-www-form-urlencoded",
      },
      body: urlTokenParams,
    }).then((res) => res.json())) as TokenResponse;
    // `expiration` is epoch seconds per the IAM token response; store as ms.
    this.ibmCloudTokenExpiresAt = data.expiration * 1000;
    this.ibmCloudToken = data.access_token;
    return this.ibmCloudToken;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/friendli.ts
|
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
type BaseLLMCallOptions,
type BaseLLMParams,
LLM,
} from "@langchain/core/language_models/llms";
import { GenerationChunk } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { convertEventStreamToIterableReadableDataStream } from "../utils/event_source_parse.js";
/**
* The FriendliParams interface defines the input parameters for
* the Friendli class.
*/
export interface FriendliParams extends BaseLLMParams {
  /**
   * Model name to use.
   */
  model?: string;
  /**
   * Base endpoint url.
   */
  baseUrl?: string;
  /**
   * Friendli personal access token to run as.
   */
  friendliToken?: string;
  /**
   * Friendli team ID to run as.
   */
  friendliTeam?: string;
  /**
   * Number between -2.0 and 2.0. Positive values penalizes tokens that have been
   * sampled, taking into account their frequency in the preceding text. This
   * penalization diminishes the model's tendency to reproduce identical lines
   * verbatim.
   */
  frequencyPenalty?: number;
  /**
   * The maximum number of tokens to generate. The length of your input tokens plus
   * `max_tokens` should not exceed the model's maximum length (e.g., 2048 for OpenAI
   * GPT-3)
   */
  maxTokens?: number;
  /**
   * When one of the stop phrases appears in the generation result, the API will stop
   * generation. The phrase is included in the generated result. If you are using
   * beam search, all of the active beams should contain the stop phrase to terminate
   * generation. Before checking whether a stop phrase is included in the result, the
   * phrase is converted into tokens.
   */
  stop?: string[];
  /**
   * Sampling temperature. Smaller temperature makes the generation result closer to
   * greedy, argmax (i.e., `top_k = 1`) sampling. If it is `None`, then 1.0 is used.
   */
  temperature?: number;
  /**
   * Tokens comprising the top `top_p` probability mass are kept for sampling. Numbers
   * between 0.0 (exclusive) and 1.0 (inclusive) are allowed. If it is `None`, then 1.0
   * is used by default.
   */
  topP?: number;
  /**
   * Additional kwargs to pass to the model.
   */
  modelKwargs?: Record<string, unknown>;
}
/**
* The Friendli class is used to interact with Friendli inference Endpoint models.
* This requires your Friendli Token and Friendli Team which is autoloaded if not specified.
*/
/**
 * The Friendli class is used to interact with Friendli inference Endpoint models.
 * This requires your Friendli Token and Friendli Team which is autoloaded if not specified.
 */
export class Friendli extends LLM<BaseLLMCallOptions> {
  lc_serializable = true;

  static lc_name() {
    return "Friendli";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      friendliToken: "FRIENDLI_TOKEN",
      friendliTeam: "FRIENDLI_TEAM",
    };
  }

  model = "mixtral-8x7b-instruct-v0-1";

  baseUrl = "https://inference.friendli.ai";

  friendliToken?: string;

  friendliTeam?: string;

  frequencyPenalty?: number;

  maxTokens?: number;

  stop?: string[];

  temperature?: number;

  topP?: number;

  modelKwargs?: Record<string, unknown>;

  constructor(fields: FriendliParams) {
    super(fields);
    this.model = fields?.model ?? this.model;
    this.baseUrl = fields?.baseUrl ?? this.baseUrl;
    this.friendliToken =
      fields?.friendliToken ?? getEnvironmentVariable("FRIENDLI_TOKEN");
    this.friendliTeam =
      fields?.friendliTeam ?? getEnvironmentVariable("FRIENDLI_TEAM");
    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.stop = fields?.stop ?? this.stop;
    this.temperature = fields?.temperature ?? this.temperature;
    this.topP = fields?.topP ?? this.topP;
    this.modelKwargs = fields?.modelKwargs ?? {};
    if (!this.friendliToken) {
      throw new Error("Missing Friendli Token");
    }
  }

  _llmType() {
    return "friendli";
  }

  /** Builds auth and content-type headers; SSE accept header when streaming. */
  private constructHeaders(stream: boolean) {
    return {
      "Content-Type": "application/json",
      Accept: stream ? "text/event-stream" : "application/json",
      Authorization: `Bearer ${this.friendliToken}`,
      "X-Friendli-Team": this.friendliTeam ?? "",
    };
  }

  /** Serializes the completion request body; modelKwargs are spread last and may override. */
  private constructBody(
    prompt: string,
    stream: boolean,
    _options?: this["ParsedCallOptions"]
  ) {
    const body = JSON.stringify({
      prompt,
      stream,
      model: this.model,
      max_tokens: this.maxTokens,
      frequency_penalty: this.frequencyPenalty,
      stop: this.stop,
      temperature: this.temperature,
      top_p: this.topP,
      ...this.modelKwargs,
    });
    return body;
  }

  /**
   * Calls the Friendli endpoint and retrieves the result.
   * @param {string} prompt The input prompt.
   * @returns {Promise<string>} A promise that resolves to the generated string.
   */
  /** @ignore */
  async _call(
    prompt: string,
    _options: this["ParsedCallOptions"]
  ): Promise<string> {
    interface FriendliResponse {
      choices: {
        index: number;
        seed: number;
        text: string;
        tokens: number[];
      }[];
      usage: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
      };
    }
    // this.caller applies the configured retry/concurrency policy.
    const response = (await this.caller.call(async () =>
      fetch(`${this.baseUrl}/v1/completions`, {
        method: "POST",
        headers: this.constructHeaders(false),
        body: this.constructBody(prompt, false, _options),
      }).then((res) => res.json())
    )) as FriendliResponse;
    return response.choices[0].text;
  }

  /**
   * Streams completion chunks from the Friendli SSE endpoint. Text events
   * yield token text; the final "complete" event yields choices and usage
   * as generationInfo.
   */
  async *_streamResponseChunks(
    prompt: string,
    _options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    interface FriendliResponse {
      event: string;
      index: number;
      text: string;
      token: number;
    }
    interface FriendliCompleteResponse {
      event: string;
      choices: {
        index: number;
        seed: number;
        text: string;
        tokens: number[];
      }[];
      usage: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
      };
    }
    const response = await this.caller.call(async () =>
      fetch(`${this.baseUrl}/v1/completions`, {
        method: "POST",
        headers: this.constructHeaders(true),
        body: this.constructBody(prompt, true, _options),
      })
    );
    // BUGFIX: was `!== 200 ?? !response.body`; the left operand is a boolean
    // (never nullish), so `??` never evaluated the right side and a missing
    // body went undetected. `||` checks both conditions as intended.
    if (response.status !== 200 || !response.body) {
      const errorResponse = await response.json();
      throw new Error(JSON.stringify(errorResponse));
    }
    const stream = convertEventStreamToIterableReadableDataStream(
      response.body
    );
    for await (const chunk of stream) {
      // NOTE(review): chunk.event is read off the raw stream item here while
      // `event` also appears in the parsed payload — confirm the stream
      // utility attaches `event` to the yielded chunks.
      if (chunk.event !== "complete") {
        const parsedChunk = JSON.parse(chunk) as FriendliResponse;
        const generationChunk = new GenerationChunk({
          text: parsedChunk.text ?? "",
        });
        yield generationChunk;
        void runManager?.handleLLMNewToken(generationChunk.text ?? "");
      } else {
        const parsedChunk = JSON.parse(chunk) as FriendliCompleteResponse;
        const generationChunk = new GenerationChunk({
          text: "",
          generationInfo: {
            choices: parsedChunk.choices,
            usage: parsedChunk.usage,
          },
        });
        yield generationChunk;
      }
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/ibm.ts
|
/* eslint-disable @typescript-eslint/no-unused-vars */
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BaseLLM, BaseLLMParams } from "@langchain/core/language_models/llms";
import { WatsonXAI } from "@ibm-cloud/watsonx-ai";
import {
DeploymentTextGenProperties,
ReturnOptionProperties,
TextGenLengthPenalty,
TextGenParameters,
TextTokenizationParams,
TextTokenizeParameters,
} from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js";
import {
Generation,
LLMResult,
GenerationChunk,
} from "@langchain/core/outputs";
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { authenticateAndSetInstance } from "../utils/ibm.js";
import {
GenerationInfo,
ResponseChunk,
TokenUsage,
WatsonxAuth,
WatsonxParams,
} from "../types/ibm.js";
/**
* Input to LLM class.
*/
export interface WatsonxCallOptionsLLM extends BaseLanguageModelCallOptions {
  /** Maximum number of retries for a failed request. */
  maxRetries?: number;
  /** Per-call overrides for the instance's generation parameters. */
  parameters?: Partial<WatsonxInputLLM>;
  /** Deployment id or name to route this call to, overriding the instance value. */
  idOrName?: string;
}
export interface WatsonxInputLLM extends WatsonxParams, BaseLLMParams {
  /** Whether to stream tokens instead of returning a single response. */
  streaming?: boolean;
  /** Maximum number of new tokens to generate (maps to `max_new_tokens`). */
  maxNewTokens?: number;
  /** Decoding strategy; one of the watsonx.ai DecodingMethod constants or a raw string. */
  decodingMethod?: TextGenParameters.Constants.DecodingMethod | string;
  /** Length penalty settings (maps to `length_penalty`). */
  lengthPenalty?: TextGenLengthPenalty;
  /** Minimum number of new tokens to generate (maps to `min_new_tokens`). */
  minNewTokens?: number;
  /** Seed for reproducible sampling (maps to `random_seed`). */
  randomSeed?: number;
  /** Sequences that terminate generation (maps to `stop_sequences`). */
  stopSequence?: string[];
  /** Sampling temperature. */
  temperature?: number;
  /** Request time limit (maps to `time_limit`). */
  timeLimit?: number;
  /** Top-k sampling cutoff. */
  topK?: number;
  /** Top-p (nucleus) sampling cutoff. */
  topP?: number;
  /** Penalty for repeated tokens (maps to `repetition_penalty`). */
  repetitionPenalty?: number;
  // NOTE: name has a typo ("Inpu") but is kept for backward compatibility;
  // it maps to the `truncate_input_tokens` API parameter.
  truncateInpuTokens?: number;
  /** Controls which extra fields the service returns with each result. */
  returnOptions?: ReturnOptionProperties;
  /** Whether a matched stop sequence is included in the output text. */
  includeStopSequence?: boolean;
}
/**
* Integration with an LLM.
*/
/**
 * LangChain LLM integration backed by IBM watsonx.ai text generation.
 * Supports foundation models scoped by project or space, and deployed
 * prompts addressed by idOrName, in both streaming and non-streaming modes.
 */
export class WatsonxLLM<
  CallOptions extends WatsonxCallOptionsLLM = WatsonxCallOptionsLLM
>
  extends BaseLLM<CallOptions>
  implements WatsonxInputLLM
{
  // Used for tracing, replace with the same name as your class
  static lc_name() {
    return "Watsonx";
  }
  lc_serializable = true;
  streaming = false;
  model: string;
  maxRetries = 0;
  version = "2024-05-31";
  serviceUrl: string;
  maxNewTokens?: number;
  spaceId?: string;
  projectId?: string;
  idOrName?: string;
  decodingMethod?: TextGenParameters.Constants.DecodingMethod | string;
  lengthPenalty?: TextGenLengthPenalty;
  minNewTokens?: number;
  randomSeed?: number;
  stopSequence?: string[];
  temperature?: number;
  timeLimit?: number;
  topK?: number;
  topP?: number;
  repetitionPenalty?: number;
  truncateInpuTokens?: number;
  returnOptions?: ReturnOptionProperties;
  includeStopSequence?: boolean;
  maxConcurrency?: number;
  // Authenticated watsonx.ai client; assigned exactly once in the constructor.
  private service: WatsonXAI;
  constructor(fields: WatsonxInputLLM & WatsonxAuth) {
    super(fields);
    this.model = fields.model ?? this.model;
    this.version = fields.version;
    this.maxNewTokens = fields.maxNewTokens ?? this.maxNewTokens;
    this.serviceUrl = fields.serviceUrl;
    this.decodingMethod = fields.decodingMethod;
    this.lengthPenalty = fields.lengthPenalty;
    this.minNewTokens = fields.minNewTokens;
    this.randomSeed = fields.randomSeed;
    this.stopSequence = fields.stopSequence;
    this.temperature = fields.temperature;
    this.timeLimit = fields.timeLimit;
    this.topK = fields.topK;
    this.topP = fields.topP;
    this.repetitionPenalty = fields.repetitionPenalty;
    this.truncateInpuTokens = fields.truncateInpuTokens;
    this.returnOptions = fields.returnOptions;
    this.includeStopSequence = fields.includeStopSequence;
    this.maxRetries = fields.maxRetries || this.maxRetries;
    this.maxConcurrency = fields.maxConcurrency;
    this.streaming = fields.streaming || this.streaming;
    // Exactly one scope identifier (projectId, spaceId, or idOrName) is allowed.
    if (
      (fields.projectId && fields.spaceId) ||
      (fields.idOrName && fields.projectId) ||
      (fields.spaceId && fields.idOrName)
    )
      throw new Error("Maximum 1 id type can be specified per instance");
    if (!fields.projectId && !fields.spaceId && !fields.idOrName)
      throw new Error(
        "No id specified! At least id of 1 type has to be specified"
      );
    this.projectId = fields?.projectId;
    this.spaceId = fields?.spaceId;
    this.idOrName = fields?.idOrName;
    this.serviceUrl = fields?.serviceUrl;
    const {
      watsonxAIApikey,
      watsonxAIAuthType,
      watsonxAIBearerToken,
      watsonxAIUsername,
      watsonxAIPassword,
      watsonxAIUrl,
      version,
      serviceUrl,
    } = fields;
    // Builds an authenticated client from whichever credential set is provided.
    const auth = authenticateAndSetInstance({
      watsonxAIApikey,
      watsonxAIAuthType,
      watsonxAIBearerToken,
      watsonxAIUsername,
      watsonxAIPassword,
      watsonxAIUrl,
      version,
      serviceUrl,
    });
    if (auth) this.service = auth;
    else throw new Error("You have not provided one type of authentication");
  }
  get lc_secrets(): { [key: string]: string } {
    return {
      authenticator: "AUTHENTICATOR",
      apiKey: "WATSONX_AI_APIKEY",
      apikey: "WATSONX_AI_APIKEY",
      watsonxAIAuthType: "WATSONX_AI_AUTH_TYPE",
      watsonxAIApikey: "WATSONX_AI_APIKEY",
      watsonxAIBearerToken: "WATSONX_AI_BEARER_TOKEN",
      watsonxAIUsername: "WATSONX_AI_USERNAME",
      watsonxAIPassword: "WATSONX_AI_PASSWORD",
      watsonxAIUrl: "WATSONX_AI_URL",
    };
  }
  get lc_aliases(): { [key: string]: string } {
    return {
      authenticator: "authenticator",
      apikey: "watsonx_ai_apikey",
      apiKey: "watsonx_ai_apikey",
      watsonxAIAuthType: "watsonx_ai_auth_type",
      watsonxAIApikey: "watsonx_ai_apikey",
      watsonxAIBearerToken: "watsonx_ai_bearer_token",
      watsonxAIUsername: "watsonx_ai_username",
      watsonxAIPassword: "watsonx_ai_password",
      watsonxAIUrl: "watsonx_ai_url",
    };
  }
  /**
   * Maps this instance's camelCase generation settings onto the service's
   * snake_case parameter names. Per-call `options.parameters` override
   * instance defaults; call-time `stop` overrides `stopSequence`.
   */
  invocationParams(
    options: this["ParsedCallOptions"]
  ): TextGenParameters | DeploymentTextGenProperties {
    const { parameters } = options;
    return {
      max_new_tokens: parameters?.maxNewTokens ?? this.maxNewTokens,
      decoding_method: parameters?.decodingMethod ?? this.decodingMethod,
      length_penalty: parameters?.lengthPenalty ?? this.lengthPenalty,
      min_new_tokens: parameters?.minNewTokens ?? this.minNewTokens,
      random_seed: parameters?.randomSeed ?? this.randomSeed,
      stop_sequences: options?.stop ?? this.stopSequence,
      temperature: parameters?.temperature ?? this.temperature,
      time_limit: parameters?.timeLimit ?? this.timeLimit,
      top_k: parameters?.topK ?? this.topK,
      top_p: parameters?.topP ?? this.topP,
      repetition_penalty:
        parameters?.repetitionPenalty ?? this.repetitionPenalty,
      truncate_input_tokens:
        parameters?.truncateInpuTokens ?? this.truncateInpuTokens,
      return_options: parameters?.returnOptions ?? this.returnOptions,
      include_stop_sequence:
        parameters?.includeStopSequence ?? this.includeStopSequence,
    };
  }
  /**
   * Returns the scope identifier (project, space, or deployment) plus model
   * id for API calls. The final branch is unreachable given the constructor
   * guarantees, but acts as a fallback.
   */
  scopeId() {
    if (this.projectId)
      return { projectId: this.projectId, modelId: this.model };
    else if (this.spaceId)
      return { spaceId: this.spaceId, modelId: this.model };
    else if (this.idOrName)
      return { idOrName: this.idOrName, modelId: this.model };
    else return { spaceId: this.spaceId, modelId: this.model };
  }
  /** Lists ids of foundation models that support text generation. */
  async listModels() {
    const listModelParams = {
      filters: "function_text_generation",
    };
    const listModels = await this.completionWithRetry(() =>
      this.service.listFoundationModelSpecs(listModelParams)
    );
    return listModels.result.resources?.map((item) => item.model_id);
  }
  private async generateSingleMessage(
    input: string,
    options: this["ParsedCallOptions"],
    stream: true
  ): Promise<
    AsyncIterable<WatsonXAI.ObjectStreamed<WatsonXAI.TextGenResponse>>
  >;
  private async generateSingleMessage(
    input: string,
    options: this["ParsedCallOptions"],
    stream: false
  ): Promise<Generation[]>;
  /**
   * Sends one prompt to the service. Returns a raw text stream when
   * `stream` is true, otherwise an array of Generations with token counts
   * in generationInfo. Deployed prompts (idOrName) receive the input via
   * `prompt_variables`; foundation models receive it directly.
   */
  private async generateSingleMessage(
    input: string,
    options: this["ParsedCallOptions"],
    stream: boolean
  ) {
    // Strip LangChain-level options so only service options are forwarded.
    const {
      signal,
      stop,
      maxRetries,
      maxConcurrency,
      timeout,
      ...requestOptions
    } = options;
    const tokenUsage = { generated_token_count: 0, input_token_count: 0 };
    const idOrName = options?.idOrName ?? this.idOrName;
    const parameters = this.invocationParams(options);
    if (stream) {
      const textStream = idOrName
        ? await this.service.deploymentGenerateTextStream({
            idOrName,
            ...requestOptions,
            parameters: {
              ...parameters,
              prompt_variables: {
                input,
              },
            },
            returnObject: true,
          })
        : await this.service.generateTextStream({
            input,
            parameters,
            ...this.scopeId(),
            ...requestOptions,
            returnObject: true,
          });
      return textStream;
    } else {
      const textGenerationPromise = idOrName
        ? this.service.deploymentGenerateText({
            ...requestOptions,
            idOrName,
            parameters: {
              ...parameters,
              prompt_variables: {
                input,
              },
            },
          })
        : this.service.generateText({
            input,
            parameters,
            ...this.scopeId(),
            ...requestOptions,
          });
      const textGeneration = await textGenerationPromise;
      const singleGeneration: Generation[] = textGeneration.result.results.map(
        (result) => {
          // NOTE(review): tokenUsage accumulated here is local and not
          // returned; callers re-derive totals from generationInfo.
          tokenUsage.generated_token_count += result.generated_token_count
            ? result.generated_token_count
            : 0;
          tokenUsage.input_token_count += result.input_token_count
            ? result.input_token_count
            : 0;
          return {
            text: result.generated_text,
            generationInfo: {
              stop_reason: result.stop_reason,
              input_token_count: result.input_token_count,
              generated_token_count: result.generated_token_count,
            },
          };
        }
      );
      return singleGeneration;
    }
  }
  /**
   * Runs `callback` through an AsyncCaller so it gets this instance's
   * retry policy and (optionally) the call's abort signal.
   */
  async completionWithRetry<T>(
    callback: () => T,
    options?: this["ParsedCallOptions"]
  ) {
    const caller = new AsyncCaller({
      maxConcurrency: options?.maxConcurrency || this.maxConcurrency,
      maxRetries: this.maxRetries,
    });
    const result = options
      ? caller.callWithOptions(
          {
            signal: options.signal,
          },
          async () => callback()
        )
      : caller.call(async () => callback());
    return result;
  }
  /**
   * Generates completions for each prompt, either by draining the token
   * stream (when `streaming` is set) or via single non-streaming requests.
   * Aggregated token usage is attached to `llmOutput.tokenUsage`.
   */
  async _generate(
    prompts: string[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<LLMResult> {
    const tokenUsage: TokenUsage = {
      generated_token_count: 0,
      input_token_count: 0,
    };
    if (this.streaming) {
      const generations: Generation[][] = await Promise.all(
        prompts.map(async (prompt, promptIdx) => {
          if (options.signal?.aborted) {
            throw new Error("AbortError");
          }
          const stream = this._streamResponseChunks(prompt, options);
          // One accumulator per completion index reported by the stream.
          const geneartionsArray: GenerationInfo[] = [];
          for await (const chunk of stream) {
            const completion = chunk?.generationInfo?.completion ?? 0;
            const generationInfo: GenerationInfo = {
              text: "",
              stop_reason: "",
              generated_token_count: 0,
              input_token_count: 0,
            };
            geneartionsArray[completion] ??= generationInfo;
            // NOTE(review): generated_token_count is overwritten (`=`) while
            // input_token_count accumulates (`+=`) — presumably the generated
            // count in each chunk is already cumulative; confirm against the
            // service's streaming payload.
            geneartionsArray[completion].generated_token_count =
              chunk?.generationInfo?.usage_metadata.generated_token_count ?? 0;
            geneartionsArray[completion].input_token_count +=
              chunk?.generationInfo?.usage_metadata.input_token_count ?? 0;
            geneartionsArray[completion].stop_reason =
              chunk?.generationInfo?.stop_reason;
            geneartionsArray[completion].text += chunk.text;
            // NOTE(review): the token callback always reports completion 0,
            // even when the chunk belongs to another completion index —
            // confirm this is intended.
            if (chunk.text)
              void runManager?.handleLLMNewToken(chunk.text, {
                prompt: promptIdx,
                completion: 0,
              });
          }
          return geneartionsArray.map((item) => {
            const { text, ...rest } = item;
            tokenUsage.generated_token_count = rest.generated_token_count;
            tokenUsage.input_token_count += rest.input_token_count;
            return {
              text,
              generationInfo: rest,
            };
          });
        })
      );
      const result: LLMResult = { generations, llmOutput: { tokenUsage } };
      return result;
    } else {
      const generations: Generation[][] = await Promise.all(
        prompts.map(async (prompt) => {
          if (options.signal?.aborted) {
            throw new Error("AbortError");
          }
          const callback = () =>
            this.generateSingleMessage(prompt, options, false);
          type ReturnMessage = ReturnType<typeof callback>;
          const response = await this.completionWithRetry<ReturnMessage>(
            callback,
            options
          );
          // Sum token counts across all generations for this prompt.
          const [generated_token_count, input_token_count] = response.reduce(
            (acc, curr) => {
              let generated = 0;
              let inputed = 0;
              if (curr?.generationInfo?.generated_token_count)
                generated = curr.generationInfo.generated_token_count + acc[0];
              if (curr?.generationInfo?.input_token_count)
                inputed = curr.generationInfo.input_token_count + acc[1];
              return [generated, inputed];
            },
            [0, 0]
          );
          tokenUsage.generated_token_count += generated_token_count;
          tokenUsage.input_token_count += input_token_count;
          return response;
        })
      );
      const result: LLMResult = { generations, llmOutput: { tokenUsage } };
      return result;
    }
  }
  /** Counts tokens in `content` using the service's tokenizer endpoint. */
  async getNumTokens(
    content: string,
    options?: TextTokenizeParameters
  ): Promise<number> {
    const params: TextTokenizationParams = {
      ...this.scopeId(),
      input: content,
      parameters: options,
    };
    const callback = () => this.service.tokenizeText(params);
    type ReturnTokens = ReturnType<typeof callback>;
    const response = await this.completionWithRetry<ReturnTokens>(callback);
    return response.result.result.token_count;
  }
  /**
   * Streams GenerationChunks for a single prompt. Each service chunk may
   * carry several parallel results; their index is surfaced as
   * `generationInfo.completion`.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const callback = () => this.generateSingleMessage(prompt, options, true);
    type ReturnStream = ReturnType<typeof callback>;
    const streamInferDeployedPrompt =
      await this.completionWithRetry<ReturnStream>(callback);
    // NOTE(review): responseChunk is initialized and reset each iteration
    // but never read — it appears to be vestigial.
    const responseChunk: ResponseChunk = {
      id: 0,
      event: "",
      data: {
        results: [],
      },
    };
    for await (const chunk of streamInferDeployedPrompt) {
      if (options.signal?.aborted) {
        throw new Error("AbortError");
      }
      for (const [index, item] of chunk.data.results.entries()) {
        yield new GenerationChunk({
          text: item.generated_text,
          generationInfo: {
            stop_reason: item.stop_reason,
            completion: index,
            usage_metadata: {
              generated_token_count: item.generated_token_count,
              input_token_count: item.input_token_count,
              stop_reason: item.stop_reason,
            },
          },
        });
        if (item.generated_text)
          void runManager?.handleLLMNewToken(item.generated_text);
      }
      Object.assign(responseChunk, { id: 0, event: "", data: {} });
    }
  }
  _llmType() {
    return "watsonx";
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/cohere.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
/**
* Interface for the input parameters specific to the Cohere model.
* @deprecated Use `CohereInput` from `@langchain/cohere` instead.
*/
export interface CohereInput extends BaseLLMParams {
  /** Sampling temperature to use */
  temperature?: number;
  /**
   * Maximum number of tokens to generate in the completion.
   */
  maxTokens?: number;
  /** Model to use */
  model?: string;
  /** Cohere API key; falls back to the COHERE_API_KEY environment variable when omitted. */
  apiKey?: string;
}
/**
* Class representing a Cohere Large Language Model (LLM). It interacts
* with the Cohere API to generate text completions.
* @example
* ```typescript
* const model = new Cohere({
* temperature: 0.7,
* maxTokens: 20,
* maxRetries: 5,
* });
*
* const res = await model.invoke(
* "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
* );
* console.log({ res });
* ```
* @deprecated Use `Cohere` from `@langchain/cohere` instead.
*/
/**
 * Cohere Large Language Model wrapper. Resolves an API key from the
 * constructor or the COHERE_API_KEY environment variable and calls the
 * Cohere `generate` endpoint via a dynamically imported `cohere-ai`.
 * @deprecated Use `Cohere` from `@langchain/cohere` instead.
 */
export class Cohere extends LLM implements CohereInput {
  lc_serializable = true;

  temperature = 0;

  maxTokens = 250;

  model: string;

  apiKey: string;

  static lc_name() {
    return "Cohere";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "COHERE_API_KEY",
    };
  }

  get lc_aliases(): { [key: string]: string } | undefined {
    return {
      apiKey: "cohere_api_key",
    };
  }

  constructor(fields?: CohereInput) {
    super(fields ?? {});
    const resolvedKey =
      fields?.apiKey ?? getEnvironmentVariable("COHERE_API_KEY");
    if (!resolvedKey) {
      throw new Error(
        "Please set the COHERE_API_KEY environment variable or pass it to the constructor as the apiKey field."
      );
    }
    this.apiKey = resolvedKey;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.temperature = fields?.temperature ?? this.temperature;
    this.model = fields?.model ?? this.model;
  }

  _llmType() {
    return "cohere";
  }

  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const { cohere } = await Cohere.imports();
    cohere.init(this.apiKey);
    // Invoke the `generate` endpoint through the caller for retry support.
    const generateResponse = await this.caller.callWithOptions(
      { signal: options.signal },
      cohere.generate.bind(cohere),
      {
        prompt,
        model: this.model,
        max_tokens: this.maxTokens,
        temperature: this.temperature,
        end_sequences: options.stop,
      }
    );
    try {
      return generateResponse.body.generations[0].text;
    } catch {
      console.log(generateResponse);
      throw new Error("Could not parse response.");
    }
  }

  /** Dynamically imports `cohere-ai`, raising a helpful error when absent. */
  /** @ignore */
  static async imports(): Promise<{
    cohere: typeof import("cohere-ai");
  }> {
    try {
      const { default: cohere } = await import("cohere-ai");
      return { cohere };
    } catch (e) {
      throw new Error(
        "Please install cohere-ai as a dependency with, e.g. `yarn add cohere-ai`"
      );
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/portkey.ts
|
import _ from "lodash";
import { LLMOptions, Portkey as _Portkey } from "portkey-ai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk, LLMResult } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseLLM } from "@langchain/core/language_models/llms";
interface PortkeyOptions {
  // Portkey API key; falls back to the PORTKEY_API_KEY env variable.
  apiKey?: string;
  // Portkey API base URL; falls back to PORTKEY_BASE_URL, then
  // "https://api.portkey.ai".
  baseURL?: string;
  // Portkey routing mode, e.g. "single".
  mode?: string;
  // LLM configurations the Portkey gateway routes between.
  llms?: [LLMOptions] | null;
}
/** Read an environment variable, returning `default_val` when it is unset. */
const readEnv = (env: string, default_val?: string): string | undefined => {
  const value = getEnvironmentVariable(env);
  return value ?? default_val;
};
/**
 * Wraps a configured Portkey client. The API key and base URL fall back to
 * the PORTKEY_API_KEY and PORTKEY_BASE_URL environment variables.
 * @throws Error when no API key is provided or found in the environment.
 */
export class PortkeySession {
  portkey: _Portkey;

  constructor(options: PortkeyOptions = {}) {
    if (!options.apiKey) {
      /* eslint-disable no-param-reassign */
      options.apiKey = readEnv("PORTKEY_API_KEY");
    }
    if (!options.baseURL) {
      /* eslint-disable no-param-reassign */
      options.baseURL = readEnv("PORTKEY_BASE_URL", "https://api.portkey.ai");
    }
    // Fail fast before constructing the client. The previous implementation
    // built (and discarded) a throwaway `new _Portkey({})` instance and only
    // then validated the key; that dead construction has been removed.
    if (!options.apiKey) {
      throw new Error("Set Portkey ApiKey in PORTKEY_API_KEY env variable");
    }
    this.portkey = new _Portkey(options);
  }
}
// Cache of sessions keyed by their creation options, so repeated calls with
// deep-equal options reuse the same client.
const defaultPortkeySession: {
  session: PortkeySession;
  options: PortkeyOptions;
}[] = [];

/**
 * Get a session for the Portkey API. If one already exists with the same options,
 * it will be returned. Otherwise, a new session will be created.
 * @param options
 * @returns
 */
export function getPortkeySession(options: PortkeyOptions = {}) {
  const cached = defaultPortkeySession.find((entry) =>
    _.isEqual(entry.options, options)
  );
  if (cached) {
    return cached.session;
  }
  const session = new PortkeySession(options);
  defaultPortkeySession.push({ session, options });
  return session;
}
/**
* @example
* ```typescript
* const model = new Portkey({
* mode: "single",
* llms: [
* {
* provider: "openai",
* virtual_key: "open-ai-key-1234",
* model: "gpt-3.5-turbo-instruct",
* max_tokens: 2000,
* },
* ],
* });
*
* // Stream the output of the model and process it
* const res = await model.stream(
* "Question: Write a story about a king\nAnswer:"
* );
* for await (const i of res) {
* process.stdout.write(i);
* }
* ```
*/
export class Portkey extends BaseLLM {
  apiKey?: string = undefined;

  baseURL?: string = undefined;

  mode?: string = undefined;

  llms?: [LLMOptions] | null = undefined;

  // Cached Portkey client shared across instances with equal options.
  session: PortkeySession;

  constructor(init?: Partial<Portkey>) {
    super(init ?? {});
    this.apiKey = init?.apiKey;
    this.baseURL = init?.baseURL;
    this.mode = init?.mode;
    this.llms = init?.llms;
    // Reuses an existing session when one was already created with the
    // same options.
    this.session = getPortkeySession({
      apiKey: this.apiKey,
      baseURL: this.baseURL,
      llms: this.llms,
      mode: this.mode,
    });
  }

  _llmType() {
    return "portkey";
  }

  async _generate(
    prompts: string[],
    options: this["ParsedCallOptions"],
    _?: CallbackManagerForLLMRun
  ): Promise<LLMResult> {
    // One non-streaming completion request per prompt, issued sequentially
    // so result order matches prompt order.
    const perPromptChoices = [];
    for (const prompt of prompts) {
      const response = await this.session.portkey.completions.create({
        prompt,
        ...options,
        stream: false,
      });
      perPromptChoices.push(response.choices);
    }
    return {
      generations: perPromptChoices.map((choices) =>
        choices.map((choice) => ({
          text: choice.text ?? "",
          generationInfo: {
            finishReason: choice.finish_reason,
            logprobs: choice.logprobs,
          },
        }))
      ),
    };
  }

  async *_streamResponseChunks(
    input: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const response = await this.session.portkey.completions.create({
      prompt: input,
      ...options,
      stream: true,
    });
    for await (const data of response) {
      // Only the first choice of each streamed event is surfaced.
      const firstChoice = data?.choices[0];
      if (!firstChoice) {
        continue;
      }
      const chunk = new GenerationChunk({
        text: firstChoice.text ?? "",
        generationInfo: {
          finishReason: firstChoice.finish_reason,
        },
      });
      yield chunk;
      void runManager?.handleLLMNewToken(chunk.text ?? "");
    }
    // Surface cancellation once the stream has been consumed.
    if (options.signal?.aborted) {
      throw new Error("AbortError");
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/bedrock/web.ts
|
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import { EventStreamCodec } from "@smithy/eventstream-codec";
import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
import { Sha256 } from "@aws-crypto/sha256-js";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import {
BaseBedrockInput,
BedrockLLMInputOutputAdapter,
type CredentialType,
} from "../../utils/bedrock/index.js";
import type { SerializedFields } from "../../load/map_keys.js";
// Region prefixes that may appear as the first dot-separated segment of a
// cross-region inference-profile model id (e.g. "eu.anthropic...."); used by
// isInferenceModel below.
const AWS_REGIONS = [
  "us",
  "sa",
  "me",
  "il",
  "eu",
  "cn",
  "ca",
  "ap",
  "af",
  "us-gov",
];
// Model providers this integration can format requests/responses for.
const ALLOWED_MODEL_PROVIDERS = [
  "ai21",
  "anthropic",
  "amazon",
  "cohere",
  "meta",
  "mistral",
];
// Byte length of the big-endian total-length prefix read at the start of
// each AWS event-stream message frame (see _readChunks).
const PRELUDE_TOTAL_LENGTH_BYTES = 4;
/**
* A type of Large Language Model (LLM) that interacts with the Bedrock
* service. It extends the base `LLM` class and implements the
* `BaseBedrockInput` interface. The class is designed to authenticate and
* interact with the Bedrock service, which is a part of Amazon Web
* Services (AWS). It uses AWS credentials for authentication and can be
* configured with various parameters such as the model to use, the AWS
* region, and the maximum number of tokens to generate.
*/
export class Bedrock extends LLM implements BaseBedrockInput {
  // Model id to invoke; defaults to Amazon Titan.
  model = "amazon.titan-tg1-large";
  // Provider segment parsed from the model id (e.g. "anthropic").
  modelProvider: string;
  // AWS region hosting the Bedrock runtime endpoint.
  region: string;
  // AWS credentials used for SigV4 request signing.
  credentials: CredentialType;
  temperature?: number | undefined = undefined;
  maxTokens?: number | undefined = undefined;
  // fetch implementation used for HTTP requests; overridable via fields.
  fetchFn: typeof fetch;
  // Explicit endpoint host; falls back to the regional default when unset.
  endpointHost?: string;
  /** @deprecated Prefer passing stop sequences via call options. */
  stopSequences?: string[];
  // Extra provider-specific parameters merged into the request body.
  modelKwargs?: Record<string, unknown>;
  // Decoder for AWS binary event-stream frames (streaming responses).
  codec: EventStreamCodec = new EventStreamCodec(toUtf8, fromUtf8);
  streaming = false;
  lc_serializable = true;
  get lc_aliases(): Record<string, string> {
    return {
      model: "model_id",
      region: "region_name",
    };
  }
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      "credentials.accessKeyId": "BEDROCK_AWS_ACCESS_KEY_ID",
      "credentials.secretAccessKey": "BEDROCK_AWS_SECRET_ACCESS_KEY",
    };
  }
  get lc_attributes(): SerializedFields | undefined {
    return { region: this.region };
  }
  _llmType() {
    return "bedrock";
  }
  static lc_name() {
    return "Bedrock";
  }
  /**
   * Validates the model provider, region and credentials; every other field
   * falls back to the class default when not provided.
   */
  constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    this.modelProvider = getModelProvider(this.model);
    if (!ALLOWED_MODEL_PROVIDERS.includes(this.modelProvider)) {
      throw new Error(
        `Unknown model provider: '${this.modelProvider}', only these are supported: ${ALLOWED_MODEL_PROVIDERS}`
      );
    }
    const region =
      fields?.region ?? getEnvironmentVariable("AWS_DEFAULT_REGION");
    if (!region) {
      throw new Error(
        "Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field."
      );
    }
    this.region = region;
    const credentials = fields?.credentials;
    if (!credentials) {
      throw new Error(
        "Please set the AWS credentials in the 'credentials' field."
      );
    }
    this.credentials = credentials;
    this.temperature = fields?.temperature ?? this.temperature;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.fetchFn = fields?.fetchFn ?? fetch.bind(globalThis);
    // endpointUrl is accepted as an alias for endpointHost.
    this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
    this.stopSequences = fields?.stopSequences;
    this.modelKwargs = fields?.modelKwargs;
    this.streaming = fields?.streaming ?? this.streaming;
  }
  /** Call out to Bedrock service model.
    Arguments:
      prompt: The prompt to pass into the model.
    Returns:
      The string generated by the model.
    Example:
      response = model.invoke("Tell me a joke.")
  */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    const service = "bedrock-runtime";
    const endpointHost =
      this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
    const provider = this.modelProvider;
    if (this.streaming) {
      // Streaming mode: drain the chunk stream and concatenate the chunks
      // so the non-streaming string contract is preserved.
      const stream = this._streamResponseChunks(prompt, options, runManager);
      let finalResult: GenerationChunk | undefined;
      for await (const chunk of stream) {
        if (finalResult === undefined) {
          finalResult = chunk;
        } else {
          finalResult = finalResult.concat(chunk);
        }
      }
      return finalResult?.text ?? "";
    }
    const response = await this._signedFetch(prompt, options, {
      bedrockMethod: "invoke",
      endpointHost,
      provider,
    });
    const json = await response.json();
    if (!response.ok) {
      throw new Error(
        `Error ${response.status}: ${json.message ?? JSON.stringify(json)}`
      );
    }
    const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
    return text;
  }
  /**
   * Build, SigV4-sign, and send a request to the Bedrock runtime.
   * @param prompt Raw prompt string.
   * @param options Parsed call options (stop sequences, abort signal).
   * @param fields Bedrock method, endpoint host, and provider to target.
   * @returns The raw fetch Response; HTTP status is checked by the caller.
   */
  async _signedFetch(
    prompt: string,
    options: this["ParsedCallOptions"],
    fields: {
      bedrockMethod: "invoke" | "invoke-with-response-stream";
      endpointHost: string;
      provider: string;
    }
  ) {
    const { bedrockMethod, endpointHost, provider } = fields;
    // Per-call stop sequences take precedence over the deprecated field.
    const inputBody = BedrockLLMInputOutputAdapter.prepareInput(
      provider,
      prompt,
      this.maxTokens,
      this.temperature,
      options.stop ?? this.stopSequences,
      this.modelKwargs,
      fields.bedrockMethod
    );
    const url = new URL(
      `https://${endpointHost}/model/${this.model}/${bedrockMethod}`
    );
    const request = new HttpRequest({
      hostname: url.hostname,
      path: url.pathname,
      protocol: url.protocol,
      method: "POST", // method must be uppercase
      body: JSON.stringify(inputBody),
      query: Object.fromEntries(url.searchParams.entries()),
      headers: {
        // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        host: url.host,
        accept: "application/json",
        "content-type": "application/json",
      },
    });
    const signer = new SignatureV4({
      credentials: this.credentials,
      service: "bedrock",
      region: this.region,
      sha256: Sha256,
    });
    const signedRequest = await signer.sign(request);
    // Send request to AWS using the low-level fetch API
    const response = await this.caller.callWithOptions(
      { signal: options.signal },
      async () =>
        this.fetchFn(url, {
          headers: signedRequest.headers,
          body: signedRequest.body,
          method: signedRequest.method,
        })
    );
    return response;
  }
  /** Parameters identifying this invocation, used for tracing/serialization. */
  invocationParams(options?: this["ParsedCallOptions"]) {
    return {
      model: this.model,
      region: this.region,
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      stop: options?.stop ?? this.stopSequences,
      modelKwargs: this.modelKwargs,
    };
  }
  /**
   * Stream generation chunks. Providers with response-stream support
   * (anthropic/cohere/meta/mistral) use invoke-with-response-stream and the
   * AWS event-stream wire format; other providers fall back to one
   * non-streaming invocation yielded as a single chunk.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const provider = this.modelProvider;
    const bedrockMethod =
      provider === "anthropic" ||
      provider === "cohere" ||
      provider === "meta" ||
      provider === "mistral"
        ? "invoke-with-response-stream"
        : "invoke";
    const service = "bedrock-runtime";
    const endpointHost =
      this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
    // Send request to AWS using the low-level fetch API
    const response = await this._signedFetch(prompt, options, {
      bedrockMethod,
      endpointHost,
      provider,
    });
    if (response.status < 200 || response.status >= 300) {
      throw Error(
        `Failed to access underlying url '${endpointHost}': got ${
          response.status
        } ${response.statusText}: ${await response.text()}`
      );
    }
    if (
      provider === "anthropic" ||
      provider === "cohere" ||
      provider === "meta" ||
      provider === "mistral"
    ) {
      const reader = response.body?.getReader();
      const decoder = new TextDecoder();
      for await (const chunk of this._readChunks(reader)) {
        // Each re-framed chunk is one event-stream message; only "chunk"
        // events carrying JSON payloads are expected here.
        const event = this.codec.decode(chunk);
        if (
          (event.headers[":event-type"] !== undefined &&
            event.headers[":event-type"].value !== "chunk") ||
          event.headers[":content-type"].value !== "application/json"
        ) {
          throw Error(`Failed to get event chunk: got ${chunk}`);
        }
        const body = JSON.parse(decoder.decode(event.body));
        if (body.message) {
          throw new Error(body.message);
        }
        if (body.bytes !== undefined) {
          // Payload bytes are base64-encoded JSON.
          const chunkResult = JSON.parse(
            decoder.decode(
              Uint8Array.from(atob(body.bytes), (m) => m.codePointAt(0) ?? 0)
            )
          );
          const text = BedrockLLMInputOutputAdapter.prepareOutput(
            provider,
            chunkResult
          );
          yield new GenerationChunk({
            text,
            generationInfo: {},
          });
          // eslint-disable-next-line no-void
          void runManager?.handleLLMNewToken(text);
        }
      }
    } else {
      // Non-streaming provider: yield the whole completion as one chunk.
      const json = await response.json();
      const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
      yield new GenerationChunk({
        text,
        generationInfo: {},
      });
      // eslint-disable-next-line no-void
      void runManager?.handleLLMNewToken(text);
    }
  }
  // Re-frames the raw byte stream into complete event-stream messages: each
  // message starts with a 4-byte big-endian total-length field, so bytes are
  // buffered until a full message is available before yielding it.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _readChunks(reader: any) {
    // Concatenate two byte buffers.
    function _concatChunks(a: Uint8Array, b: Uint8Array) {
      const newBuffer = new Uint8Array(a.length + b.length);
      newBuffer.set(a);
      newBuffer.set(b, a.length);
      return newBuffer;
    }
    // Read the frame's declared total length, or 0 if not enough bytes yet.
    function getMessageLength(buffer: Uint8Array) {
      if (buffer.byteLength < PRELUDE_TOTAL_LENGTH_BYTES) return 0;
      const view = new DataView(
        buffer.buffer,
        buffer.byteOffset,
        buffer.byteLength
      );
      return view.getUint32(0, false);
    }
    return {
      async *[Symbol.asyncIterator]() {
        let readResult = await reader.read();
        let buffer: Uint8Array = new Uint8Array(0);
        while (!readResult.done) {
          const chunk: Uint8Array = readResult.value;
          buffer = _concatChunks(buffer, chunk);
          let messageLength = getMessageLength(buffer);
          // A single network chunk may contain several complete messages.
          while (
            buffer.byteLength >= PRELUDE_TOTAL_LENGTH_BYTES &&
            buffer.byteLength >= messageLength
          ) {
            yield buffer.slice(0, messageLength);
            buffer = buffer.slice(messageLength);
            messageLength = getMessageLength(buffer);
          }
          readResult = await reader.read();
        }
      },
    };
  }
}
/**
 * Whether `modelId` is a cross-region inference-profile id, i.e. its first
 * dot-separated segment is one of the known region prefixes
 * (e.g. "eu.anthropic.claude-...").
 */
function isInferenceModel(modelId: string): boolean {
  // `includes` states the membership test directly, replacing the manual
  // element-by-element `some` scan.
  const [prefix] = modelId.split(".");
  return AWS_REGIONS.includes(prefix);
}
/**
 * Extract the provider segment from a Bedrock model id: the second segment
 * for region-prefixed inference-profile ids, otherwise the first.
 */
function getModelProvider(modelId: string): string {
  const segments = modelId.split(".");
  return isInferenceModel(modelId) ? segments[1] : segments[0];
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/bedrock/index.ts
|
import { defaultProvider } from "@aws-sdk/credential-provider-node";
import type { BaseLLMParams } from "@langchain/core/language_models/llms";
import { BaseBedrockInput } from "../../utils/bedrock/index.js";
import { Bedrock as BaseBedrock } from "./web.js";
/**
 * Node-specific Bedrock LLM: identical to the web implementation except
 * that, when no credentials are supplied, it falls back to the AWS SDK's
 * default credential provider chain.
 */
export class Bedrock extends BaseBedrock {
  static lc_name() {
    return "Bedrock";
  }

  constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams) {
    super({
      ...fields,
      // defaultProvider() resolves credentials via the AWS SDK.
      credentials: fields?.credentials ?? defaultProvider(),
    });
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/huggingface_hub.int.test.ts
|
import { test } from "@jest/globals";
import { HuggingFaceInference } from "../hf.js";
// Basic completion smoke test (skipped — integration test; presumably
// requires Hugging Face API access).
test.skip("Test HuggingFace", async () => {
  const model = new HuggingFaceInference({ temperature: 0.1, topP: 0.5 });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("1 + 1 =");
  // console.log(res);
}, 50000);
// Streaming: expects the response to arrive in more than one chunk.
test.skip("Test HuggingFace with streaming", async () => {
  const model = new HuggingFaceInference({
    model: "mistralai/Mistral-7B-v0.1",
    temperature: 0.1,
    maxTokens: 10,
    topP: 0.5,
  });
  const stream = await model.stream("What is your name?");
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
    // console.log(chunk);
  }
  // console.log(chunks.join(""));
  expect(chunks.length).toBeGreaterThan(1);
}, 50000);
// Stop sequences via .bind(): generation should halt at "ramento"
// (mid-"Sacramento").
test.skip("Test HuggingFace with stop sequence", async () => {
  const model = new HuggingFaceInference({
    model: "mistralai/Mistral-7B-v0.1",
    temperature: 0.1,
    topP: 0.5,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model
    .bind({
      stop: ["ramento"],
    })
    .invoke(`What is the capital of California?`);
  // console.log(res);
}, 50000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/bedrock.int.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect } from "@jest/globals";
import { Bedrock } from "../bedrock/index.js";
/**
 * Shared builder for a Bedrock client configured the way every test in this
 * file needs it: 20 max tokens, no retries, credentials read from the
 * environment at call time. `extraFields` lets individual tests layer on
 * streaming/callback options.
 */
const makeBedrock = (
  model: string,
  extraFields: NonNullable<ConstructorParameters<typeof Bedrock>[0]> = {}
) =>
  new Bedrock({
    maxTokens: 20,
    region: process.env.BEDROCK_AWS_REGION!,
    model,
    maxRetries: 0,
    credentials: {
      accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
      secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
      sessionToken: process.env.BEDROCK_AWS_SESSION_TOKEN,
    },
    ...extraFields,
  });

test("Test Bedrock LLM: AI21", async () => {
  const bedrock = makeBedrock("ai21.j2-grande-instruct");
  const res = await bedrock.invoke("Human: What is your name?");
  expect(typeof res).toBe("string");
  // console.log(res);
});

test.skip("Test Bedrock LLM: Meta Llama2", async () => {
  const bedrock = makeBedrock("meta.llama2-13b-chat-v1");
  const res = await bedrock.invoke("Human: What is your name?");
  expect(typeof res).toBe("string");
  // console.log(res);
});

test.skip("Test Bedrock LLM streaming: Meta Llama2", async () => {
  const bedrock = makeBedrock("meta.llama2-13b-chat-v1");
  const stream = await bedrock.stream("What is your name?");
  const chunks = [];
  for await (const chunk of stream) {
    // console.log(chunk);
    chunks.push(chunk);
  }
  expect(chunks.length).toBeGreaterThan(1);
});

test("Test Bedrock LLM: Claude-v2", async () => {
  const bedrock = makeBedrock("anthropic.claude-v2");
  const res = await bedrock.invoke("Human: What is your name?\n\nAssistant:");
  expect(typeof res).toBe("string");
  // console.log(res);
});

// AI21 is not in the response-stream provider set (see bedrock/web.ts), so
// `.stream()` yields the whole completion as exactly one chunk.
test("Test Bedrock LLM streaming: AI21", async () => {
  const bedrock = makeBedrock("ai21.j2-grande-instruct");
  const stream = await bedrock.stream("Human: What is your name?");
  const chunks = [];
  for await (const chunk of stream) {
    // console.log(chunk);
    chunks.push(chunk);
  }
  expect(chunks.length).toEqual(1);
});

// With streaming enabled, per-token callbacks must fire and reassemble into
// the final invoke() result.
test("Test Bedrock LLM handleLLMNewToken: Claude-v2", async () => {
  const tokens: string[] = [];
  const bedrock = makeBedrock("anthropic.claude-v2", {
    streaming: true,
    callbacks: [
      {
        handleLLMNewToken(token) {
          tokens.push(token);
        },
      },
    ],
  });
  const stream = await bedrock.invoke(
    "Human: What is your name?\n\nAssistant:"
  );
  expect(tokens.length).toBeGreaterThan(1);
  expect(stream).toEqual(tokens.join(""));
});

test("Test Bedrock LLM streaming: Claude-v2", async () => {
  const bedrock = makeBedrock("anthropic.claude-v2");
  const stream = await bedrock.stream(
    "Human: What is your name?\n\nAssistant:"
  );
  const chunks = [];
  for await (const chunk of stream) {
    // console.log(chunk);
    chunks.push(chunk);
  }
  expect(chunks.length).toBeGreaterThan(1);
});

// Region-prefixed (cross-region inference profile) model ids must also work.
test("Test Bedrock LLM: Inference Models", async () => {
  const bedrock = makeBedrock("eu.anthropic.claude-3-5-sonnet-20240620-v1:0");
  const res = await bedrock.invoke("Human: What is your name?\n\nAssistant:");
  expect(typeof res).toBe("string");
  // console.log(res);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/writer.int.test.ts
|
import { test } from "@jest/globals";
import { Writer } from "../writer.js";
// Completion smoke test (skipped — integration test; presumably requires
// Writer API credentials).
test.skip("Test Writer", async () => {
  const model = new Writer({ maxTokens: 20 });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("1 + 1 =");
  // console.log(res);
}, 50000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/ollama.int.test.ts
|
import { test } from "@jest/globals";
import * as fs from "node:fs/promises";
import { fileURLToPath } from "node:url";
import * as path from "node:path";
import { PromptTemplate } from "@langchain/core/prompts";
import {
BytesOutputParser,
StringOutputParser,
} from "@langchain/core/output_parsers";
import { Ollama } from "../ollama.js";
// Basic completion with default client options (skipped — needs a local
// Ollama server).
test.skip("test call", async () => {
  const ollama = new Ollama({});
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await ollama.invoke(
    "What is a good name for a company that makes colorful socks?"
  );
  // console.log({ result });
});
// Per-token callbacks should fire and reassemble into the final result.
test.skip("test call with callback", async () => {
  const ollama = new Ollama({
    baseUrl: "http://localhost:11434",
  });
  const tokens: string[] = [];
  const result = await ollama.invoke(
    "What is a good name for a company that makes colorful socks?",
    {
      callbacks: [
        {
          handleLLMNewToken(token) {
            tokens.push(token);
          },
        },
      ],
    }
  );
  expect(tokens.length).toBeGreaterThan(1);
  expect(result).toEqual(tokens.join(""));
});
// Streaming should produce more than one chunk.
test.skip("test streaming call", async () => {
  const ollama = new Ollama({
    baseUrl: "http://localhost:11434",
  });
  const stream = await ollama.stream(
    `Translate "I love programming" into German.`
  );
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  // console.log(chunks.join(""));
  expect(chunks.length).toBeGreaterThan(1);
});
// Aborting via AbortController should reject the in-flight invoke.
test.skip("should abort the request", async () => {
  const ollama = new Ollama({
    baseUrl: "http://localhost:11434",
  });
  const controller = new AbortController();
  await expect(() => {
    const ret = ollama.invoke("Respond with an extremely verbose response", {
      signal: controller.signal,
    });
    controller.abort();
    return ret;
  }).rejects.toThrow("This operation was aborted");
});
// Prompt -> model -> BytesOutputParser pipeline should stream multiple chunks.
test.skip("should stream through with a bytes output parser", async () => {
  const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.
User: {input}
AI:`;
  const prompt = PromptTemplate.fromTemplate(TEMPLATE);
  const ollama = new Ollama({
    model: "llama2",
    baseUrl: "http://127.0.0.1:11434",
  });
  const outputParser = new BytesOutputParser();
  const chain = prompt.pipe(ollama).pipe(outputParser);
  const stream = await chain.stream({
    input: `Translate "I love programming" into German.`,
  });
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  // console.log(chunks.join(""));
  expect(chunks.length).toBeGreaterThan(1);
});
// format: "json" should make the model emit parseable JSON output.
test.skip("JSON mode", async () => {
  const TEMPLATE = `You are a pirate named Patchy. All responses must be in pirate dialect and in JSON format, with a property named "response" followed by the value.
User: {input}
AI:`;
  // Infer the input variables from the template
  const prompt = PromptTemplate.fromTemplate(TEMPLATE);
  const ollama = new Ollama({
    model: "llama2",
    baseUrl: "http://127.0.0.1:11434",
    format: "json",
  });
  const outputParser = new StringOutputParser();
  const chain = prompt.pipe(ollama).pipe(outputParser);
  const res = await chain.invoke({
    input: `Translate "I love programming" into German.`,
  });
  // console.log(res);
  expect(JSON.parse(res).response).toBeDefined();
});
// Multimodal: pass a base64 image to a llava model via bind().
test.skip("Test Ollama with an image", async () => {
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = path.dirname(__filename);
  const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg"));
  const model = new Ollama({
    model: "llava",
    baseUrl: "http://127.0.0.1:11434",
  }).bind({
    images: [imageData.toString("base64")],
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("What's in this image?");
  // console.log({ res });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/layerup_security.test.ts
|
import { test } from "@jest/globals";
import {
LLM,
type BaseLLMCallOptions,
} from "@langchain/core/language_models/llms";
import { GuardrailResponse } from "@layerup/layerup-security/types.js";
import {
LayerupSecurity,
LayerupSecurityOptions,
} from "../layerup_security.js";
// Mock LLM for testing purposes: always returns a fixed greeting so no real
// model call is needed.
export class MockLLM extends LLM {
  static lc_name() {
    return "MockLLM";
  }

  lc_serializable = true;

  _llmType() {
    return "mock_llm";
  }

  async _call(_input: string, _options?: BaseLLMCallOptions): Promise<string> {
    return "Hi Bob! How are you?";
  }
}
// An invalid Layerup API key should cause invoke() to reject, even though
// the wrapped LLM itself would succeed.
test("Test LayerupSecurity with invalid API key", async () => {
  const mockLLM = new MockLLM({});
  const layerupSecurityOptions: LayerupSecurityOptions = {
    llm: mockLLM,
    layerupApiKey: "-- invalid API key --",
    layerupApiBaseUrl: "https://api.uselayerup.com/v1",
    promptGuardrails: [],
    responseGuardrails: ["layerup.hallucination"],
    mask: false,
    metadata: { customer: "example@uselayerup.com" },
    handleResponseGuardrailViolation: (violation: GuardrailResponse) => ({
      role: "assistant",
      content: `Custom canned response with dynamic data! The violation rule was ${violation.offending_guardrail}.`,
    }),
  };
  await expect(async () => {
    const layerupSecurity = new LayerupSecurity(layerupSecurityOptions);
    await layerupSecurity.invoke(
      "My name is Bob Dylan. My SSN is 123-45-6789."
    );
  }).rejects.toThrowError();
}, 50000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts
|
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test } from "@jest/globals";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { LlamaCpp } from "../llama_cpp.js";
// Path to the local GGUF model file; all tests below require it.
const llamaPath = getEnvironmentVariable("LLAMA_PATH")!;
// Basic completion smoke test. NOTE(review): several tests in this file
// share the name "Test Llama_CPP" — consider giving them distinct names.
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({ modelPath: llamaPath });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Where do Llamas live?");
  // console.log(res);
}, 100000);
// Second basic completion with a different prompt.
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({ modelPath: llamaPath });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Where do Pandas live?");
  // console.log(res);
}, 100000);
// Repeated queries must never leak the system prompt into responses.
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({ modelPath: llamaPath });
  // Attempt to make several queries and make sure that the system prompt
  // is not returned as part of any follow-on query.
  for (let i = 0; i < 5; i += 1) {
    const res = await model.invoke("Where do Pandas live?");
    expect(res).not.toContain(
      "You are a helpful, respectful and honest assistant."
    );
  }
}, 100000);
// Streaming should yield multiple chunks.
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({
    modelPath: llamaPath,
    temperature: 0.7,
  });
  const stream = await model.stream(
    "Tell me a short story about a happy Llama."
  );
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
    process.stdout.write(chunks.join(""));
  }
  expect(chunks.length).toBeGreaterThan(1);
});
// gbnf grammar test
const gbnfListGrammer =
  'root ::= item+ # Excludes various line break characters item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"';
// Constrained generation via a GBNF grammar (bulleted-list shape).
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({
    modelPath: llamaPath,
    gbnf: gbnfListGrammer,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke(
    "Can you give me a list of 3 cute llama names?"
  );
  // console.log(res);
}, 100000);
// JSON schema test
const schemaJSON = {
  type: "object",
  properties: {
    responseMessage: {
      type: "string",
    },
    responseMetaData: {
      type: "string",
    },
    requestPositivityScoreFromOneToTen: {
      type: "number",
    },
  },
};
// Constrained generation via a JSON schema.
test.skip("Test Llama_CPP", async () => {
  const model = await LlamaCpp.initialize({
    modelPath: llamaPath,
    jsonSchema: schemaJSON,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Where do llamas live?");
  // console.log(res);
}, 100000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/ibm.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-explicit-any */
import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js";
import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js";
import { authenticateAndSetInstance } from "../../utils/ibm.js";
import { WatsonxEmbeddings } from "../../embeddings/ibm.js";
const fakeAuthProp = {
watsonxAIAuthType: "iam",
watsonxAIApikey: "fake_key",
};
/**
 * Identity helper: returns its argument unchanged while preserving the
 * inferred (e.g. `keyof`) type of the key.
 */
export function getKey<K>(key: K): K {
  const typedKey = key;
  return typedKey;
}
/**
 * Recursively assert that `instance` carries every property in `testProps`
 * (strict equality on leaf values) and, when `notExTestProps` is given,
 * that none of those properties are defined on the instance.
 */
export const testProperties = (
  instance: WatsonxLLM | WatsonxEmbeddings,
  testProps: WatsonxInputLLM,
  notExTestProps?: { [key: string]: any }
) => {
  const checkProperty = <T extends { [key: string]: any }>(
    testProps: T,
    instance: T,
    existing = true
  ) => {
    Object.keys(testProps).forEach((key) => {
      const keys = getKey<keyof T>(key);
      type Type = Pick<T, typeof keys>;
      // Nested objects are compared field-by-field via recursion.
      if (typeof testProps[key as keyof T] === "object")
        checkProperty<Type>(testProps[key as keyof T], instance[key], existing);
      else {
        if (existing)
          expect(instance[key as keyof T]).toBe(testProps[key as keyof T]);
        else if (instance) expect(instance[key as keyof T]).toBeUndefined();
      }
    });
  };
  // First pass: all expected properties must be present and equal.
  checkProperty<typeof testProps>(testProps, instance);
  // Second pass (optional): listed properties must be absent.
  if (notExTestProps)
    checkProperty<typeof notExTestProps>(notExTestProps, instance, false);
};
// Offline unit tests for the WatsonxLLM wrapper: constructor validation,
// exposed method surface, and configuration propagation. Auth is stubbed via
// fakeAuthProp, so no network calls are made.
describe("LLM unit tests", () => {
  describe("Positive tests", () => {
    test("Test authentication function", () => {
      const instance = authenticateAndSetInstance({
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        ...fakeAuthProp,
      });
      // The helper must return a concrete watsonx.ai SDK client.
      expect(instance).toBeInstanceOf(WatsonxAiMlVml_v1);
    });
    test("Test basic properties after init", async () => {
      const testProps = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const instance = new WatsonxLLM({ ...testProps, ...fakeAuthProp });
      testProperties(instance, testProps);
    });
    test("Test methods after init", () => {
      const testProps: WatsonxInputLLM = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const instance = new WatsonxLLM({
        ...testProps,
        ...fakeAuthProp,
      });
      // Core LangChain LLM surface should be present on the wrapper.
      expect(instance.getNumTokens).toBeDefined();
      expect(instance._generate).toBeDefined();
      expect(instance._streamResponseChunks).toBeDefined();
      expect(instance.invocationParams).toBeDefined();
    });
    test("Test properties after init", async () => {
      // Exhaustive set of tunable generation parameters — all must round-trip
      // onto the constructed instance unchanged.
      const testProps: WatsonxInputLLM = {
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
        model: "ibm/granite-13b-chat-v2",
        maxNewTokens: 100,
        decodingMethod: "sample",
        lengthPenalty: { decay_factor: 1, start_index: 1 },
        minNewTokens: 10,
        randomSeed: 1,
        stopSequence: ["hello"],
        temperature: 0.1,
        timeLimit: 10000,
        topK: 1,
        topP: 1,
        repetitionPenalty: 1,
        truncateInpuTokens: 1,
        returnOptions: {
          input_text: true,
          generated_tokens: true,
          input_tokens: true,
          token_logprobs: true,
          token_ranks: true,
          top_n_tokens: 2,
        },
        includeStopSequence: false,
        maxRetries: 3,
        maxConcurrency: 3,
      };
      const instance = new WatsonxLLM({ ...testProps, ...fakeAuthProp });
      testProperties(instance, testProps);
    });
  });
  describe("Negative tests", () => {
    test("Missing id", async () => {
      // Neither projectId nor spaceId supplied — construction must fail.
      const testProps: WatsonxInputLLM = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
      };
      expect(
        () =>
          new WatsonxLLM({
            ...testProps,
            ...fakeAuthProp,
          })
      ).toThrowError();
    });
    test("Missing other props", async () => {
      // Each required constructor field is omitted in turn; every variant
      // must throw.
      // @ts-expect-error Intentionally passing not enough parameters
      const testPropsProjectId: WatsonxInputLLM = {
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      expect(
        () =>
          new WatsonxLLM({
            ...testPropsProjectId,
            ...fakeAuthProp,
          })
      ).toThrowError();
      // @ts-expect-error Intentionally passing not enough parameters
      const testPropsServiceUrl: WatsonxInputLLM = {
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
      };
      expect(
        () =>
          new WatsonxLLM({
            ...testPropsServiceUrl,
            ...fakeAuthProp,
          })
      ).toThrowError();
      const testPropsVersion = {
        version: "2024-05-31",
      };
      expect(
        () =>
          new WatsonxLLM({
            // @ts-expect-error Intentionally passing wrong type of an object
            testPropsVersion,
          })
      ).toThrowError();
    });
    test("Passing more than one id", async () => {
      // projectId and spaceId are mutually exclusive.
      const testProps: WatsonxInputLLM = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
        spaceId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      expect(
        () =>
          new WatsonxLLM({
            ...testProps,
            ...fakeAuthProp,
          })
      ).toThrowError();
    });
    test("Not existing property passed", async () => {
      const testProps = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const notExTestProps = {
        notExisting: 12,
        notExObj: {
          notExProp: 12,
        },
      };
      // Unknown keys must be dropped rather than copied onto the instance.
      const instance = new WatsonxLLM({
        ...testProps,
        ...notExTestProps,
        ...fakeAuthProp,
      });
      testProperties(instance, testProps, notExTestProps);
    });
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/cohere.int.test.ts
|
import { test } from "@jest/globals";
import { Cohere } from "../cohere.js";
// Smoke test: a short, token-capped completion request should resolve
// without throwing (requires live Cohere credentials).
test("Test Cohere", async () => {
  const llm = new Cohere({ maxTokens: 20 });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const completion = await llm.invoke("1 + 1 =");
  // console.log(completion);
}, 50000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/ai21.int.test.ts
|
import { test, describe, expect } from "@jest/globals";
import { AI21 } from "../ai21.js";
// Integration tests for the AI21 LLM wrapper (skipped by default — they make
// real network calls and need live credentials).
describe.skip("AI21", () => {
  test("test call", async () => {
    const ai21 = new AI21({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await ai21.invoke(
      "What is a good name for a company that makes colorful socks?"
    );
    // console.log({ result });
  });
  test("test translation call", async () => {
    const ai21 = new AI21({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await ai21.invoke(
      `Translate "I love programming" into German.`
    );
    // console.log({ result });
  });
  test("test JSON output call", async () => {
    const ai21 = new AI21({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await ai21.invoke(
      `Output a JSON object with three string fields: "name", "birthplace", "bio".`
    );
    // console.log({ result });
  });
  test("should abort the request", async () => {
    const ai21 = new AI21({});
    const controller = new AbortController();
    await expect(() => {
      // Abort immediately after dispatch; the pending promise must reject.
      const ret = ai21.invoke("Respond with an extremely verbose response", {
        signal: controller.signal,
      });
      controller.abort();
      return ret;
    }).rejects.toThrow("AbortError: This operation was aborted");
  });
  test("throws an error when response status is not ok", async () => {
    // A bad key should surface the HTTP status in the thrown error message.
    const ai21 = new AI21({
      ai21ApiKey: "BAD_KEY",
    });
    await expect(ai21.invoke("Test prompt")).rejects.toThrow(
      "AI21 call failed with status code 401"
    );
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/sagemaker_endpoint.int.test.ts
|
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { expect, test } from "@jest/globals";
import {
SageMakerEndpoint,
SageMakerLLMContentHandler,
} from "../sagemaker_endpoint.js";
// yarn test:single /{path_to}/langchain/src/llms/tests/sagemaker.int.test.ts
// Integration tests for SageMakerEndpoint (skipped — they require a deployed
// Llama-2 endpoint plus AWS credentials in the environment).
describe.skip("Test SageMaker LLM", () => {
  test("without streaming", async () => {
    // Shape of one element of the endpoint's non-streaming JSON response.
    interface ResponseJsonInterface {
      generation: {
        content: string;
      };
    }
    // Serializes prompts into the Llama-2 chat request format and extracts
    // the generated text from the JSON response.
    class LLama213BHandler implements SageMakerLLMContentHandler {
      contentType = "application/json";
      accepts = "application/json";
      async transformInput(
        prompt: string,
        modelKwargs: Record<string, unknown>
      ): Promise<Uint8Array> {
        const payload = {
          inputs: [[{ role: "user", content: prompt }]],
          parameters: modelKwargs,
        };
        const input_str = JSON.stringify(payload);
        return new TextEncoder().encode(input_str);
      }
      async transformOutput(output: Uint8Array): Promise<string> {
        const response_json = JSON.parse(
          new TextDecoder("utf-8").decode(output)
        ) as ResponseJsonInterface[];
        // Defensive: fall back to "" if the payload shape is unexpected.
        const content = response_json[0]?.generation.content ?? "";
        return content;
      }
    }
    const contentHandler = new LLama213BHandler();
    const model = new SageMakerEndpoint({
      endpointName: "aws-productbot-ai-dev-llama-2-13b-chat",
      streaming: false,
      modelKwargs: {
        temperature: 0.5,
        max_new_tokens: 700,
        top_p: 0.9,
      },
      endpointKwargs: {
        CustomAttributes: "accept_eula=true",
      },
      contentHandler,
      clientOptions: {
        region: "us-east-1",
        credentials: {
          accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
        },
      },
    });
    const response = await model.invoke(
      "hello, my name is John Doe, tell me a fun story about llamas."
    );
    expect(response.length).toBeGreaterThan(0);
  });
  test("with streaming", async () => {
    // Streaming variant: chunks arrive as plain text, so transformOutput only
    // decodes the bytes.
    class LLama213BHandler implements SageMakerLLMContentHandler {
      contentType = "application/json";
      accepts = "application/json";
      async transformInput(
        prompt: string,
        modelKwargs: Record<string, unknown>
      ): Promise<Uint8Array> {
        const payload = {
          inputs: [[{ role: "user", content: prompt }]],
          parameters: modelKwargs,
        };
        const input_str = JSON.stringify(payload);
        return new TextEncoder().encode(input_str);
      }
      async transformOutput(output: Uint8Array): Promise<string> {
        return new TextDecoder("utf-8").decode(output);
      }
    }
    const contentHandler = new LLama213BHandler();
    const model = new SageMakerEndpoint({
      endpointName: "aws-productbot-ai-dev-llama-2-13b-chat",
      streaming: true, // specify streaming
      modelKwargs: {
        temperature: 0.5,
        max_new_tokens: 700,
        top_p: 0.9,
      },
      endpointKwargs: {
        CustomAttributes: "accept_eula=true",
      },
      contentHandler,
      clientOptions: {
        region: "us-east-1",
        credentials: {
          accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
        },
      },
    });
    const response = await model.invoke(
      "hello, my name is John Doe, tell me a fun story about llamas in 3 paragraphs"
    );
    // NOTE(review): `invoke` resolves to a string here, so this loop iterates
    // that string's characters rather than consuming an async stream —
    // `model.stream(...)` would exercise streaming properly; confirm intent.
    const chunks = [];
    for await (const chunk of response) {
      chunks.push(chunk);
    }
    expect(response.length).toBeGreaterThan(0);
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/deepinfra.int.test.ts
|
import { test } from "@jest/globals";
import { DeepInfraLLM } from "../deepinfra.js";
// Smoke test: a token-capped DeepInfra completion should resolve without
// throwing (requires live credentials).
test("Test DeepInfra", async () => {
  const llm = new DeepInfraLLM({ maxTokens: 20 });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const completion = await llm.invoke("1 + 1 =");
  // console.log(completion);
}, 50000);
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/fireworks.int.test.ts
|
import { test, expect } from "@jest/globals";
import { Fireworks } from "../fireworks.js";
// Integration tests for the Fireworks LLM wrapper (requires live credentials).
describe("Fireworks", () => {
  test("call", async () => {
    const llm = new Fireworks({ maxTokens: 50 });
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const completion = await llm.invoke("1 + 1 = ");
    // console.log({ completion });
  });
  test("generate", async () => {
    const llm = new Fireworks({ maxTokens: 50 });
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const completion = await llm.generate(["1 + 1 = "]);
    // console.log(JSON.stringify(completion, null, 2));
    // Batched prompts are not supported, so multi-prompt generate must throw.
    await expect(
      async () => await llm.generate(["1 + 1 = ", "2 + 2 = "])
    ).rejects.toThrow();
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/replicate.int.test.ts
|
import { test, expect } from "@jest/globals";
import { Replicate } from "../replicate.js";
// Test skipped because Replicate appears to be timing out often when called
test.skip("Test Replicate", async () => {
  const model = new Replicate({
    model:
      "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    input: {
      max_length: 10,
    },
  });
  const res = await model.invoke("Hello, my name is ");
  // console.log({ res });
  expect(typeof res).toBe("string");
});
test.skip("Test Replicate streaming", async () => {
  const model = new Replicate({
    model:
      "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    input: {
      max_length: 10,
    },
  });
  // Streaming should yield more than one chunk for a non-trivial completion.
  const stream = await model.stream("Hello, my name is ");
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  // console.log(chunks);
  expect(chunks.length).toBeGreaterThan(1);
});
test.skip("Serialise Replicate", () => {
  const model = new Replicate({
    model:
      "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    input: {
      max_length: 10,
    },
  });
  // The serialized form must reference the API key as a secret, never inline.
  const serialised = JSON.stringify(model.toJSON());
  expect(JSON.parse(serialised)).toMatchInlineSnapshot(`
    {
      "id": [
        "langchain",
        "llms",
        "replicate",
        "Replicate",
      ],
      "kwargs": {
        "api_key": {
          "id": [
            "REPLICATE_API_TOKEN",
          ],
          "lc": 1,
          "type": "secret",
        },
        "input": {
          "max_length": 10,
        },
        "model": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
      },
      "lc": 1,
      "type": "constructor",
    }
  `);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/arcjet.test.ts
|
import { test } from "@jest/globals";
import {
LLM,
type BaseLLMCallOptions,
} from "@langchain/core/language_models/llms";
import { ArcjetRedact } from "../arcjet.js";
/**
 * Minimal LLM stub for testing: its output is produced by an injectable
 * callback, and it throws when invoked without one.
 */
export class MockLLM extends LLM {
  static lc_name() {
    return "MockLLM";
  }

  lc_serializable = true;

  callback?: (input: string) => string;

  constructor(callback?: (input: string) => string) {
    super({});
    this.callback = callback;
  }

  _llmType() {
    return "mock_llm";
  }

  async _call(input: string, _options?: BaseLLMCallOptions): Promise<string> {
    // Guard clause: without a callback there is nothing to produce.
    if (typeof this.callback === "undefined") {
      throw new Error("no callback");
    }
    return this.callback(input);
  }
}
// ArcjetRedact wraps an LLM, redacting sensitive entities from the prompt and
// unredacting them in the response. The MockLLM callback lets each test
// observe exactly what the wrapped model received.
test("It calls the base LLM correctly", async () => {
  const callback = (input: string) => {
    expect(input).toEqual("this is the input");
    return "this is the output";
  };
  const mockLLM = new MockLLM(callback);
  const options = {
    llm: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke("this is the input");
  expect(output).toEqual("this is the output");
});
test("It performs redactions and unredactions", async () => {
  // The model sees the placeholder; the caller gets the original value back.
  const callback = (input: string) => {
    expect(input).toEqual("email <Redacted email #0>");
    return "your email is <Redacted email #0>";
  };
  const mockLLM = new MockLLM(callback);
  const options = {
    llm: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke("email test@example.com");
  expect(output).toEqual("your email is test@example.com");
});
test("It only redacts configured entities", async () => {
  // Email stays visible because only phone numbers are configured.
  const callback = (input: string) => {
    expect(input).toEqual(
      "email test@example.com phone <Redacted phone number #0>"
    );
    return "your phone number is <Redacted phone number #0>";
  };
  const mockLLM = new MockLLM(callback);
  const options = {
    llm: mockLLM,
    entities: ["phone-number" as const],
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke(
    "email test@example.com phone +35312345678"
  );
  expect(output).toEqual("your phone number is +35312345678");
});
test("It redacts custom entities", async () => {
  const callback = (input: string) => {
    expect(input).toEqual("custom <Redacted custom-entity #0>");
    return "custom is <Redacted custom-entity #0>";
  };
  const mockLLM = new MockLLM(callback);
  // Custom detector: per-token classification, undefined means "not sensitive".
  const customDetector = (tokens: string[]) => {
    return tokens.map((t) =>
      t === "my-custom-string-to-be-detected" ? "custom-entity" : undefined
    );
  };
  const options = {
    llm: mockLLM,
    entities: ["custom-entity" as const],
    detect: customDetector,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke(
    "custom my-custom-string-to-be-detected"
  );
  expect(output).toEqual("custom is my-custom-string-to-be-detected");
});
test("It provides the correct number of tokens to the context window", async () => {
  const callback = (input: string) => {
    expect(input).toEqual("this is a sentence for testing");
    return "this is a sentence for testing";
  };
  const mockLLM = new MockLLM(callback);
  // The detector must be invoked with windows of exactly contextWindowSize tokens.
  const customDetector = (tokens: string[]) => {
    expect(tokens).toHaveLength(4);
    return tokens.map(() => undefined);
  };
  const options = {
    llm: mockLLM,
    entities: ["email" as const],
    detect: customDetector,
    contextWindowSize: 4,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke("this is a sentence for testing");
  expect(output).toEqual("this is a sentence for testing");
});
test("It uses custom replacers", async () => {
  const callback = (input: string) => {
    expect(input).toEqual(
      "custom <Redacted custom-entity #0> email redacted@example.com"
    );
    return "custom is <Redacted custom-entity #0> email is redacted@example.com";
  };
  const mockLLM = new MockLLM(callback);
  const customDetector = (tokens: string[]) => {
    return tokens.map((t) =>
      t === "my-custom-string-to-be-detected" ? "custom-entity" : undefined
    );
  };
  // Replacer overrides the default placeholder for emails only; custom-entity
  // falls back to the standard "<Redacted …>" format.
  const customReplacer = (detected: string) => {
    return detected === "email" ? "redacted@example.com" : undefined;
  };
  const options = {
    llm: mockLLM,
    entities: ["custom-entity" as const, "email" as const],
    detect: customDetector,
    replace: customReplacer,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke(
    "custom my-custom-string-to-be-detected email test@example.com"
  );
  expect(output).toEqual(
    "custom is my-custom-string-to-be-detected email is test@example.com"
  );
});
test("It throws when no entities are configured", async () => {
  // An explicitly empty entity list is a configuration error, not a no-op.
  const mockLLM = new MockLLM();
  const options = {
    llm: mockLLM,
    entities: [],
  };
  expect(() => {
    // eslint-disable-next-line no-new
    new ArcjetRedact(options);
  }).toThrow("no entities configured for redaction");
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/togetherai.int.test.ts
|
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { TogetherAI } from "../togetherai.js";
// Integration tests for TogetherAI (skipped — they require a live API key).
test.skip("TogetherAI can make a request to an LLM", async () => {
  const model = new TogetherAI({
    modelName: "togethercomputer/StripedHyena-Nous-7B",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant."],
    ["human", "Tell me a joke about bears."],
  ]);
  const chain = prompt.pipe(model);
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await chain.invoke({});
  // console.log("result", result);
});
test.skip("TogetherAI can stream responses", async () => {
  const model = new TogetherAI({
    modelName: "togethercomputer/StripedHyena-Nous-7B",
    streaming: true,
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant."],
    ["human", "Tell me a joke about bears."],
  ]);
  const chain = prompt.pipe(model);
  const stream = await chain.stream({});
  // A streamed completion should arrive in more than one chunk.
  let numItems = 0;
  let fullText = "";
  for await (const item of stream) {
    // console.log("stream item", item);
    fullText += item;
    numItems += 1;
  }
  // console.log(fullText);
  expect(numItems).toBeGreaterThan(1);
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/friendli.int.test.ts
|
import { test } from "@jest/globals";
import { Friendli } from "../friendli.js";
describe.skip("Friendli", () => {
test("call", async () => {
const friendli = new Friendli({ maxTokens: 20 });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await friendli.invoke("1 + 1 = ");
// console.log({ res });
});
test("generate", async () => {
const friendli = new Friendli({ maxTokens: 20 });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await friendli.generate(["1 + 1 = "]);
// console.log(JSON.stringify(res, null, 2));
});
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/ibm.int.test.ts
|
/* eslint-disable no-process-env */
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { LLMResult } from "@langchain/core/outputs";
import { StringPromptValue } from "@langchain/core/prompt_values";
import { TokenUsage } from "../../types/ibm.js";
import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js";
// Snapshot of the env flag so the token-usage test can restore it afterwards.
const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
describe("Text generation", () => {
  // Live integration tests: invoke() against a real watsonx.ai deployment.
  // Requires WATSONX_AI_SERVICE_URL / WATSONX_AI_PROJECT_ID plus credentials.
  describe("Test invoke method", () => {
    test("Correct value", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      });
      await watsonXInstance.invoke("Hello world?");
    });
    test("Overwritte params", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      });
      // Per-call parameters should override the constructor defaults.
      await watsonXInstance.invoke("Hello world?", {
        parameters: { maxNewTokens: 10 },
      });
    });
    test("Invalid projectId", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: "Test wrong value",
      });
      await expect(watsonXInstance.invoke("Hello world?")).rejects.toThrow();
    });
    test("Invalid credentials", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: "Test wrong value",
        watsonxAIAuthType: "iam",
        watsonxAIApikey: "WrongApiKey",
        watsonxAIUrl: "https://wrong.wrong/",
      });
      await expect(watsonXInstance.invoke("Hello world?")).rejects.toThrow();
    });
    test("Wrong value", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      });
      // @ts-expect-error Intentionally passing wrong value
      await watsonXInstance.invoke({});
    });
    test("Stop", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      });
      await watsonXInstance.invoke("Hello, how are you?", {
        stop: ["Hello"],
      });
    }, 5000);
    test("Stop with timeout", async () => {
      // Unreachable serviceUrl + tiny timeout: the call must abort.
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: "sdadasdas" as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
        maxRetries: 3,
      });
      await expect(() =>
        watsonXInstance.invoke("Print hello world", { timeout: 10 })
      ).rejects.toThrowError("AbortError");
    }, 5000);
    test("Signal in call options", async () => {
      const watsonXInstance = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
        maxRetries: 3,
      });
      // A signal that never fires must not interfere with the call…
      const controllerNoAbortion = new AbortController();
      await expect(
        watsonXInstance.invoke("Print hello world", {
          signal: controllerNoAbortion.signal,
        })
      ).resolves.toBeDefined();
      // …while an aborted signal must reject the in-flight request.
      const controllerToAbort = new AbortController();
      await expect(async () => {
        const ret = watsonXInstance.invoke("Print hello world", {
          signal: controllerToAbort.signal,
        });
        controllerToAbort.abort();
        return ret;
      }).rejects.toThrowError("AbortError");
    }, 5000);
    test("Concurenccy", async () => {
      // maxConcurrency: 1 serializes the two requests; both should still settle.
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        maxConcurrency: 1,
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      });
      await Promise.all([
        model.invoke("Print hello world"),
        model.invoke("Print hello world"),
      ]);
    });
    test("Token usage", async () => {
      // Run callbacks inline so handleLLMEnd completes before assertions.
      process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
      try {
        const tokenUsage: TokenUsage = {
          generated_token_count: 0,
          input_token_count: 0,
        };
        const model = new WatsonxLLM({
          model: "ibm/granite-13b-chat-v2",
          version: "2024-05-31",
          maxNewTokens: 1,
          maxConcurrency: 1,
          serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
          projectId: process.env.WATSONX_AI_PROJECT_ID,
          callbacks: CallbackManager.fromHandlers({
            async handleLLMEnd(output: LLMResult) {
              const singleTokenUsage: TokenUsage | undefined =
                output.llmOutput?.tokenUsage;
              if (singleTokenUsage) {
                tokenUsage.generated_token_count +=
                  singleTokenUsage.generated_token_count;
                tokenUsage.input_token_count +=
                  singleTokenUsage.input_token_count;
              }
            },
          }),
        });
        await model.invoke("Hello");
        expect(tokenUsage.generated_token_count).toBe(1);
        expect(tokenUsage.input_token_count).toBe(1);
      } finally {
        process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
      }
    });
    test("Streaming mode", async () => {
      // The final invoke() result must equal the concatenation of streamed
      // tokens, and the token callback count must match reported usage.
      let countedTokens = 0;
      let streamedText = "";
      let usedTokens = 0;
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
        streaming: true,
        callbacks: CallbackManager.fromHandlers({
          async handleLLMEnd(output) {
            usedTokens = output.llmOutput?.tokenUsage.generated_token_count;
          },
          async handleLLMNewToken(token: string) {
            countedTokens += 1;
            streamedText += token;
          },
        }),
      });
      const res = await model.invoke("Print hello world?");
      expect(countedTokens).toBe(usedTokens);
      expect(res).toBe(streamedText);
    });
  });
  // Live integration tests for batch generation (generate / generatePrompt).
  describe("Test generate methods", () => {
    test("Basic usage", async () => {
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
      });
      const res = await model.generate([
        "Print hello world!",
        "Print hello universe!",
      ]);
      // One generation entry per input prompt.
      expect(res.generations.length).toBe(2);
    });
    test("Stop", async () => {
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 100,
      });
      const res = await model.generate(
        ["Print hello world!", "Print hello world hello!"],
        {
          stop: ["Hello"],
        }
      );
      // Generation must halt at the stop sequence, so "world" never appears.
      expect(
        res.generations
          .map((generation) => generation.map((item) => item.text))
          .join("")
          .indexOf("world")
      ).toBe(-1);
    });
    test("Streaming mode with multiple prompts", async () => {
      // Per-prompt token accounting: idx.prompt routes each token callback to
      // the counters of the prompt that produced it.
      const nrNewTokens = [0, 0, 0];
      const completions = ["", "", ""];
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
        streaming: true,
        callbacks: CallbackManager.fromHandlers({
          async handleLLMNewToken(token: string, idx) {
            nrNewTokens[idx.prompt] += 1;
            completions[idx.prompt] += token;
          },
        }),
      });
      const res = await model.generate([
        "Print bye bye world!",
        "Print bye bye world!",
        "Print Hello IBM!",
      ]);
      res.generations.forEach((generation, index) => {
        generation.forEach((g) => {
          expect(g.generationInfo?.generated_token_count).toBe(
            nrNewTokens[index]
          );
        });
      });
      nrNewTokens.forEach((tokens) => expect(tokens > 0).toBe(true));
      expect(res.generations.length).toBe(3);
    });
    test("Prompt value", async () => {
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 5,
      });
      const res = await model.generatePrompt([
        new StringPromptValue("Print hello world!"),
      ]);
      for (const generation of res.generations) {
        expect(generation.length).toBe(1);
      }
    });
  });
  // Live integration tests for the async-iterator stream() API.
  describe("Test stream method", () => {
    test("Basic usage", async () => {
      // Chunks yielded by the iterator must match the token callbacks exactly.
      let countedTokens = 0;
      let streamedText = "";
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 100,
        callbacks: CallbackManager.fromHandlers({
          async handleLLMNewToken(token: string) {
            countedTokens += 1;
            streamedText += token;
          },
        }),
      });
      const stream = await model.stream("Print hello world.");
      const chunks = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }
      expect(chunks.length).toBeGreaterThan(1);
      expect(chunks.join("")).toBe(streamedText);
    });
    test("Stop", async () => {
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 100,
      });
      const stream = await model.stream("Print hello world!", {
        stop: ["Hello"],
      });
      const chunks = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }
      // Stop sequence must cut the stream before "world" is emitted.
      expect(chunks.join("").indexOf("world")).toBe(-1);
    });
    test("Timeout", async () => {
      // A short AbortSignal.timeout should reject mid-stream.
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 1000,
      });
      await expect(async () => {
        const stream = await model.stream(
          "How is your day going? Be precise and tell me a lot about it/",
          {
            signal: AbortSignal.timeout(750),
          }
        );
        const chunks = [];
        for await (const chunk of stream) {
          chunks.push(chunk);
        }
      }).rejects.toThrowError();
    });
    test("Signal in call options", async () => {
      // Aborting after a few chunks must terminate iteration with a rejection.
      const model = new WatsonxLLM({
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxNewTokens: 1000,
      });
      const controller = new AbortController();
      await expect(async () => {
        const stream = await model.stream(
          "How is your day going? Be precise and tell me a lot about it",
          {
            signal: controller.signal,
          }
        );
        const chunks = [];
        let i = 0;
        for await (const chunk of stream) {
          i += 1;
          chunks.push(chunk);
          if (i === 5) {
            controller.abort();
          }
        }
      }).rejects.toThrowError();
    });
  });
  // Live integration tests for server-side tokenization via getNumTokens.
  describe("Test getNumToken method", () => {
    test("Passing correct value", async () => {
      const testProps: WatsonxInputLLM = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
      };
      const instance = new WatsonxLLM({
        ...testProps,
      });
      await expect(
        instance.getNumTokens("Hello")
      ).resolves.toBeGreaterThanOrEqual(0);
      await expect(
        instance.getNumTokens("Hello", { return_tokens: true })
      ).resolves.toBeGreaterThanOrEqual(0);
    });
    test("Passing wrong value", async () => {
      const testProps: WatsonxInputLLM = {
        model: "ibm/granite-13b-chat-v2",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID,
        maxRetries: 3,
      };
      const instance = new WatsonxLLM({
        ...testProps,
      });
      // Non-string input and unknown options must be rejected, not coerced.
      // @ts-expect-error Intentionally passing wrong parameter
      await expect(instance.getNumTokens(12)).rejects.toThrowError();
      await expect(
        // @ts-expect-error Intentionally passing wrong parameter
        instance.getNumTokens(12, { wrong: "Wrong" })
      ).rejects.toThrowError();
    });
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms
|
lc_public_repos/langchainjs/libs/langchain-community/src/llms/tests/aleph_alpha.int.test.ts
|
import { test, describe, expect } from "@jest/globals";
import { AlephAlpha } from "../aleph_alpha.js";
// Integration tests for the Aleph Alpha LLM wrapper (requires a live API key).
describe("Aleph Alpha", () => {
  test("test call", async () => {
    const aleph_alpha = new AlephAlpha({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await aleph_alpha.invoke(
      "What is a good name for a company that makes colorful socks?"
    );
    // console.log({ result });
  });
  test("test translation call", async () => {
    const aleph_alpha = new AlephAlpha({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await aleph_alpha.invoke(
      `Translate "I love programming" into German.`
    );
    // console.log({ result });
  });
  test("test JSON output call", async () => {
    const aleph_alpha = new AlephAlpha({});
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await aleph_alpha.invoke(
      `Output a JSON object with three string fields: "name", "birthplace", "bio".`
    );
    // console.log({ result });
  });
  test("should abort the request", async () => {
    const aleph_alpha = new AlephAlpha({});
    const controller = new AbortController();
    await expect(() => {
      // Abort immediately after dispatch; the pending promise must reject.
      const ret = aleph_alpha.invoke(
        "Respond with an extremely verbose response",
        {
          signal: controller.signal,
        }
      );
      controller.abort();
      return ret;
    }).rejects.toThrow("AbortError: This operation was aborted");
  });
  test("throws an error when response status is not ok", async () => {
    // A bad key should surface the HTTP status and error body in the message.
    const aleph_alpha = new AlephAlpha({
      aleph_alpha_api_key: "BAD_KEY",
    });
    await expect(aleph_alpha.invoke("Test prompt")).rejects.toThrow(
      'Aleph Alpha call failed with status 401 and body {"error":"InvalidToken","code":"UNAUTHENTICATED"}'
    );
  });
});
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/aws_sfn.ts
|
import {
SFNClient as Client,
StartExecutionCommand as Invoker,
DescribeExecutionCommand as Describer,
SendTaskSuccessCommand as TaskSuccessSender,
} from "@aws-sdk/client-sfn";
import { Tool, ToolParams } from "@langchain/core/tools";
/**
 * Interface for AWS Step Functions configuration.
 */
export interface SfnConfig {
  /** ARN of the Step Functions state machine to execute. */
  stateMachineArn: string;
  /** Optional AWS region for the SFN client. */
  region?: string;
  /** Static credentials; only forwarded to the client when BOTH
   * accessKeyId and secretAccessKey are set (see getClientConstructorArgs). */
  accessKeyId?: string;
  secretAccessKey?: string;
}
/**
 * Interface for AWS Step Functions client constructor arguments.
 * Mirrors the subset of SFNClient options this module populates.
 */
interface SfnClientConstructorArgs {
  region?: string;
  credentials?: {
    accessKeyId: string;
    secretAccessKey: string;
  };
}
/**
* Class for starting the execution of an AWS Step Function.
*/
export class StartExecutionAWSSfnTool extends Tool {
  static lc_name() {
    return "StartExecutionAWSSfnTool";
  }

  private sfnConfig: SfnConfig;

  public name: string;

  public description: string;

  constructor({
    name,
    description,
    ...rest
  }: SfnConfig & { name: string; description: string }) {
    super();
    this.name = name;
    this.description = description;
    this.sfnConfig = rest;
  }

  /**
   * Generates a formatted description for the StartExecutionAWSSfnTool.
   * @param name Name of the state machine.
   * @param description Description of the state machine.
   * @returns A formatted description string.
   */
  static formatDescription(name: string, description: string): string {
    return `Use to start executing the ${name} state machine. Use to run ${name} workflows. Whenever you need to start (or execute) an asynchronous workflow (or state machine) about ${description} you should ALWAYS use this. Input should be a valid JSON string.`;
  }

  /**
   * Starts an execution of the configured state machine.
   *
   * Never rejects: every failure is logged and reported back to the caller
   * as the string "failed to complete request".
   *
   * @param input JSON string used as the execution input.
   * @returns The execution ARN on success, "request completed." when the
   *   response carries no ARN, or "failed to complete request" on any error.
   * @ignore
   */
  async _call(input: string): Promise<string> {
    const clientConstructorArgs: SfnClientConstructorArgs =
      getClientConstructorArgs(this.sfnConfig);
    const sfnClient = new Client(clientConstructorArgs);

    let payload: unknown;
    try {
      payload = JSON.parse(input);
    } catch (e) {
      console.error("Error starting state machine execution:", e);
      // BUG FIX: the previous implementation resolved here but fell through
      // and still started an execution with an undefined payload.
      return "failed to complete request";
    }

    const command = new Invoker({
      stateMachineArn: this.sfnConfig.stateMachineArn,
      input: JSON.stringify(payload),
    });

    try {
      const response = await sfnClient.send(command);
      return response.executionArn
        ? response.executionArn
        : "request completed.";
    } catch (error) {
      console.error("Error starting state machine execution:", error);
      return "failed to complete request";
    }
  }
}
/**
* Class for checking the status of an AWS Step Function execution.
*/
export class DescribeExecutionAWSSfnTool extends Tool {
static lc_name() {
return "DescribeExecutionAWSSfnTool";
}
name = "describe-execution-aws-sfn";
description =
"This tool should ALWAYS be used for checking the status of any AWS Step Function execution (aka. state machine execution). Input to this tool is a properly formatted AWS Step Function Execution ARN (executionArn). The output is a stringified JSON object containing the executionArn, name, status, startDate, stopDate, input, output, error, and cause of the execution.";
sfnConfig: Omit<SfnConfig, "stateMachineArn">;
constructor(config: Omit<SfnConfig, "stateMachineArn"> & ToolParams) {
super(config);
this.sfnConfig = config;
}
/** @ignore */
async _call(input: string) {
const clientConstructorArgs: SfnClientConstructorArgs =
getClientConstructorArgs(this.sfnConfig);
const sfnClient = new Client(clientConstructorArgs);
const command = new Describer({
executionArn: input,
});
return await sfnClient
.send(command)
.then((response) =>
response.executionArn
? JSON.stringify({
executionArn: response.executionArn,
name: response.name,
status: response.status,
startDate: response.startDate,
stopDate: response.stopDate,
input: response.input,
output: response.output,
error: response.error,
cause: response.cause,
})
: "{}"
)
.catch((error: Error) => {
console.error("Error describing state machine execution:", error);
return "failed to complete request";
});
}
}
/**
* Class for sending a task success signal to an AWS Step Function
* execution.
*/
export class SendTaskSuccessAWSSfnTool extends Tool {
static lc_name() {
return "SendTaskSuccessAWSSfnTool";
}
name = "send-task-success-aws-sfn";
description =
"This tool should ALWAYS be used for sending task success to an AWS Step Function execution (aka. statemachine exeuction). Input to this tool is a stringify JSON object containing the taskToken and output.";
sfnConfig: Omit<SfnConfig, "stateMachineArn">;
constructor(config: Omit<SfnConfig, "stateMachineArn"> & ToolParams) {
super(config);
this.sfnConfig = config;
}
/** @ignore */
async _call(input: string) {
const clientConstructorArgs: SfnClientConstructorArgs =
getClientConstructorArgs(this.sfnConfig);
const sfnClient = new Client(clientConstructorArgs);
let payload;
try {
payload = JSON.parse(input);
} catch (e) {
console.error("Error starting state machine execution:", e);
return "failed to complete request";
}
const command = new TaskSuccessSender({
taskToken: payload.taskToken,
output: JSON.stringify(payload.output),
});
return await sfnClient
.send(command)
.then(() => "request completed.")
.catch((error: Error) => {
console.error(
"Error sending task success to state machine execution:",
error
);
return "failed to complete request";
});
}
}
/**
* Helper function to construct the AWS SFN client.
*/
function getClientConstructorArgs(config: Partial<SfnConfig>) {
  const { region, accessKeyId, secretAccessKey } = config;
  const args: SfnClientConstructorArgs = {};
  // Region is forwarded only when present; credentials only when both
  // halves are supplied, matching the SFNClient's expected shape.
  if (region) {
    args.region = region;
  }
  if (accessKeyId && secretAccessKey) {
    args.credentials = { accessKeyId, secretAccessKey };
  }
  return args;
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/dynamic.ts
|
// Backwards-compatibility shim: these symbols moved to @langchain/core and
// are re-exported here unchanged so existing community imports keep working.
export {
  type BaseDynamicToolInput,
  type DynamicToolInput,
  type DynamicStructuredToolInput,
  DynamicTool,
  DynamicStructuredTool,
} from "@langchain/core/tools";
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/dataforseo_api_search.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Tool } from "@langchain/core/tools";
/**
* @interface DataForSeoApiConfig
* @description Represents the configuration object used to set up a DataForSeoAPISearch instance.
*/
export interface DataForSeoApiConfig {
/**
* @property apiLogin
* @type {string}
* @description The API login credential for DataForSEO. If not provided, it will be fetched from environment variables.
*/
apiLogin?: string;
/**
* @property apiPassword
* @type {string}
* @description The API password credential for DataForSEO. If not provided, it will be fetched from environment variables.
*/
apiPassword?: string;
/**
* @property params
* @type {Record<string, string | number | boolean>}
* @description Additional parameters to customize the API request.
*/
params?: Record<string, string | number | boolean>;
/**
* @property useJsonOutput
* @type {boolean}
* @description Determines if the output should be in JSON format.
*/
useJsonOutput?: boolean;
/**
* @property jsonResultTypes
* @type {Array<string>}
* @description Specifies the types of results to include in the output.
*/
jsonResultTypes?: Array<string>;
/**
* @property jsonResultFields
* @type {Array<string>}
* @description Specifies the fields to include in each result object.
*/
jsonResultFields?: Array<string>;
/**
* @property topCount
* @type {number}
* @description Specifies the maximum number of results to return.
*/
topCount?: number;
}
/**
 * Represents a task in the API response.
 */
type Task = {
  /** Task identifier assigned by DataForSEO. */
  id: string;
  /** Per-task status code; 20000 means success (see checkResponse). */
  status_code: number;
  status_message: string;
  time: string;
  result: Result[];
};
/**
 * Represents a result in the API response.
 */
type Result = {
  keyword: string;
  check_url: string;
  datetime: string;
  spell?: string;
  /** Item types present in `items`, e.g. "answer_box", "organic" (see processResponse). */
  item_types: string[];
  se_results_count: number;
  items_count: number;
  // Raw SERP items; shape varies per item type, hence `any`.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  items: any[];
};
/**
 * Represents the API response.
 */
type ApiResponse = {
  /** Top-level status code; 20000 means success (see checkResponse). */
  status_code: number;
  status_message: string;
  tasks: Task[];
};
/**
* @class DataForSeoAPISearch
* @extends {Tool}
* @description Represents a wrapper class to work with DataForSEO SERP API.
*/
export class DataForSeoAPISearch extends Tool {
  static lc_name() {
    return "DataForSeoAPISearch";
  }

  name = "dataforseo-api-wrapper";

  description =
    "A robust Google Search API provided by DataForSeo. This tool is handy when you need information about trending topics or current events.";

  protected apiLogin: string;

  protected apiPassword: string;

  /**
   * @property defaultParams
   * @type {Record<string, string | number | boolean>}
   * @description These are the default parameters to be used when making an API request.
   */
  protected defaultParams: Record<string, string | number | boolean> = {
    location_name: "United States",
    language_code: "en",
    depth: 10,
    se_name: "google",
    se_type: "organic",
  };

  protected params: Record<string, string | number | boolean> = {};

  protected jsonResultTypes: Array<string> | undefined;

  protected jsonResultFields: Array<string> | undefined;

  protected topCount: number | undefined;

  protected useJsonOutput = false;

  /**
   * @constructor
   * @param {DataForSeoApiConfig} config
   * @description Sets up the class, throws an error if the API login/password isn't provided.
   */
  constructor(config: DataForSeoApiConfig = {}) {
    super();
    const apiLogin =
      config.apiLogin ?? getEnvironmentVariable("DATAFORSEO_LOGIN");
    const apiPassword =
      config.apiPassword ?? getEnvironmentVariable("DATAFORSEO_PASSWORD");
    const params = config.params ?? {};
    if (!apiLogin || !apiPassword) {
      throw new Error(
        "DataForSEO login or password not set. You can set it as DATAFORSEO_LOGIN and DATAFORSEO_PASSWORD in your .env file, or pass it to DataForSeoAPISearch."
      );
    }
    // Caller-supplied params override the defaults key-by-key.
    this.params = { ...this.defaultParams, ...params };
    this.apiLogin = apiLogin;
    this.apiPassword = apiPassword;
    this.jsonResultTypes = config.jsonResultTypes;
    this.jsonResultFields = config.jsonResultFields;
    this.useJsonOutput = config.useJsonOutput ?? false;
    this.topCount = config.topCount;
  }

  /**
   * @method _call
   * @param {string} keyword
   * @returns {Promise<string>}
   * @description Initiates a call to the API and processes the response.
   */
  async _call(keyword: string): Promise<string> {
    return this.useJsonOutput
      ? JSON.stringify(await this.results(keyword))
      : this.processResponse(await this.getResponseJson(keyword));
  }

  /**
   * @method results
   * @param {string} keyword
   * @returns {Promise<Array<any>>}
   * @description Fetches the results from the API for the given keyword.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async results(keyword: string): Promise<Array<any>> {
    const res = await this.getResponseJson(keyword);
    return this.filterResults(res, this.jsonResultTypes);
  }

  /**
   * @method prepareRequest
   * @param {string} keyword
   * @returns {{url: string; headers: HeadersInit; data: BodyInit}}
   * @description Prepares the request details (URL, basic-auth header, JSON body)
   * for the API call.
   */
  protected prepareRequest(keyword: string): {
    url: string;
    headers: HeadersInit;
    data: BodyInit;
  } {
    if (this.apiLogin === undefined || this.apiPassword === undefined) {
      throw new Error("api_login or api_password is not provided");
    }
    const credentials = Buffer.from(
      `${this.apiLogin}:${this.apiPassword}`,
      "utf-8"
    ).toString("base64");
    const headers = {
      Authorization: `Basic ${credentials}`,
      "Content-Type": "application/json",
    };
    const params = { ...this.params };
    // Only fill in the keyword if the caller hasn't fixed one via params.
    params.keyword ??= keyword;
    const data = [params];
    return {
      url: `https://api.dataforseo.com/v3/serp/${params.se_name}/${params.se_type}/live/advanced`,
      headers,
      data: JSON.stringify(data),
    };
  }

  /**
   * @method getResponseJson
   * @param {string} keyword
   * @returns {Promise<ApiResponse>}
   * @description Executes a POST request to the provided URL and returns a parsed JSON response.
   * @throws On non-2xx HTTP status, or when the API reports a non-20000 status code.
   */
  protected async getResponseJson(keyword: string): Promise<ApiResponse> {
    const requestDetails = this.prepareRequest(keyword);
    const response = await fetch(requestDetails.url, {
      method: "POST",
      headers: requestDetails.headers,
      body: requestDetails.data,
    });
    if (!response.ok) {
      throw new Error(
        `Got ${response.status} error from DataForSEO: ${response.statusText}`
      );
    }
    const result: ApiResponse = await response.json();
    return this.checkResponse(result);
  }

  /**
   * @method checkResponse
   * @param {ApiResponse} response
   * @returns {ApiResponse}
   * @description Checks the top-level and per-task status codes (20000 = success).
   */
  private checkResponse(response: ApiResponse): ApiResponse {
    if (response.status_code !== 20000) {
      throw new Error(
        `Got error from DataForSEO SERP API: ${response.status_message}`
      );
    }
    for (const task of response.tasks) {
      if (task.status_code !== 20000) {
        throw new Error(
          `Got error from DataForSEO SERP API: ${task.status_message}`
        );
      }
    }
    return response;
  }

  /* eslint-disable @typescript-eslint/no-explicit-any */
  /**
   * @method filterResults
   * @param {ApiResponse} res
   * @param {Array<string> | undefined} types
   * @returns {Array<any>}
   * @description Filters the results based on the specified result types,
   * capped at `topCount` items when configured.
   */
  private filterResults(
    res: ApiResponse,
    types: Array<string> | undefined
  ): Array<any> {
    const output: Array<any> = [];
    for (const task of res.tasks || []) {
      for (const result of task.result || []) {
        for (const item of result.items || []) {
          // BUG FIX: the cap is now enforced before pushing and exits all
          // three loops; previously `break` only left the innermost loop
          // after pushing, so output could exceed topCount.
          if (this.topCount !== undefined && output.length >= this.topCount) {
            return output;
          }
          if (
            types === undefined ||
            types.length === 0 ||
            types.includes(item.type)
          ) {
            const newItem = this.cleanupUnnecessaryItems(item);
            if (Object.keys(newItem).length !== 0) {
              output.push(newItem);
            }
          }
        }
      }
    }
    return output;
  }

  /* eslint-disable @typescript-eslint/no-explicit-any */
  /* eslint-disable no-param-reassign */
  /**
   * @method cleanupUnnecessaryItems
   * @param {any} d
   * @description Recursively strips layout-only keys (xpath, position,
   * rectangle) and, when jsonResultFields is set, any key not in that list.
   */
  private cleanupUnnecessaryItems(d: any): any {
    if (Array.isArray(d)) {
      return d.map((item) => this.cleanupUnnecessaryItems(item));
    }
    const toRemove = ["xpath", "position", "rectangle"];
    if (typeof d === "object" && d !== null) {
      return Object.keys(d).reduce((newObj: any, key: string) => {
        if (
          (this.jsonResultFields === undefined ||
            this.jsonResultFields.includes(key)) &&
          !toRemove.includes(key)
        ) {
          if (typeof d[key] === "object" && d[key] !== null) {
            newObj[key] = this.cleanupUnnecessaryItems(d[key]);
          } else {
            newObj[key] = d[key];
          }
        }
        return newObj;
      }, {});
    }
    return d;
  }

  /**
   * @method processResponse
   * @param {ApiResponse} res
   * @returns {string}
   * @description Extracts the most salient text from the first result,
   * preferring answer_box > knowledge_graph > featured_snippet > shopping > organic.
   */
  protected processResponse(res: ApiResponse): string {
    let returnValue = "No good search result found";
    for (const task of res.tasks || []) {
      for (const result of task.result || []) {
        const { item_types } = result;
        const items = result.items || [];
        // BUG FIX: each lookup is now guarded with ?. and falls back to the
        // previous value; previously a listed item_type with no matching
        // item threw a TypeError on `.text` / `.description` / `.price`.
        if (item_types.includes("answer_box")) {
          returnValue =
            items.find(
              (item: { type: string; text: string }) =>
                item.type === "answer_box"
            )?.text ?? returnValue;
        } else if (item_types.includes("knowledge_graph")) {
          returnValue =
            items.find(
              (item: { type: string; description: string }) =>
                item.type === "knowledge_graph"
            )?.description ?? returnValue;
        } else if (item_types.includes("featured_snippet")) {
          returnValue =
            items.find(
              (item: { type: string; description: string }) =>
                item.type === "featured_snippet"
            )?.description ?? returnValue;
        } else if (item_types.includes("shopping")) {
          returnValue =
            items.find(
              (item: { type: string; price: string }) =>
                item.type === "shopping"
            )?.price ?? returnValue;
        } else if (item_types.includes("organic")) {
          returnValue =
            items.find(
              (item: { type: string; description: string }) =>
                item.type === "organic"
            )?.description ?? returnValue;
        }
        // NOTE(review): returnValue starts truthy, so this breaks after the
        // first result of each task — presumably intentional ("first result
        // wins"); confirm if deeper results should ever be consulted.
        if (returnValue) {
          break;
        }
      }
    }
    return returnValue;
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/searxng_search.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Tool } from "@langchain/core/tools";
/**
 * Interface for the results returned by the Searxng search.
 */
interface SearxngResults {
  query: string;
  number_of_results: number;
  /** Ranked web results; _call surfaces only title/url/content from each. */
  results: Array<{
    url: string;
    title: string;
    content: string;
    img_src: string;
    engine: string;
    parsed_url: Array<string>;
    template: string;
    engines: Array<string>;
    positions: Array<number>;
    score: number;
    category: string;
    pretty_url: string;
    open_group?: boolean;
    close_group?: boolean;
  }>;
  /** Direct answers; used as a fallback when `results` is empty. */
  answers: Array<string>;
  corrections: Array<string>;
  /** Infobox snippets; HTML tags are stripped before being returned. */
  infoboxes: Array<{
    infobox: string;
    content: string;
    engine: string;
    engines: Array<string>;
  }>;
  suggestions: Array<string>;
  unresponsive_engines: Array<string>;
}
/**
 * Interface for custom headers used in the Searxng search.
 * Arbitrary header-name → header-value pairs merged into every request.
 */
interface SearxngCustomHeaders {
  [key: string]: string;
}
interface SearxngSearchParams {
/**
* @default 10
* Number of results included in results
*/
numResults?: number;
/** Comma separated list, specifies the active search categories
* https://docs.searxng.org/user/configured_engines.html#configured-engines
*/
categories?: string;
/** Comma separated list, specifies the active search engines
* https://docs.searxng.org/user/configured_engines.html#configured-engines
*/
engines?: string;
/** Code of the language. */
language?: string;
/** Search page number. */
pageNumber?: number;
/**
* day / month / year
*
* Time range of search for engines which support it. See if an engine supports time range search in the preferences page of an instance.
*/
timeRange?: number;
/**
* Throws Error if format is set anything other than "json"
* Output format of results. Format needs to be activated in search:
*/
format?: "json";
/** Open search results on new tab. */
resultsOnNewTab?: 0 | 1;
/** Proxy image results through SearXNG. */
imageProxy?: boolean;
autocomplete?: string;
/**
* Filter search results of engines which support safe search. See if an engine supports safe search in the preferences page of an instance.
*/
safesearch?: 0 | 1 | 2;
}
/**
* SearxngSearch class represents a meta search engine tool.
* Use this class when you need to answer questions about current events.
* The input should be a search query, and the output is a JSON array of the query results.
*
* note: works best with *agentType*: `structured-chat-zero-shot-react-description`
* https://github.com/searxng/searxng
* @example
* ```typescript
* const executor = AgentExecutor.fromAgentAndTools({
* agent,
* tools: [
* new SearxngSearch({
* params: {
* format: "json",
* engines: "google",
* },
* headers: {},
* }),
* ],
* });
* const result = await executor.invoke({
* input: `What is Langchain? Describe in 50 words`,
* });
* ```
*/
export class SearxngSearch extends Tool {
  static lc_name() {
    return "SearxngSearch";
  }
  name = "searxng-search";
  description =
    "A meta search engine. Useful for when you need to answer questions about current events. Input should be a search query. Output is a JSON array of the query results";
  protected apiBase?: string;
  // Defaults; caller-supplied params are merged over these in the constructor.
  protected params?: SearxngSearchParams = {
    numResults: 10,
    pageNumber: 1,
    format: "json",
    imageProxy: true,
    safesearch: 0,
  };
  protected headers?: SearxngCustomHeaders;
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiBase: "SEARXNG_API_BASE",
    };
  }
  /**
   * Constructor for the SearxngSearch class
   * @param apiBase Base URL of the Searxng instance
   * @param params SearxNG parameters
   * @param headers Custom headers
   */
  constructor({
    apiBase,
    params,
    headers,
  }: {
    /** Base URL of Searxng instance */
    apiBase?: string;
    /** SearxNG Paramerters
     *
     * https://docs.searxng.org/dev/search_api.html check here for more details
     */
    params?: SearxngSearchParams;
    /**
     * Custom headers
     * Set custom headers if you're using a api from RapidAPI (https://rapidapi.com/iamrony777/api/searxng)
     * No headers needed for a locally self-hosted instance
     */
    headers?: SearxngCustomHeaders;
  }) {
    super(...arguments);
    // NOTE(review): the environment variable takes precedence over the
    // constructor argument here — confirm that precedence is intended.
    this.apiBase = getEnvironmentVariable("SEARXNG_API_BASE") || apiBase;
    this.headers = { "content-type": "application/json", ...headers };
    if (!this.apiBase) {
      throw new Error(
        `SEARXNG_API_BASE not set. You can set it as "SEARXNG_API_BASE" in your environment variables.`
      );
    }
    if (params) {
      this.params = { ...this.params, ...params };
    }
  }
  /**
   * Builds the URL for the Searxng search.
   * @param path The path for the URL.
   * @param parameters The parameters for the URL.
   * @param baseUrl The base URL.
   * @returns The complete URL as a string.
   */
  protected buildUrl<P extends SearxngSearchParams>(
    path: string,
    parameters: P,
    baseUrl: string
  ): string {
    const nonUndefinedParams: [string, string][] = Object.entries(parameters)
      .filter(([_, value]) => value !== undefined)
      .map(([key, value]) => [key, value.toString()]); // URLSearchParams requires string values
    const searchParams = new URLSearchParams(nonUndefinedParams);
    return `${baseUrl}/${path}?${searchParams}`;
  }
  /**
   * Runs the query against the configured SearXNG instance (5s timeout).
   * Preference order: web results (JSON array, capped at numResults) >
   * direct answers > infoboxes (HTML stripped) > suggestions; otherwise
   * "No good results found.".
   * @param input The search query.
   */
  async _call(input: string): Promise<string> {
    const queryParams = {
      q: input,
      ...this.params,
    };
    const url = this.buildUrl("search", queryParams, this.apiBase as string);
    const resp = await fetch(url, {
      method: "POST",
      headers: this.headers,
      signal: AbortSignal.timeout(5 * 1000), // 5 seconds
    });
    if (!resp.ok) {
      throw new Error(resp.statusText);
    }
    const res: SearxngResults = await resp.json();
    if (
      !res.results.length &&
      !res.answers.length &&
      !res.infoboxes.length &&
      !res.suggestions.length
    ) {
      return "No good results found.";
    } else if (res.results.length) {
      const response: string[] = [];
      res.results.forEach((r) => {
        response.push(
          JSON.stringify({
            title: r.title || "",
            link: r.url || "",
            snippet: r.content || "",
          })
        );
      });
      return response.slice(0, this.params?.numResults).toString();
    } else if (res.answers.length) {
      return res.answers[0];
    } else if (res.infoboxes.length) {
      return res.infoboxes[0]?.content.replaceAll(/<[^>]+>/gi, "");
    } else if (res.suggestions.length) {
      let suggestions = "Suggestions: ";
      res.suggestions.forEach((s) => {
        suggestions += `${s}, `;
      });
      return suggestions;
    } else {
      return "No good results found.";
    }
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/bingserpapi.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Tool } from "@langchain/core/tools";
/**
* A tool for web search functionality using Bing's search engine. It
* extends the base `Tool` class and implements the `_call` method to
* perform the search operation. Requires an API key for Bing's search
* engine, which can be set in the environment variables. Also accepts
* additional parameters for the search query.
*/
class BingSerpAPI extends Tool {
  static lc_name() {
    return "BingSerpAPI";
  }

  /**
   * Not implemented. Will throw an error if called.
   */
  toJSON() {
    return this.toJSONNotImplemented();
  }

  name = "bing-search";

  description =
    "a search engine. useful for when you need to answer questions about current events. input should be a search query.";

  key: string;

  params: Record<string, string>;

  /**
   * @param apiKey Bing subscription key; falls back to the BingApiKey
   *   environment variable.
   * @param params Extra query parameters.
   *   NOTE(review): stored but not currently merged into the request —
   *   confirm whether this is intentional.
   * @throws If no API key can be resolved.
   */
  constructor(
    apiKey: string | undefined = getEnvironmentVariable("BingApiKey"),
    params: Record<string, string> = {}
  ) {
    super(...arguments);
    if (!apiKey) {
      throw new Error(
        "BingSerpAPI API key not set. You can set it as BingApiKey in your .env file."
      );
    }
    this.key = apiKey;
    this.params = params;
  }

  /**
   * Runs the query against the Bing Web Search v7 API and returns the
   * snippets of all returned web pages joined with spaces.
   * @throws On non-2xx HTTP status.
   * @ignore
   */
  async _call(input: string): Promise<string> {
    const headers = { "Ocp-Apim-Subscription-Key": this.key };
    const params = { q: input, textDecorations: "true", textFormat: "HTML" };
    const searchUrl = new URL("https://api.bing.microsoft.com/v7.0/search");
    Object.entries(params).forEach(([key, value]) => {
      searchUrl.searchParams.append(key, value);
    });
    const response = await fetch(searchUrl, { headers });
    if (!response.ok) {
      throw new Error(`HTTP error ${response.status}`);
    }
    const res = await response.json();
    // BUG FIX: `res.webPages.value` was accessed unguarded, throwing a
    // TypeError when the response has no webPages (e.g. zero hits) instead
    // of returning the "no results" message. Also corrects the `[]`
    // (never[]) element-type lie on `results`.
    const results: { snippet: string }[] = res.webPages?.value ?? [];
    if (results.length === 0) {
      return "No good results found.";
    }
    const snippets = results.map((result) => result.snippet).join(" ");
    return snippets;
  }
}
export { BingSerpAPI };
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/google_custom_search.ts
|
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Tool } from "@langchain/core/tools";
/**
 * Interface for parameters required by GoogleCustomSearch class.
 */
export interface GoogleCustomSearchParams {
  /** Google API key; falls back to the GOOGLE_API_KEY environment variable. */
  apiKey?: string;
  /** Custom search engine id; falls back to GOOGLE_CSE_ID. */
  googleCSEId?: string;
}
/**
* Class that uses the Google Search API to perform custom searches.
* Requires environment variables `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` to
* be set.
*/
export class GoogleCustomSearch extends Tool {
  static lc_name() {
    return "GoogleCustomSearch";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "GOOGLE_API_KEY",
    };
  }

  name = "google-custom-search";

  protected apiKey: string;

  protected googleCSEId: string;

  description =
    "a custom search engine. useful for when you need to answer questions about current events. input should be a search query. outputs a JSON array of results.";

  /**
   * @param fields API key and engine id; each independently falls back to
   *   the GOOGLE_API_KEY / GOOGLE_CSE_ID environment variables when omitted.
   * @throws If either value cannot be resolved.
   */
  constructor(fields: GoogleCustomSearchParams = {}) {
    super(...arguments);
    // BUG FIX: the env fallbacks previously lived in a default *parameter*
    // value, so passing a partial object like `{ apiKey: "..." }` replaced
    // the whole default and silently dropped the GOOGLE_CSE_ID env fallback.
    // Resolving each field here keeps the fallbacks per-field.
    const apiKey = fields.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
    const googleCSEId =
      fields.googleCSEId ?? getEnvironmentVariable("GOOGLE_CSE_ID");
    if (!apiKey) {
      throw new Error(
        `Google API key not set. You can set it as "GOOGLE_API_KEY" in your environment variables.`
      );
    }
    if (!googleCSEId) {
      throw new Error(
        `Google custom search engine id not set. You can set it as "GOOGLE_CSE_ID" in your environment variables.`
      );
    }
    this.apiKey = apiKey;
    this.googleCSEId = googleCSEId;
  }

  /**
   * Runs the query against the Custom Search JSON API and returns a
   * JSON-stringified array of { title, link, snippet } objects.
   * @throws On non-2xx HTTP status.
   */
  async _call(input: string) {
    const res = await fetch(
      `https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
        this.googleCSEId
      }&q=${encodeURIComponent(input)}`
    );
    if (!res.ok) {
      throw new Error(
        `Got ${res.status} error from Google custom search: ${res.statusText}`
      );
    }
    const json = await res.json();
    const results =
      json?.items?.map(
        (item: { title?: string; link?: string; snippet?: string }) => ({
          title: item.title,
          link: item.link,
          snippet: item.snippet,
        })
      ) ?? [];
    return JSON.stringify(results);
  }
}
|
0
|
lc_public_repos/langchainjs/libs/langchain-community/src
|
lc_public_repos/langchainjs/libs/langchain-community/src/tools/tavily_search.ts
|
import { CallbackManagerForToolRun } from "@langchain/core/callbacks/manager";
import { Tool, type ToolParams } from "@langchain/core/tools";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
 * Options for the TavilySearchResults tool.
 */
export type TavilySearchAPIRetrieverFields = ToolParams & {
  /** Maximum number of results to request (default 5). */
  maxResults?: number;
  /** Extra body parameters forwarded verbatim to the Tavily search API. */
  kwargs?: Record<string, unknown>;
  /** Tavily API key; falls back to the TAVILY_API_KEY environment variable. */
  apiKey?: string;
};
/**
* Tavily search API tool integration.
*
* Setup:
* Install `@langchain/community`. You'll also need an API key set as `TAVILY_API_KEY`.
*
* ```bash
* npm install @langchain/community
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/_langchain_community.tools_tavily_search.TavilySearchResults.html#constructor)
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
*
* const tool = new TavilySearchResults({
* maxResults: 2,
* // ...
* });
* ```
* </details>
*
* <br />
*
* <details>
*
* <summary><strong>Invocation</strong></summary>
*
* ```typescript
* await tool.invoke("what is the current weather in sf?");
* ```
* </details>
*
* <br />
*
* <details>
*
* <summary><strong>Invocation with tool call</strong></summary>
*
* ```typescript
* // This is usually generated by a model, but we'll create a tool call directly for demo purposes.
* const modelGeneratedToolCall = {
* args: {
* input: "what is the current weather in sf?",
* },
* id: "tool_call_id",
* name: tool.name,
* type: "tool_call",
* };
* await tool.invoke(modelGeneratedToolCall);
* ```
*
* ```text
* ToolMessage {
* "content": "...",
* "name": "tavily_search_results_json",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_call_id": "tool_call_id"
* }
* ```
* </details>
*/
export class TavilySearchResults extends Tool {
  static lc_name(): string {
    return "TavilySearchResults";
  }

  description =
    "A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events. Input should be a search query.";

  name = "tavily_search_results_json";

  protected maxResults = 5;

  protected apiKey?: string;

  protected kwargs: Record<string, unknown> = {};

  /**
   * @param fields Optional configuration; the API key falls back to the
   *   TAVILY_API_KEY environment variable.
   * @throws If no API key can be resolved.
   */
  constructor(fields?: TavilySearchAPIRetrieverFields) {
    super(fields);
    if (fields?.maxResults !== undefined) {
      this.maxResults = fields.maxResults;
    }
    if (fields?.kwargs !== undefined) {
      this.kwargs = fields.kwargs;
    }
    this.apiKey = fields?.apiKey ?? getEnvironmentVariable("TAVILY_API_KEY");
    if (this.apiKey === undefined) {
      throw new Error(
        `No Tavily API key found. Either set an environment variable named "TAVILY_API_KEY" or pass an API key as "apiKey".`
      );
    }
  }

  /**
   * POSTs the query to the Tavily search endpoint and returns the raw
   * results array as a JSON string.
   * @throws On a non-2xx response or when the payload has no results array.
   */
  protected async _call(
    input: string,
    _runManager?: CallbackManagerForToolRun
  ): Promise<string> {
    // kwargs are spread last so callers can override any default field.
    const payload = {
      query: input,
      max_results: this.maxResults,
      api_key: this.apiKey,
      ...this.kwargs,
    };
    const response = await fetch("https://api.tavily.com/search", {
      method: "POST",
      headers: { "content-type": "application/json" },
      body: JSON.stringify(payload),
    });
    const json = await response.json();
    if (!response.ok) {
      throw new Error(
        `Request failed with status code ${response.status}: ${json.error}`
      );
    }
    if (!Array.isArray(json.results)) {
      throw new Error(`Could not parse Tavily results. Please try again.`);
    }
    return JSON.stringify(json.results);
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.