index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_quick_start.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
const chat = new ChatOpenAI({});
// Pass in a list of messages to `call` to start a conversation. In this simple example, we only pass in one message.
const response = await chat.invoke([
new HumanMessage(
"What is a good name for a company that makes colorful socks?"
),
]);
console.log(response);
// AIMessage { text: '\n\nRainbow Sox Co.' }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/googlegenerativeai_tools.ts | import { StructuredTool } from "@langchain/core/tools";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { z } from "zod";
const model = new ChatGoogleGenerativeAI({
model: "gemini-pro",
});
// Define your tool
class FakeBrowserTool extends StructuredTool {
schema = z.object({
url: z.string(),
query: z.string().optional(),
});
name = "fake_browser_tool";
description =
"useful for when you need to find something on the web or summarize a webpage.";
async _call(_: z.infer<this["schema"]>): Promise<string> {
return "fake_browser_tool";
}
}
// Bind your tools to the model
const modelWithTools = model.bind({
tools: [new FakeBrowserTool()],
});
// Or, you can use `.bindTools` which works the same under the hood
// const modelWithTools = model.bindTools([new FakeBrowserTool()]);
const res = await modelWithTools.invoke([
[
"human",
"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website",
],
]);
console.log(res.tool_calls);
/*
[
{
name: 'fake_browser_tool',
args: {
query: 'weather in new york',
url: 'https://www.google.com/search?q=weather+in+new+york'
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_streaming_stream_method.ts | import { ChatOpenAI } from "@langchain/openai";
const chat = new ChatOpenAI({
maxTokens: 25,
});
// Pass in a human message. Also accepts a raw string, which is automatically
// inferred to be a human message.
const stream = await chat.stream([["human", "Tell me a joke about bears."]]);
for await (const chunk of stream) {
console.log(chunk);
}
/*
AIMessageChunk {
content: '',
additional_kwargs: {}
}
AIMessageChunk {
content: 'Why',
additional_kwargs: {}
}
AIMessageChunk {
content: ' did',
additional_kwargs: {}
}
AIMessageChunk {
content: ' the',
additional_kwargs: {}
}
AIMessageChunk {
content: ' bear',
additional_kwargs: {}
}
AIMessageChunk {
content: ' bring',
additional_kwargs: {}
}
AIMessageChunk {
content: ' a',
additional_kwargs: {}
}
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_mistralai_wsa_json.ts | import { ChatMistralAI } from "@langchain/mistralai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const calculatorJsonSchema = {
type: "object",
properties: {
operation: {
type: "string",
enum: ["add", "subtract", "multiply", "divide"],
description: "The type of operation to execute.",
},
number1: { type: "number", description: "The first number to operate on." },
number2: {
type: "number",
description: "The second number to operate on.",
},
},
required: ["operation", "number1", "number2"],
description: "A simple calculator tool",
};
const model = new ChatMistralAI({
apiKey: process.env.MISTRAL_API_KEY,
model: "mistral-large-latest",
});
// Pass the schema and tool name to the withStructuredOutput method
const modelWithTool = model.withStructuredOutput(calculatorJsonSchema);
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(modelWithTool);
const response = await chain.invoke({
input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_wsa_json_schema.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({
temperature: 0,
model: "gpt-4-turbo-preview",
});
const calculatorSchema = {
type: "object",
properties: {
operation: {
type: "string",
enum: ["add", "subtract", "multiply", "divide"],
},
number1: { type: "number" },
number2: { type: "number" },
},
required: ["operation", "number1", "number2"],
};
// Default mode is "functionCalling"
const modelWithStructuredOutput = model.withStructuredOutput(calculatorSchema);
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
`You are VERY bad at math and must always use a calculator.
Respond with a JSON object containing three keys:
'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
'number1': the first number to operate on,
'number2': the second number to operate on.
`,
],
["human", "Please help me!! What is 2 + 2?"],
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
/**
* You can also specify 'includeRaw' to return the parsed
* and raw output in the result, as well as a "name" field
* to give the LLM additional context as to what you are generating.
*/
const includeRawModel = model.withStructuredOutput(calculatorSchema, {
name: "calculator",
includeRaw: true,
method: "jsonMode",
});
const includeRawChain = prompt.pipe(includeRawModel);
const includeRawResult = await includeRawChain.invoke({});
console.log(JSON.stringify(includeRawResult, null, 2));
/*
{
"raw": {
"kwargs": {
"content": "{\n \"operation\": \"add\",\n \"number1\": 2,\n \"number2\": 2\n}",
"additional_kwargs": {}
}
},
"parsed": {
"operation": "add",
"number1": 2,
"number2": 2
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
export const run = async () => {
const chat = new ChatOpenAI({ model: "gpt-3.5-turbo" });
// Pass in a list of messages to `call` to start a conversation. In this simple example, we only pass in one message.
const responseA = await chat.invoke([
new HumanMessage(
"What is a good name for a company that makes colorful socks?"
),
]);
console.log(responseA);
// AIMessage { text: '\n\nRainbow Sox Co.' }
// You can also pass in multiple messages to start a conversation.
// The first message is a system message that describes the context of the conversation.
// The second message is a human message that starts the conversation.
const responseB = await chat.invoke([
new SystemMessage(
"You are a helpful assistant that translates English to French."
),
new HumanMessage("Translate: I love programming."),
]);
console.log(responseB);
// AIMessage { text: "J'aime programmer." }
// Similar to LLMs, you can also use `generate` to generate chat completions for multiple sets of messages.
const responseC = await chat.invoke([
new SystemMessage(
"You are a helpful assistant that translates English to French."
),
new HumanMessage(
"Translate this sentence from English to French. I love programming."
),
new SystemMessage(
"You are a helpful assistant that translates English to French."
),
new HumanMessage(
"Translate this sentence from English to French. I love artificial intelligence."
),
]);
console.log(responseC);
/*
{
generations: [
[
{
text: "J'aime programmer.",
message: AIMessage { text: "J'aime programmer." },
}
],
[
{
text: "J'aime l'intelligence artificielle.",
message: AIMessage { text: "J'aime l'intelligence artificielle." }
}
]
]
}
*/
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_googlevertexai-wso.ts | import { ChatVertexAI } from "@langchain/google-vertexai";
import { z } from "zod";
// Or, if using the web entrypoint:
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
const calculatorSchema = z.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute"),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
});
const model = new ChatVertexAI({
temperature: 0.7,
model: "gemini-1.5-flash-001",
}).withStructuredOutput(calculatorSchema);
const response = await model.invoke("What is 1628253239 times 81623836?");
console.log(response);
/*
{ operation: 'multiply', number1: 1628253239, number2: 81623836 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_custom_headers.ts | import { ChatAnthropic } from "@langchain/anthropic";
const model = new ChatAnthropic({
model: "claude-3-sonnet-20240229",
maxTokens: 1024,
clientOptions: {
defaultHeaders: {
"X-Api-Key": process.env.ANTHROPIC_API_KEY,
},
},
});
const res = await model.invoke("Why is the sky blue?");
console.log(res);
/*
AIMessage {
content: "The sky appears blue because of the way sunlight interacts with the gases in Earth's atmosphere. Here's a more detailed explanation:\n" +
'\n' +
'- Sunlight is made up of different wavelengths of light, including the entire visible spectrum from red to violet.\n' +
'\n' +
'- As sunlight passes through the atmosphere, the gases (nitrogen, oxygen, etc.) cause the shorter wavelengths of light, in the blue and violet range, to be scattered more efficiently in different directions.\n' +
'\n' +
'- The blue wavelengths of about 475 nanometers get scattered more than the other visible wavelengths by the tiny gas molecules in the atmosphere.\n' +
'\n' +
'- This preferential scattering of blue light in all directions by the gas molecules is called Rayleigh scattering.\n' +
'\n' +
'- When we look at the sky, we see this scattered blue light from the sun coming at us from all parts of the sky.\n' +
'\n' +
"- At sunrise and sunset, the sun's rays have to travel further through the atmosphere before reaching our eyes, causing more of the blue light to be scattered out, leaving more of the red/orange wavelengths visible - which is why sunrises and sunsets appear reddish.\n" +
'\n' +
'So in summary, the blueness of the sky is caused by this selective scattering of blue wavelengths of sunlight by the gases in the atmosphere.',
name: undefined,
additional_kwargs: {
id: 'msg_01Mvvc5GvomqbUxP3YaeWXRe',
type: 'message',
role: 'assistant',
model: 'claude-3-sonnet-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: { input_tokens: 13, output_tokens: 284 }
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_cancellation.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatOpenAI({ temperature: 1 });
const controller = new AbortController();
// Call `controller.abort()` somewhere to cancel the request.
const res = await model.invoke(
[
new HumanMessage(
"What is a good name for a company that makes colorful socks?"
),
],
{ signal: controller.signal }
);
console.log(res);
/*
'\n\nSocktastic Colors'
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_yandex.ts | import { ChatYandexGPT } from "@langchain/yandex/chat_models";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
const chat = new ChatYandexGPT();
const res = await chat.invoke([
new SystemMessage(
"You are a helpful assistant that translates English to French."
),
new HumanMessage("I love programming."),
]);
console.log(res);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: "Je t'aime programmer.", additional_kwargs: {} },
lc_namespace: [ 'langchain', 'schema' ],
content: "Je t'aime programmer.",
name: undefined,
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_single_tool.ts | import { ChatAnthropic } from "@langchain/anthropic";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
const calculatorSchema = z.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
});
const tool = {
name: "calculator",
description: "A simple calculator tool",
input_schema: zodToJsonSchema(calculatorSchema),
};
const model = new ChatAnthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
model: "claude-3-haiku-20240307",
}).bind({
tools: [tool],
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(model);
const response = await chain.invoke({
input: "What is 2 + 2?",
});
console.log(JSON.stringify(response, null, 2));
/*
{
"kwargs": {
"content": "Okay, let's calculate that using the calculator tool:",
"additional_kwargs": {
"id": "msg_01YcT1KFV8qH7xG6T6C4EpGq",
"role": "assistant",
"model": "claude-3-haiku-20240307",
"tool_calls": [
{
"id": "toolu_01UiqGsTTH45MUveRQfzf7KH",
"type": "function",
"function": {
"arguments": "{\"number1\":2,\"number2\":2,\"operation\":\"add\"}",
"name": "calculator"
}
}
]
},
"response_metadata": {}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_multimodal.ts | import * as fs from "node:fs/promises";
import { ChatAnthropic } from "@langchain/anthropic";
import { HumanMessage } from "@langchain/core/messages";
const imageData = await fs.readFile("./hotdog.jpg");
const chat = new ChatAnthropic({
model: "claude-3-sonnet-20240229",
});
const message = new HumanMessage({
content: [
{
type: "text",
text: "What's in this image?",
},
{
type: "image_url",
image_url: {
url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
},
},
],
});
const res = await chat.invoke([message]);
console.log({ res });
/*
{
res: AIMessage {
content: 'The image shows a hot dog or frankfurter. It has a reddish-pink sausage filling encased in a light brown bun or bread roll. The hot dog is cut lengthwise, revealing the bright red sausage interior contrasted against the lightly toasted bread exterior. This classic fast food item is depicted in detail against a plain white background.',
name: undefined,
additional_kwargs: {
id: 'msg_0153boCaPL54QDEMQExkVur6',
type: 'message',
role: 'assistant',
model: 'claude-3-sonnet-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: [Object]
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_multimodal_converse.ts | import * as fs from "node:fs/promises";
import { ChatBedrockConverse } from "@langchain/aws";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatBedrockConverse({
model: "anthropic.claude-3-sonnet-20240229-v1:0",
region: "us-east-1",
credentials: {
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
},
});
const imageData = await fs.readFile("./hotdog.jpg");
const res = await model.invoke([
new HumanMessage({
content: [
{
type: "text",
text: "What's in this image?",
},
{
type: "image_url",
image_url: {
url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
},
},
],
}),
]);
console.log(res);
/*
AIMessage {
content: 'The image shows a hot dog or frankfurter. It has a reddish-pink sausage inside a light tan-colored bread bun. The hot dog bun is split open, allowing the sausage filling to be visible. The image appears to be focused solely on depicting this classic American fast food item against a plain white background.',
response_metadata: { ... },
id: '1608d043-575a-450e-8eac-2fef6297cfe2',
usage_metadata: { input_tokens: 276, output_tokens: 75, total_tokens: 351 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_streaming.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
const chat = new ChatOpenAI({
maxTokens: 25,
streaming: true,
});
const response = await chat.invoke([new HumanMessage("Tell me a joke.")], {
callbacks: [
{
handleLLMNewToken(token: string) {
console.log({ token });
},
},
],
});
console.log(response);
// { token: '' }
// { token: '\n\n' }
// { token: 'Why' }
// { token: ' don' }
// { token: "'t" }
// { token: ' scientists' }
// { token: ' trust' }
// { token: ' atoms' }
// { token: '?\n\n' }
// { token: 'Because' }
// { token: ' they' }
// { token: ' make' }
// { token: ' up' }
// { token: ' everything' }
// { token: '.' }
// { token: '' }
// AIMessage {
// text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything."
// }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_fireworks.ts | import { ChatFireworks } from "@langchain/community/chat_models/fireworks";
const model = new ChatFireworks({
temperature: 0.9,
// In Node.js defaults to process.env.FIREWORKS_API_KEY
apiKey: "YOUR-API-KEY",
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_converse.ts | import { ChatBedrockConverse } from "@langchain/aws";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatBedrockConverse({
model: "anthropic.claude-3-sonnet-20240229-v1:0",
region: "us-east-1",
credentials: {
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
},
});
const res = await model.invoke([
new HumanMessage({ content: "Tell me a joke" }),
]);
console.log(res);
/*
AIMessage {
content: "Here's a joke for you:\n" +
'\n' +
"Why can't a bicycle stand up by itself? Because it's two-tired!",
response_metadata: { ... },
id: '08afa4fb-c212-4c1e-853a-d854972bec78',
usage_metadata: { input_tokens: 11, output_tokens: 28, total_tokens: 39 }
}
*/
const stream = await model.stream([
new HumanMessage({ content: "Tell me a joke" }),
]);
for await (const chunk of stream) {
console.log(chunk.content);
}
/*
Here
's
a
silly
joke
for
you
:
Why
di
d the
tom
ato
turn
re
d?
Because
it
saw
the
sal
a
d
dressing
!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_stream_tokens.ts | import type { AIMessageChunk } from "@langchain/core/messages";
import { ChatOpenAI } from "@langchain/openai";
import { concat } from "@langchain/core/utils/stream";
// Instantiate the model
const model = new ChatOpenAI();
const response = await model.stream("Hello, how are you?", {
// Pass the stream options
stream_options: {
include_usage: true,
},
});
// Iterate over the response, only saving the last chunk
let finalResult: AIMessageChunk | undefined;
for await (const chunk of response) {
if (finalResult) {
finalResult = concat(finalResult, chunk);
} else {
finalResult = chunk;
}
}
console.log(finalResult?.usage_metadata);
/*
{ input_tokens: 13, output_tokens: 30, total_tokens: 43 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_wsa_zod.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { z } from "zod";
const model = new ChatOpenAI({
temperature: 0,
model: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput(calculatorSchema);
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are VERY bad at math and must always use a calculator."],
["human", "Please help me!! What is 2 + 2?"],
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
/**
* You can also specify 'includeRaw' to return the parsed
* and raw output in the result.
*/
const includeRawModel = model.withStructuredOutput(calculatorSchema, {
name: "calculator",
includeRaw: true,
});
const includeRawChain = prompt.pipe(includeRawModel);
const includeRawResult = await includeRawChain.invoke({});
console.log(JSON.stringify(includeRawResult, null, 2));
/*
{
"raw": {
"kwargs": {
"content": "",
"additional_kwargs": {
"tool_calls": [
{
"id": "call_A8yzNBDMiRrCB8dFYqJLhYW7",
"type": "function",
"function": {
"name": "calculator",
"arguments": "{\"operation\":\"add\",\"number1\":2,\"number2\":2}"
}
}
]
}
}
},
"parsed": {
"operation": "add",
"number1": 2,
"number2": 2
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_wsa.ts | import { ChatAnthropic } from "@langchain/anthropic";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
const calculatorSchema = z
.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
})
.describe("A simple calculator tool");
const model = new ChatAnthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
model: "claude-3-haiku-20240307",
});
// Pass the schema and tool name to the withStructuredOutput method
const modelWithTool = model.withStructuredOutput(calculatorSchema);
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(modelWithTool);
const response = await chain.invoke({
input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
/**
* You can supply a "name" field to give the LLM additional context
* around what you are trying to generate. You can also pass
* 'includeRaw' to get the raw message back from the model too.
*/
const includeRawModel = model.withStructuredOutput(calculatorSchema, {
name: "calculator",
includeRaw: true,
});
const includeRawChain = prompt.pipe(includeRawModel);
const includeRawResponse = await includeRawChain.invoke({
input: "What is 2 + 2?",
});
console.log(JSON.stringify(includeRawResponse, null, 2));
/*
{
"raw": {
"kwargs": {
"content": "Okay, let me use the calculator tool to find the result of 2 + 2:",
"additional_kwargs": {
"id": "msg_01HYwRhJoeqwr5LkSCHHks5t",
"type": "message",
"role": "assistant",
"model": "claude-3-haiku-20240307",
"usage": {
"input_tokens": 458,
"output_tokens": 109
},
"tool_calls": [
{
"id": "toolu_01LDJpdtEQrq6pXSqSgEHErC",
"type": "function",
"function": {
"arguments": "{\"number1\":2,\"number2\":2,\"operation\":\"add\"}",
"name": "calculator"
}
}
]
},
}
},
"parsed": {
"operation": "add",
"number1": 2,
"number2": 2
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_forced_tool.ts | import { ChatAnthropic } from "@langchain/anthropic";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
const calculatorSchema = z.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
});
const weatherSchema = z.object({
city: z.string().describe("The city to get the weather from"),
state: z.string().optional().describe("The state to get the weather from"),
});
const tools = [
{
name: "calculator",
description: "A simple calculator tool",
input_schema: zodToJsonSchema(calculatorSchema),
},
{
name: "get_weather",
description:
"Get the weather of a specific location and return the temperature in Celsius.",
input_schema: zodToJsonSchema(weatherSchema),
},
];
const model = new ChatAnthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
model: "claude-3-haiku-20240307",
}).bind({
tools,
tool_choice: {
type: "tool",
name: "get_weather",
},
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(model);
const response = await chain.invoke({
input: "What is the sum of 2725 and 273639",
});
console.log(JSON.stringify(response, null, 2));
/*
{
"kwargs": {
"tool_calls": [
{
"name": "get_weather",
"args": {
"city": "<UNKNOWN>",
"state": "<UNKNOWN>"
},
"id": "toolu_01MGRNudJvSDrrCZcPa2WrBX"
}
],
"response_metadata": {
"id": "msg_01RW3R4ctq7q5g4GJuGMmRPR",
"model": "claude-3-haiku-20240307",
"stop_sequence": null,
"usage": {
"input_tokens": 672,
"output_tokens": 52
},
"stop_reason": "tool_use"
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_azure_openai.ts | import { AzureChatOpenAI } from "@langchain/openai";
const model = new AzureChatOpenAI({
temperature: 0.9,
azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME
azureOpenAIApiDeploymentName: "<your_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
azureOpenAIApiVersion: "<api_version>", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/googlegenerativeai_wso.ts | import { StructuredTool } from "@langchain/core/tools";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { z } from "zod";
const model = new ChatGoogleGenerativeAI({
model: "gemini-pro",
});
// Define your tool
class FakeBrowserTool extends StructuredTool {
schema = z.object({
url: z.string(),
query: z.string().optional(),
});
name = "fake_browser_tool";
description =
"useful for when you need to find something on the web or summarize a webpage.";
async _call(_: z.infer<this["schema"]>): Promise<string> {
return "fake_browser_tool";
}
}
const tool = new FakeBrowserTool();
// Bind your tools to the model
const modelWithTools = model.withStructuredOutput(tool.schema, {
name: tool.name, // this is optional
});
// Optionally, you can pass just a Zod schema, or JSONified Zod schema
// const modelWithTools = model.withStructuredOutput(
// zodSchema,
// );
const res = await modelWithTools.invoke([
[
"human",
"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website",
],
]);
console.log(res);
/*
{
url: 'https://www.accuweather.com/en/us/new-york-ny/10007/night-weather-forecast/349014',
query: 'weather tonight'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_googlevertexai-tools.ts | import { ChatVertexAI } from "@langchain/google-vertexai";
import { type GeminiTool } from "@langchain/google-vertexai/types";
import { zodToGeminiParameters } from "@langchain/google-vertexai/utils";
import { z } from "zod";
// Or, if using the web entrypoint:
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
const calculatorSchema = z.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute"),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
});
const geminiCalculatorTool: GeminiTool = {
functionDeclarations: [
{
name: "calculator",
description: "A simple calculator tool",
parameters: zodToGeminiParameters(calculatorSchema),
},
],
};
const model = new ChatVertexAI({
temperature: 0.7,
model: "gemini-1.5-flash-001",
}).bind({
tools: [geminiCalculatorTool],
});
const response = await model.invoke("What is 1628253239 times 81623836?");
console.log(JSON.stringify(response.additional_kwargs, null, 2));
/*
{
"tool_calls": [
{
"id": "a20075d3b0e34f7ca60cc135916e620d",
"type": "function",
"function": {
"name": "calculator",
"arguments": "{\"number1\":1628253239,\"operation\":\"multiply\",\"number2\":81623836}"
}
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_zhipuai.ts | import { ChatZhipuAI } from "@langchain/community/chat_models/zhipuai";
import { HumanMessage } from "@langchain/core/messages";
// Default model is glm-3-turbo
const glm3turbo = new ChatZhipuAI({
zhipuAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ZHIPUAI_API_KEY
});
// Use glm-4
const glm4 = new ChatZhipuAI({
model: "glm-4", // Available models:
temperature: 1,
zhipuAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ZHIPUAI_API_KEY
});
const messages = [new HumanMessage("Hello")];
const res = await glm3turbo.invoke(messages);
/*
AIMessage {
content: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
const res2 = await glm4.invoke(messages);
/*
AIMessage {
text: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_baidu_qianfan.ts | import { ChatBaiduQianfan } from "@langchain/baidu-qianfan";
import { HumanMessage } from "@langchain/core/messages";
const chat = new ChatBaiduQianfan({
qianfanAccessKey: process.env.QIANFAN_ACCESS_KEY,
qianfanSecretKey: process.env.QIANFAN_SECRET_KEY,
model: "ERNIE-Lite-8K",
});
const message = new HumanMessage("北京天气");
const res = await chat.invoke([message]);
console.log({ res });
/**
{
res: AIMessage {
lc_serializable: true,
lc_kwargs: {
content: '北京天气**多云,气温13~24°C**,微风,空气质量良,预报无持续降水^[2]^。\n' +
'\n' +
'近期天气情况来说,白天最高气温多在30度左右,而夜晚最低气温仅有几度,早晚较凉,需要做好保暖,昼夜温差较大。由于现在正处于雨水节气,此时天气阴沉、多变,时而下起冰雹,时而下起大雨,天色昏暗。冰雹时间不会持续太长,通常都是下冰雹一小段时间后就会停止,天气就会逐渐恢复晴好^[1]^。',
tool_calls: [],
invalid_tool_calls: [],
additional_kwargs: {},
response_metadata: {}
},
lc_namespace: [ 'langchain_core', 'messages' ],
content: '北京天气**多云,气温13~24°C**,微风,空气质量良,预报无持续降水^[2]^。\n' +
'\n' +
'近期天气情况来说,白天最高气温多在30度左右,而夜晚最低气温仅有几度,早晚较凉,需要做好保暖,昼夜温差较大。由于现在正处于雨水节气,此时天气阴沉、多变,时而下起冰雹,时而下起大雨,天色昏暗。冰雹时间不会持续太长,通常都是下冰雹一小段时间后就会停止,天气就会逐渐恢复晴好^[1]^。',
name: undefined,
additional_kwargs: {},
response_metadata: { tokenUsage: [Object] },
tool_calls: [],
invalid_tool_calls: []
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_moonshot.ts | import { ChatMoonshot } from "@langchain/community/chat_models/moonshot";
import { HumanMessage } from "@langchain/core/messages";
// Default model is moonshot-v1-8k
const moonshotV18K = new ChatMoonshot({
  apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.MOONSHOT_API_KEY
});
// Use moonshot-v1-128k
const moonshotV1128k = new ChatMoonshot({
  apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.MOONSHOT_API_KEY
  model: "moonshot-v1-128k", // Available models: moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k
  temperature: 0.3,
});
// The same message list is sent to both model instances below.
const messages = [new HumanMessage("Hello")];
const res = await moonshotV18K.invoke(messages);
/*
AIMessage {
  content: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
const res2 = await moonshotV1128k.invoke(messages);
/*
AIMessage {
text: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_wso_converse.ts | import { ChatBedrockConverse } from "@langchain/aws";
import { z } from "zod";
// Bedrock Converse chat model targeting Claude 3 Sonnet in us-east-1.
// AWS credentials are read from the environment (non-null asserted).
const model = new ChatBedrockConverse({
  model: "anthropic.claude-3-sonnet-20240229-v1:0",
  region: "us-east-1",
  credentials: {
    accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
    secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
  },
});
// Zod schema describing the structured object we want back from the model.
const weatherSchema = z
  .object({
    city: z.string().describe("The city to get the weather for"),
    state: z.string().describe("The state to get the weather for").optional(),
  })
  .describe("Get the weather for a city");
// `withStructuredOutput` makes the model answer with an object matching the
// schema instead of free-form text.
const modelWithStructuredOutput = model.withStructuredOutput(weatherSchema, {
  name: "weather_tool", // Optional, defaults to 'extract'
});
const res = await modelWithStructuredOutput.invoke(
  "What's the weather in New York?"
);
console.log(res);
/*
{ city: 'New York', state: 'NY' }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_stream_mistralai.ts | import { ChatMistralAI } from "@langchain/mistralai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
// Streaming example: prompt -> ChatMistralAI -> string output parser.
const model = new ChatMistralAI({
  apiKey: process.env.MISTRAL_API_KEY,
  model: "mistral-small",
});
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["human", "{input}"],
]);
// Reduces each streamed AIMessageChunk to its plain string content.
const outputParser = new StringOutputParser();
const chain = prompt.pipe(model).pipe(outputParser);
// `stream` returns an async iterable of string chunks.
const response = await chain.stream({
  input: "Hello",
});
for await (const item of response) {
  console.log("stream item:", item);
}
/**
stream item:
stream item: Hello! I'm here to help answer any questions you
stream item: might have or assist you with any task you'd like to
stream item: accomplish. I can provide information
stream item: on a wide range of topics
stream item: , from math and science to history and literature. I can
stream item: also help you manage your schedule, set reminders, and
stream item: much more. Is there something specific you need help with? Let
stream item: me know!
stream item:
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_mistralai_tools.ts | import { ChatMistralAI } from "@langchain/mistralai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
import { z } from "zod";
import { StructuredTool } from "@langchain/core/tools";
// Zod schema for the calculator tool's arguments.
const calculatorSchema = z.object({
  operation: z
    .enum(["add", "subtract", "multiply", "divide"])
    .describe("The type of operation to execute."),
  number1: z.number().describe("The first number to operate on."),
  number2: z.number().describe("The second number to operate on."),
});
// Extend the StructuredTool class to create a new tool
class CalculatorTool extends StructuredTool {
  name = "calculator";
  description = "A simple calculator tool";
  schema = calculatorSchema;
  // Echoes the validated arguments back as JSON; a real tool would compute here.
  async _call(input: z.infer<typeof calculatorSchema>) {
    return JSON.stringify(input);
  }
}
// Or you can convert the tool to a JSON schema using
// a library like zod-to-json-schema
// Uncomment the lines below to use tools this way.
// import { zodToJsonSchema } from "zod-to-json-schema";
// const calculatorJsonSchema = zodToJsonSchema(calculatorSchema);
const model = new ChatMistralAI({
  apiKey: process.env.MISTRAL_API_KEY,
  model: "mistral-large-latest",
});
// Bind the tool to the model
const modelWithTool = model.bind({
  tools: [new CalculatorTool()],
});
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant who always needs to use a calculator.",
  ],
  ["human", "{input}"],
]);
// Define an output parser that can handle tool responses.
// `keyName` matches the tool name; `returnSingle` unwraps the single call.
const outputParser = new JsonOutputKeyToolsParser({
  keyName: "calculator",
  returnSingle: true,
});
// Chain your prompt, model, and output parser together
const chain = prompt.pipe(modelWithTool).pipe(outputParser);
const response = await chain.invoke({
  input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp_chain.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
// Path to a local GGUF Llama 2 model file; replace with your own path.
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await ChatLlamaCpp.initialize({
  modelPath: llamaPath,
  temperature: 0.5,
});
const prompt = PromptTemplate.fromTemplate(
  "What is a good name for a company that makes {product}?"
);
// LLMChain fills the prompt template and runs it through the local model.
const chain = new LLMChain({ llm: model, prompt });
const response = await chain.invoke({ product: "colorful socks" });
console.log({ response });
/*
{
text: `I'm not sure what you mean by "colorful socks" but here are some ideas:\n` +
'\n' +
'- Sock-it to me!\n' +
'- Socks Away\n' +
'- Fancy Footwear'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_deepinfra.ts | import { ChatDeepInfra } from "@langchain/community/chat_models/deepinfra";
import { HumanMessage } from "@langchain/core/messages";
// Chat with a DeepInfra-hosted Llama 3 70B model. The API token is read from
// the environment; `invoke` takes a message list and returns an AIMessage.
const chat = new ChatDeepInfra({
  model: "meta-llama/Meta-Llama-3-70B-Instruct",
  apiKey: process.env.DEEPINFRA_API_TOKEN,
});
const res = await chat.invoke([new HumanMessage("Hello")]);
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_googlevertexai.ts | import { ChatVertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
// Vertex AI Gemini chat model; `temperature` controls sampling randomness.
const model = new ChatVertexAI({
  temperature: 0.7,
  model: "gemini-1.5-flash-001",
});
const response = await model.invoke("Why is the ocean blue?");
console.log(response);
/*
AIMessageChunk {
"content": "The ocean appears blue due to a combination of factors:\n\n**1. Rayleigh Scattering:**\n\n* This is the primary reason. Sunlight is made up of all colors of the rainbow. When sunlight enters the ocean, blue wavelengths are scattered more than other colors by water molecules. \n* This scattered blue light is what we see. Think of it like sunlight being scattered by the atmosphere, making the sky appear blue.\n\n**2. Absorption of Other Colors:**\n\n* Water absorbs red, orange, yellow, and green wavelengths of light more readily than blue. This means less of those colors reach our eyes.\n* The deeper the water, the more red light is absorbed, making the ocean appear even bluer.\n\n**3. Other Factors:**\n\n* **Depth:** The deeper the water, the bluer it appears.\n* **Turbidity:** The presence of particles like sediment or plankton can affect the color. A cloudy ocean might appear more greenish or brown.\n* **Time of Day:** The ocean can appear different colors depending on the angle of the sun.\n\n**In Summary:**\n\nThe ocean appears blue primarily due to Rayleigh scattering, where blue wavelengths of light are scattered more effectively by water molecules. This, combined with the absorption of other colors by water, results in the blue hue we perceive.\n",
"usage_metadata": {
"input_tokens": 6,
"output_tokens": 276,
"total_tokens": 282
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/custom.ts | import {
SimpleChatModel,
type BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessageChunk, type BaseMessage } from "@langchain/core/messages";
import { ChatGenerationChunk } from "@langchain/core/outputs";
/** Constructor options for {@link CustomChatModel}. */
export interface CustomChatModelInput extends BaseChatModelParams {
  // Number of characters of the first message to echo back.
  n: number;
}
/**
 * Minimal custom chat model: echoes the first `n` characters of the first
 * input message. Demonstrates implementing `_call` (single response) and
 * `_streamResponseChunks` (streaming) on top of `SimpleChatModel`.
 */
export class CustomChatModel extends SimpleChatModel {
  n: number;
  constructor(fields: CustomChatModelInput) {
    super(fields);
    this.n = fields.n;
  }
  // Identifier used for tracing/serialization.
  _llmType() {
    return "custom";
  }
  /**
   * Returns the first `n` characters of the first message's string content.
   * Throws if no messages are given or the first message is multimodal.
   */
  async _call(
    messages: BaseMessage[],
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    if (!messages.length) {
      throw new Error("No messages provided.");
    }
    if (typeof messages[0].content !== "string") {
      throw new Error("Multimodal messages are not supported.");
    }
    return messages[0].content.slice(0, this.n);
  }
  /**
   * Streaming variant: yields one ChatGenerationChunk per character and
   * reports each new token to the callback manager.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    _options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    if (!messages.length) {
      throw new Error("No messages provided.");
    }
    if (typeof messages[0].content !== "string") {
      throw new Error("Multimodal messages are not supported.");
    }
    for (const letter of messages[0].content.slice(0, this.n)) {
      yield new ChatGenerationChunk({
        message: new AIMessageChunk({
          content: letter,
        }),
        text: letter,
      });
      // Let registered callbacks observe each streamed token.
      await runManager?.handleLLMNewToken(letter);
    }
  }
}
// Demo: with n = 4 the model echoes "I am".
const chatModel = new CustomChatModel({ n: 4 });
console.log(await chatModel.invoke([["human", "I am an LLM"]]));
const stream = await chatModel.stream([["human", "I am an LLM"]]);
for await (const chunk of stream) {
  console.log(chunk);
}
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_googlevertexai-streaming.ts | import { ChatVertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
// Vertex AI Gemini model used for token-by-token streaming.
const model = new ChatVertexAI({
  temperature: 0.7,
  model: "gemini-1.5-flash-001",
});
// `stream` accepts [role, content] tuples and yields AIMessageChunks.
const stream = await model.stream([
  ["system", "You are a funny assistant that answers in pirate language."],
  ["human", "What is your favorite food?"],
]);
for await (const chunk of stream) {
  console.log(chunk.content);
}
/*
A
hoy, matey! Me favorite food be a hearty plate o' grub,
with a side o' scurvy dogs and a tankard o' grog
. Argh!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/friendli.ts | import { ChatFriendli } from "@langchain/community/chat_models/friendli";
// Friendli chat model. Token and team are taken from the environment; the
// remaining fields are sampling/generation options.
const model = new ChatFriendli({
  model: "meta-llama-3-8b-instruct", // Default value
  friendliToken: process.env.FRIENDLI_TOKEN,
  friendliTeam: process.env.FRIENDLI_TEAM,
  maxTokens: 800,
  temperature: 0.9,
  topP: 0.9,
  frequencyPenalty: 0,
  stop: [],
});
// Single response.
const response = await model.invoke(
  "Draft a cover letter for a role in software engineering."
);
console.log(response.content);
/*
Dear [Hiring Manager],
I am excited to apply for the role of Software Engineer at [Company Name]. With my passion for innovation, creativity, and problem-solving, I am confident that I would be a valuable asset to your team.
As a highly motivated and detail-oriented individual, ...
*/
// The same prompt, streamed chunk by chunk.
const stream = await model.stream(
  "Draft a cover letter for a role in software engineering."
);
for await (const chunk of stream) {
  console.log(chunk.content);
}
/*
D
ear
[
H
iring
...
[
Your
Name
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_azure_openai_managed_identity.ts | import {
DefaultAzureCredential,
getBearerTokenProvider,
} from "@azure/identity";
import { AzureChatOpenAI } from "@langchain/openai";
// Authenticate to Azure OpenAI with Azure AD instead of an API key:
// DefaultAzureCredential picks up the ambient identity and the token
// provider mints bearer tokens for the Cognitive Services scope.
const credentials = new DefaultAzureCredential();
const azureADTokenProvider = getBearerTokenProvider(
  credentials,
  "https://cognitiveservices.azure.com/.default"
);
// Placeholders below must be replaced with your Azure OpenAI resource values.
const model = new AzureChatOpenAI({
  azureADTokenProvider,
  azureOpenAIApiInstanceName: "<your_instance_name>",
  azureOpenAIApiDeploymentName: "<your_deployment_name>",
  azureOpenAIApiVersion: "<api_version>",
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_groq_wsa_zod.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatGroq } from "@langchain/groq";
import { z } from "zod";
const model = new ChatGroq({
  temperature: 0,
  model: "mixtral-8x7b-32768",
});
// Zod schema the model's answer must conform to.
const calculatorSchema = z.object({
  operation: z.enum(["add", "subtract", "multiply", "divide"]),
  number1: z.number(),
  number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput(calculatorSchema);
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are VERY bad at math and must always use a calculator."],
  ["human", "Please help me!! What is 2 + 2?"],
]);
const chain = prompt.pipe(modelWithStructuredOutput);
// The prompt has no input variables, so invoke with an empty object.
const result = await chain.invoke({});
console.log(result);
/*
  { operation: 'add', number1: 2, number2: 2 }
*/
/**
 * You can also specify 'includeRaw' to return the parsed
 * and raw output in the result.
 */
const includeRawModel = model.withStructuredOutput(calculatorSchema, {
  name: "calculator",
  includeRaw: true,
});
const includeRawChain = prompt.pipe(includeRawModel);
const includeRawResult = await includeRawChain.invoke({});
console.log(includeRawResult);
/*
{
raw: AIMessage {
content: '',
additional_kwargs: {
tool_calls: [
{
"id": "call_01htk094ktfgxtkwj40n0ehg61",
"type": "function",
"function": {
"name": "calculator",
"arguments": "{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}"
}
}
]
},
response_metadata: {
"tokenUsage": {
"completionTokens": 197,
"promptTokens": 1214,
"totalTokens": 1411
},
"finish_reason": "tool_calls"
}
},
parsed: { operation: 'add', number1: 2, number2: 2 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp_stream_invoke.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
import { SystemMessage, HumanMessage } from "@langchain/core/messages";
// Path to a local GGUF Llama 2 model file; replace with your own path.
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await ChatLlamaCpp.initialize({
  modelPath: llamaPath,
  temperature: 0.7,
});
// Abort the in-flight generation after 5 seconds to demonstrate
// cancellation via an AbortSignal.
const controller = new AbortController();
setTimeout(() => {
  controller.abort();
  console.log("Aborted");
}, 5000);
await model.invoke(
  [
    new SystemMessage(
      "You are a pirate, responses must be very verbose and in pirate dialect."
    ),
    new HumanMessage("Tell me about Llamas?"),
  ],
  {
    signal: controller.signal,
    // Log each generated token as it arrives.
    callbacks: [
      {
        handleLLMNewToken(token) {
          console.log(token);
        },
      },
    ],
  }
);
/*
Once
upon
a
time
,
in
a
green
and
sunny
field
...
Aborted
AbortError
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_debugging.ts | import { type LLMResult } from "@langchain/core/outputs";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import { Serialized } from "@langchain/core/load/serializable";
// We can pass in a list of CallbackHandlers to the LLM constructor to get callbacks for various events.
const model = new ChatOpenAI({
  callbacks: [
    {
      // Fired before the request: logs the serialized model and the prompts.
      handleLLMStart: async (llm: Serialized, prompts: string[]) => {
        console.log(JSON.stringify(llm, null, 2));
        console.log(JSON.stringify(prompts, null, 2));
      },
      // Fired on success: logs the full LLMResult (generations + token usage).
      handleLLMEnd: async (output: LLMResult) => {
        console.log(JSON.stringify(output, null, 2));
      },
      // Fired on failure.
      handleLLMError: async (err: Error) => {
        console.error(err);
      },
    },
  ],
});
await model.invoke([
  new HumanMessage(
    "What is a good name for a company that makes colorful socks?"
  ),
]);
/*
{
"name": "openai"
}
[
"Human: What is a good name for a company that makes colorful socks?"
]
{
"generations": [
[
{
"text": "Rainbow Soles",
"message": {
"text": "Rainbow Soles"
}
}
]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 4,
"promptTokens": 21,
"totalTokens": 25
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_functions_zod.ts | import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import { HumanMessage } from "@langchain/core/messages";
// Zod schema for the function's single argument.
const extractionFunctionZodSchema = z.object({
  location: z.string().describe(" The location to get the weather"),
});
// Bind function arguments to the model.
// "functions.parameters" must be formatted as JSON Schema.
// We translate the above Zod schema into JSON schema using the "zodToJsonSchema" package.
const model = new ChatMinimax({
  model: "abab5.5-chat",
  botSetting: [
    {
      bot_name: "MM Assistant",
      content: "MM Assistant is an AI Assistant developed by minimax.",
    },
  ],
}).bind({
  functions: [
    {
      name: "get_weather",
      description: " Get weather information.",
      parameters: zodToJsonSchema(extractionFunctionZodSchema),
    },
  ],
});
// The model is expected to respond with a function_call rather than text.
const result = await model.invoke([
  new HumanMessage({
    content: " What is the weather like in Shanghai tomorrow?",
    name: "XiaoMing",
  }),
]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { function_call: [Object] } },
lc_namespace: [ 'langchain', 'schema' ],
content: '',
name: undefined,
additional_kwargs: {
function_call: { name: 'get_weather', arguments: '{"location": "Shanghai"}' }
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_togetherai_json.ts | import { ChatTogetherAI } from "@langchain/community/chat_models/togetherai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
// Define a JSON schema for the response
const responseSchema = {
  type: "object",
  properties: {
    orderedArray: {
      type: "array",
      items: {
        type: "number",
      },
    },
  },
  required: ["orderedArray"],
};
const modelWithJsonSchema = new ChatTogetherAI({
  temperature: 0,
  apiKey: process.env.TOGETHER_AI_API_KEY,
  model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
}).bind({
  response_format: {
    type: "json_object", // Define the response format as a JSON object
    schema: responseSchema, // Pass in the schema for the model's response
  },
});
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant who responds in JSON."],
  ["human", "Please list this output in order of DESC {unorderedList}."],
]);
// Use LCEL to chain the prompt to the model.
const response = await prompt.pipe(modelWithJsonSchema).invoke({
  unorderedList: "[1, 4, 2, 8]",
});
// The model returns a JSON string; parse it back into an object.
console.log(JSON.parse(response.content as string));
/**
{ orderedArray: [ 8, 4, 2, 1 ] }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_sample_messages.ts | import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
// Minimax chat model with `sampleMessages` bound: the human/AI pair below
// acts as a one-shot example steering the model's output format.
const model = new ChatMinimax({
  model: "abab5.5-chat",
  botSetting: [
    {
      bot_name: "MM Assistant",
      content: "MM Assistant is an AI Assistant developed by minimax.",
    },
  ],
}).bind({
  sampleMessages: [
    new HumanMessage({
      content: " Turn A5 into red and modify the content to minimax.",
    }),
    new AIMessage({
      content: "select A5 color red change minimax",
    }),
  ],
});
// First message defines the task and the available "interfaces"; the second
// is the actual request the model should answer in the sampled format.
const result = await model.invoke([
  new HumanMessage({
    content:
      ' Please reply to my content according to the following requirements: According to the following interface list, give the order and parameters of calling the interface for the content I gave. You just need to give the order and parameters of calling the interface, and do not give any other output. The following is the available interface list: select: select specific table position, input parameter use letters and numbers to determine, for example "B13"; color: dye the selected table position, input parameters use the English name of the color, for example "red"; change: modify the selected table position, input parameters use strings.',
  }),
  new HumanMessage({
    content: " Process B6 to gray and modify the content to question.",
  }),
]);
console.log(result);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/usage_metadata.ts | import { ChatOpenAI } from "@langchain/openai";
// Token accounting example: `usage_metadata` on the returned AIMessage
// reports input/output/total token counts for the call.
const chatModel = new ChatOpenAI({ model: "gpt-3.5-turbo-0125" });
const response = await chatModel.invoke("Tell me a joke.");
console.log(response.usage_metadata);
/*
{ input_tokens: 12, output_tokens: 17, total_tokens: 29 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_tools_wsa.ts | import { z } from "zod";
import { ChatAnthropicTools } from "@langchain/anthropic/experimental";
import { ChatPromptTemplate } from "@langchain/core/prompts";
// Zod schema for the structured calculator output we want from the model.
const calculatorSchema = z.object({
  operation: z
    .enum(["add", "subtract", "multiply", "divide"])
    .describe("The type of operation to execute"),
  number1: z.number().describe("The first number to operate on."),
  number2: z.number().describe("The second number to operate on."),
});
const model = new ChatAnthropicTools({
  model: "claude-3-sonnet-20240229",
  temperature: 0.1,
});
// Pass the schema and tool name to the withStructuredOutput method
const modelWithTool = model.withStructuredOutput(calculatorSchema);
// You can also set force: false to allow the model scratchpad space.
// This may improve reasoning capabilities.
// const modelWithTool = model.withStructuredOutput(calculatorSchema, {
//   force: false,
// });
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant who always needs to use a calculator.",
  ],
  ["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(modelWithTool);
const response = await chain.invoke({
  input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_multimodal.ts | import * as fs from "node:fs/promises";
import { BedrockChat } from "@langchain/community/chat_models/bedrock";
// Or, from web environments:
// import { BedrockChat } from "@langchain/community/chat_models/bedrock/web";
import { HumanMessage } from "@langchain/core/messages";
// If no credentials are provided, the default credentials from
// @aws-sdk/credential-provider-node will be used.
// modelKwargs are additional parameters passed to the model when it
// is invoked.
const model = new BedrockChat({
  model: "anthropic.claude-3-sonnet-20240229-v1:0",
  region: "us-east-1",
  // endpointUrl: "custom.amazonaws.com",
  // credentials: {
  //   accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
  //   secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
  // },
  // modelKwargs: {
  //   anthropic_version: "bedrock-2023-05-31",
  // },
});
// Read the local image and send it inline as a base64 data URL alongside a
// text prompt (Claude 3 multimodal input).
const imageData = await fs.readFile("./hotdog.jpg");
const res = await model.invoke([
  new HumanMessage({
    content: [
      {
        type: "text",
        text: "What's in this image?",
      },
      {
        type: "image_url",
        image_url: {
          url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
        },
      },
    ],
  }),
]);
console.log(res);
/*
AIMessage {
content: 'The image shows a hot dog or frankfurter. It has a reddish-pink sausage filling encased in a light brown bread-like bun. The hot dog bun is split open, revealing the sausage inside. This classic fast food item is a popular snack or meal, often served at events like baseball games or cookouts. The hot dog appears to be against a plain white background, allowing the details and textures of the food item to be clearly visible.',
name: undefined,
additional_kwargs: { id: 'msg_01XrLPL9vCb82U3Wrrpza18p' }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_tool_calling_agent.ts | import { z } from "zod";
import { ChatAnthropic } from "@langchain/anthropic";
import { tool } from "@langchain/core/tools";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const llm = new ChatAnthropic({
  model: "claude-3-sonnet-20240229",
  temperature: 0,
});
// Prompt template must have "input" and "agent_scratchpad input variables"
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["placeholder", "{chat_history}"],
  ["human", "{input}"],
  ["placeholder", "{agent_scratchpad}"],
]);
// Stubbed weather tool: always reports 28 °C regardless of location.
const currentWeatherTool = tool(async () => "28 °C", {
  name: "get_current_weather",
  description: "Get the current weather in a given location",
  schema: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  }),
});
const agent = await createToolCallingAgent({
  llm,
  tools: [currentWeatherTool],
  prompt,
});
// The executor runs the agent loop: model call -> tool call -> final answer.
const agentExecutor = new AgentExecutor({
  agent,
  tools: [currentWeatherTool],
});
const input = "What's the weather like in SF?";
const { output } = await agentExecutor.invoke({ input });
console.log(output);
/*
The current weather in San Francisco, CA is 28°C.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_generation_info.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
// See https://cookbook.openai.com/examples/using_logprobs for details
// `logprobs: true` asks OpenAI to include per-token log probabilities in the
// generation info of the response.
const model = new ChatOpenAI({
  logprobs: true,
  // topLogprobs: 5,
});
const generations = await model.invoke([new HumanMessage("Hi there!")]);
console.log(JSON.stringify(generations, null, 2));
/*
{
"generations": [
[
{
"text": "Hello! How can I assist you today?",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Hello! How can I assist you today?",
"additional_kwargs": {}
}
},
"generationInfo": {
"finish_reason": "stop",
"logprobs": {
"content": [
{
"token": "Hello",
"logprob": -0.0011337858,
"bytes": [
72,
101,
108,
108,
111
],
"top_logprobs": []
},
{
"token": "!",
"logprob": -0.00044127836,
"bytes": [
33
],
"top_logprobs": []
},
{
"token": " How",
"logprob": -0.000065994034,
"bytes": [
32,
72,
111,
119
],
"top_logprobs": []
},
...
]
}
}
}
]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 9,
"promptTokens": 10,
"totalTokens": 19
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_multi_tool.ts | import { ChatAnthropic } from "@langchain/anthropic";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
// Zod schemas for the two tools offered to the model.
const calculatorSchema = z.object({
  operation: z
    .enum(["add", "subtract", "multiply", "divide", "average"])
    .describe("The type of operation to execute."),
  numbers: z.array(z.number()).describe("The numbers to operate on."),
});
const weatherSchema = z
  .object({
    location: z.string().describe("The name of city to get the weather for."),
  })
  .describe(
    "Get the weather of a specific location and return the temperature in Celsius."
  );
// Anthropic tools are declared with `input_schema` in JSON Schema form;
// zodToJsonSchema converts the Zod schemas above.
const tools = [
  {
    name: "calculator",
    description: "A simple calculator tool.",
    input_schema: zodToJsonSchema(calculatorSchema),
  },
  {
    name: "get_weather",
    description: "Get the weather of a location",
    input_schema: zodToJsonSchema(weatherSchema),
  },
];
const model = new ChatAnthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  model: "claude-3-opus-20240229",
}).bind({
  tools,
});
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant who always uses tools to ensure you provide accurate, up to date information.",
  ],
  ["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(model);
// A single query intended to trigger multiple tool calls (weather + average).
const response = await chain.invoke({
  input:
    "What is the current weather in new york, and san francisco? Also, what is the average of these numbers: 2273,7192,272,92737?",
});
console.log(JSON.stringify(response, null, 2));
/*
{
"kwargs": {
"content": "<thinking>\nTo answer this query, there are two relevant tools:\n\n1. get_weather - This can be used to get the current weather for New York and San Francisco. It requires a \"location\" parameter. Since the user provided \"new york\" and \"san francisco\" as locations, we have the necessary information to call this tool twice - once for each city.\n\n2. calculator - This can be used to calculate the average of the provided numbers. It requires a \"numbers\" parameter which is an array of numbers, and an \"operation\" parameter. The user provided the numbers \"2273,7192,272,92737\" which we can split into an array, and they asked for the \"average\", so we have the necessary information to call this tool.\n\nSince we have the required parameters for both relevant tools, we can proceed with the function calls.\n</thinking>",
"additional_kwargs": {
"id": "msg_013AgVS83LU6fWRHbykfvbYS",
"type": "message",
"role": "assistant",
"model": "claude-3-opus-20240229",
"stop_reason": "tool_use",
"usage": {
"input_tokens": 714,
"output_tokens": 336
},
"tool_calls": [
{
"id": "toolu_01NHY2v7kZx8WqAvGzBuCu4h",
"type": "function",
"function": {
"arguments": "{\"location\":\"new york\"}",
"name": "get_weather"
}
},
{
"id": "toolu_01PVCofvgkbnD4NfWfvXdsPC",
"type": "function",
"function": {
"arguments": "{\"location\":\"san francisco\"}",
"name": "get_weather"
}
},
{
"id": "toolu_019AVVNUyCYnvsVdpkGKVDdv",
"type": "function",
"function": {
"arguments": "{\"operation\":\"average\",\"numbers\":[2273,7192,272,92737]}",
"name": "calculator"
}
}
]
},
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_groq_tool_calls.ts | import { ChatGroq } from "@langchain/groq";
// Mocked out function, could be a database/API call in production.
// Returns a JSON string of { location, temperature, unit }; the `_unit`
// argument is accepted but ignored by the mock.
function getCurrentWeather(location: string, _unit?: string) {
  const place = location.toLowerCase();
  let temperature = "22";
  let unit = "celsius";
  if (place.includes("tokyo")) {
    temperature = "10";
  } else if (place.includes("san francisco")) {
    temperature = "72";
    unit = "fahrenheit";
  }
  return JSON.stringify({ location, temperature, unit });
}
// Bind function to the model as a tool
const chat = new ChatGroq({
  model: "mixtral-8x7b-32768",
  maxTokens: 128,
}).bind({
  tools: [
    {
      type: "function",
      function: {
        name: "get_current_weather",
        description: "Get the current weather in a given location",
        // JSON Schema for the tool's arguments; only `location` is required.
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The city and state, e.g. San Francisco, CA",
            },
            unit: { type: "string", enum: ["celsius", "fahrenheit"] },
          },
          required: ["location"],
        },
      },
    },
  ],
  // Let the model decide whether to call the tool.
  tool_choice: "auto",
});
const res = await chat.invoke([
  ["human", "What's the weather like in San Francisco?"],
]);
// The requested tool invocations (name + JSON arguments) live here.
console.log(res.additional_kwargs.tool_calls);
/*
[
{
id: 'call_01htk055jpftwbb9tvphyf9bnf',
type: 'function',
function: {
name: 'get_current_weather',
arguments: '{"location":"San Francisco, CA"}'
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_glyph.ts | import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
} from "@langchain/core/prompts";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatMinimax({
model: "abab5.5-chat",
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
}).bind({
replyConstraints: {
sender_type: "BOT",
sender_name: "MM Assistant",
glyph: {
type: "raw",
raw_glyph: "The translated text:{{gen 'content'}}",
},
},
});
const messagesTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(
" Please help me translate the following sentence in English: {text}"
),
]);
const messages = await messagesTemplate.formatMessages({ text: "我是谁" });
const result = await model.invoke(messages);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
content: 'The translated text: Who am I\x02',
additional_kwargs: { function_call: undefined }
},
lc_namespace: [ 'langchain', 'schema' ],
content: 'The translated text: Who am I\x02',
name: undefined,
additional_kwargs: { function_call: undefined }
}
*/
// use json_value
const modelMinimax = new ChatMinimax({
model: "abab5.5-chat",
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
}).bind({
replyConstraints: {
sender_type: "BOT",
sender_name: "MM Assistant",
glyph: {
type: "json_value",
json_properties: {
name: {
type: "string",
},
age: {
type: "number",
},
is_student: {
type: "boolean",
},
is_boy: {
type: "boolean",
},
courses: {
type: "object",
properties: {
name: {
type: "string",
},
score: {
type: "number",
},
},
},
},
},
},
});
const result2 = await modelMinimax.invoke([
new HumanMessage({
content:
" My name is Yue Wushuang, 18 years old this year, just finished the test with 99.99 points.",
name: "XiaoMing",
}),
]);
console.log(result2);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
content: '{\n' +
' "name": "Yue Wushuang",\n' +
' "is_student": true,\n' +
' "is_boy": false,\n' +
' "courses": {\n' +
' "name": "Mathematics",\n' +
' "score": 99.99\n' +
' },\n' +
' "age": 18\n' +
' }',
additional_kwargs: { function_call: undefined }
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp_system.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
import { SystemMessage, HumanMessage } from "@langchain/core/messages";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await ChatLlamaCpp.initialize({ modelPath: llamaPath });
const response = await model.invoke([
new SystemMessage(
"You are a pirate, responses must be very verbose and in pirate dialect, add 'Arr, m'hearty!' to each sentence."
),
new HumanMessage("Tell me where Llamas come from?"),
]);
console.log({ response });
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
content: "Arr, m'hearty! Llamas come from the land of Peru.",
additional_kwargs: {}
},
lc_namespace: [ 'langchain', 'schema' ],
content: "Arr, m'hearty! Llamas come from the land of Peru.",
name: undefined,
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_vision.ts | import * as fs from "node:fs/promises";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
const imageData = await fs.readFile("./hotdog.jpg");
const chat = new ChatOpenAI({
model: "gpt-4-vision-preview",
maxTokens: 1024,
});
const message = new HumanMessage({
content: [
{
type: "text",
text: "What's in this image?",
},
{
type: "image_url",
image_url: {
url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
},
},
],
});
const res = await chat.invoke([message]);
console.log({ res });
/*
{
res: AIMessage {
content: 'The image shows a hot dog, which consists of a grilled or steamed sausage served in the slit of a partially sliced bun. This particular hot dog appears to be plain, without any visible toppings or condiments.',
additional_kwargs: { function_call: undefined }
}
}
*/
const hostedImageMessage = new HumanMessage({
content: [
{
type: "text",
text: "What does this image say?",
},
{
type: "image_url",
image_url:
"https://www.freecodecamp.org/news/content/images/2023/05/Screenshot-2023-05-29-at-5.40.38-PM.png",
},
],
});
const res2 = await chat.invoke([hostedImageMessage]);
console.log({ res2 });
/*
{
res2: AIMessage {
content: 'The image contains the text "LangChain" with a graphical depiction of a parrot on the left and two interlocked rings on the left side of the text.',
additional_kwargs: { function_call: undefined }
}
}
*/
const lowDetailImage = new HumanMessage({
content: [
{
type: "text",
text: "Summarize the contents of this image.",
},
{
type: "image_url",
image_url: {
url: "https://blog.langchain.dev/content/images/size/w1248/format/webp/2023/10/Screenshot-2023-10-03-at-4.55.29-PM.png",
detail: "low",
},
},
],
});
const res3 = await chat.invoke([lowDetailImage]);
console.log({ res3 });
/*
{
res3: AIMessage {
content: 'The image shows a user interface for a service named "WebLangChain," which appears to be powered by "Twalv." It includes a text box with the prompt "Ask me anything about anything!" suggesting that users can enter questions on various topics. Below the text box, there are example questions that users might ask, such as "what is langchain?", "history of mesopotamia," "how to build a discord bot," "leonardo dicaprio girlfriend," "fun gift ideas for software engineers," "how does a prism separate light," and "what beer is best." The interface also includes a round blue button with a paper plane icon, presumably to submit the question. The overall theme of the image is dark with blue accents.',
additional_kwargs: { function_call: undefined }
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_stream_groq.ts | import { ChatGroq } from "@langchain/groq";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
const model = new ChatGroq({
apiKey: process.env.GROQ_API_KEY,
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["human", "{input}"],
]);
const outputParser = new StringOutputParser();
const chain = prompt.pipe(model).pipe(outputParser);
const response = await chain.stream({
input: "Hello",
});
let res = "";
for await (const item of response) {
res += item;
console.log("stream:", res);
}
/**
stream: Hello
stream: Hello!
stream: Hello! I
stream: Hello! I'
stream: Hello! I'm
stream: Hello! I'm happy
stream: Hello! I'm happy to
stream: Hello! I'm happy to assist
stream: Hello! I'm happy to assist you
stream: Hello! I'm happy to assist you in
stream: Hello! I'm happy to assist you in any
stream: Hello! I'm happy to assist you in any way
stream: Hello! I'm happy to assist you in any way I
stream: Hello! I'm happy to assist you in any way I can
stream: Hello! I'm happy to assist you in any way I can.
stream: Hello! I'm happy to assist you in any way I can. Is
stream: Hello! I'm happy to assist you in any way I can. Is there
stream: Hello! I'm happy to assist you in any way I can. Is there something
stream: Hello! I'm happy to assist you in any way I can. Is there something specific
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a question
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a question you
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a question you have
stream: Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a question you have?
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_streaming_stdout.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
// Streaming chat model that prints tokens to stdout as they arrive.
const chat = new ChatOpenAI({
  streaming: true,
  callbacks: [
    {
      // Invoked once per streamed token; write without a trailing newline
      // so the output reads as one continuous completion.
      handleLLMNewToken(token: string) {
        process.stdout.write(token);
      },
    },
  ],
});
await chat.invoke([new HumanMessage("Write me a song about sparkling water.")]);
/*
Verse 1:
Bubbles rise, crisp and clear
Refreshing taste that brings us cheer
Sparkling water, so light and pure
Quenches our thirst, it's always secure
Chorus:
Sparkling water, oh how we love
Its fizzy bubbles and grace above
It's the perfect drink, anytime, anyplace
Refreshing as it gives us a taste
Verse 2:
From morning brunch to evening feast
It's the perfect drink for a treat
A sip of it brings a smile so bright
Our thirst is quenched in just one sip so light
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_plugins.ts | import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatMinimax({
model: "abab5.5-chat",
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
}).bind({
plugins: ["plugin_web_search"],
});
const result = await model.invoke([
new HumanMessage({
content: " What is the weather like in NewYork tomorrow?",
}),
]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
    content: 'The weather in New York tomorrow is expected to be hot. Please note that this is just a forecast and the actual weather conditions may vary.',
    additional_kwargs: { function_call: undefined }
  },
  lc_namespace: [ 'langchain', 'schema' ],
  content: 'The weather in New York tomorrow is expected to be hot. Please note that this is just a forecast and the actual weather conditions may vary.',
name: undefined,
additional_kwargs: { function_call: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/token_usage_tracking_anthropic.ts | import { ChatAnthropic } from "@langchain/anthropic";
const chatModel = new ChatAnthropic({
model: "claude-3-sonnet-20240229",
});
const res = await chatModel.invoke("Tell me a joke.");
console.log(res.response_metadata);
/*
{
id: 'msg_017Mgz6HdgNbi3cwL1LNB9Dw',
model: 'claude-3-sonnet-20240229',
stop_sequence: null,
usage: { input_tokens: 12, output_tokens: 30 },
stop_reason: 'end_turn'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_fine_tune.ts | import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({
temperature: 0.9,
model: "ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}",
});
const message = await model.invoke("Hi there!");
console.log(message);
/*
AIMessage {
content: 'Hello! How can I assist you today?',
additional_kwargs: { function_call: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock.ts | import { BedrockChat } from "@langchain/community/chat_models/bedrock";
// Or, from web environments:
// import { BedrockChat } from "@langchain/community/chat_models/bedrock/web";
import { HumanMessage } from "@langchain/core/messages";
// If no credentials are provided, the default credentials from
// @aws-sdk/credential-provider-node will be used.
// modelKwargs are additional parameters passed to the model when it
// is invoked.
const model = new BedrockChat({
model: "anthropic.claude-3-sonnet-20240229-v1:0",
region: "us-east-1",
// endpointUrl: "custom.amazonaws.com",
// credentials: {
// accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
// secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
// },
// modelKwargs: {
// anthropic_version: "bedrock-2023-05-31",
// },
});
// Other model names include:
// "mistral.mistral-7b-instruct-v0:2"
// "mistral.mixtral-8x7b-instruct-v0:1"
//
// For a full list, see the Bedrock page in AWS.
const res = await model.invoke([
new HumanMessage({ content: "Tell me a joke" }),
]);
console.log(res);
/*
AIMessage {
content: "Here's a silly joke for you:\n" +
'\n' +
"Why can't a bicycle stand up by itself?\n" +
"Because it's two-tired!",
name: undefined,
additional_kwargs: { id: 'msg_01NYN7Rf39k4cgurqpZWYyDh' }
}
*/
const stream = await model.stream([
new HumanMessage({ content: "Tell me a joke" }),
]);
for await (const chunk of stream) {
console.log(chunk.content);
}
/*
Here
's
a
silly
joke
for
you
:
Why
can
't
a
bicycle
stand
up
by
itself
?
Because
it
's
two
-
tired
!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_baiduwenxin.ts | import { ChatBaiduWenxin } from "@langchain/community/chat_models/baiduwenxin";
import { HumanMessage } from "@langchain/core/messages";
// Default model is ERNIE-Bot-turbo
const ernieTurbo = new ChatBaiduWenxin({
baiduApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.BAIDU_API_KEY
baiduSecretKey: "YOUR-SECRET-KEY", // In Node.js defaults to process.env.BAIDU_SECRET_KEY
});
// Use ERNIE-Bot
const ernie = new ChatBaiduWenxin({
model: "ERNIE-Bot", // Available models are shown above
temperature: 1,
baiduApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.BAIDU_API_KEY
baiduSecretKey: "YOUR-SECRET-KEY", // In Node.js defaults to process.env.BAIDU_SECRET_KEY
});
const messages = [new HumanMessage("Hello")];
let res = await ernieTurbo.invoke(messages);
/*
AIChatMessage {
text: 'Hello! How may I assist you today?',
name: undefined,
additional_kwargs: {}
}
}
*/
res = await ernie.invoke(messages);
/*
AIChatMessage {
text: 'Hello! How may I assist you today?',
name: undefined,
additional_kwargs: {}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_stream_baidu_qianfan.ts | import { ChatBaiduQianfan } from "@langchain/baidu-qianfan";
import { HumanMessage } from "@langchain/core/messages";
const chat = new ChatBaiduQianfan({
qianfanAccessKey: process.env.QIANFAN_ACCESS_KEY,
qianfanSecretKey: process.env.QIANFAN_SECRET_KEY,
model: "ERNIE-Lite-8K",
streaming: true,
});
const message = new HumanMessage("等额本金和等额本息有什么区别?");
const res = await chat.invoke([message]);
console.log({ res });
/**
{
res: AIMessage {
lc_serializable: true,
lc_kwargs: {
content: 'undefined等额本金和等额本息是两种常见的贷款还款方式,它们之间的主要区别在于计息方式、每月还款额和利息支出等方面。\n' +
'\n' +
'1. 计息方式:等额本金是一种按月递减的计息方式,每月偿还相同数额的本金和剩余贷款在该月产生的利息。而等额本息则是每月偿还相同金额的利息,根据贷款金额和贷款期限计算月供,本金和利息在每月还款中占的比例逐月变化。\n' +
'2. 每月还款额:由于等额本息每月偿还的利息占每月还款总额的比例逐渐减少,导致每月还款额逐渐增加,而等额本金每月偿还的本金相同,因此每月还款额逐渐减少。\n' +
'3. 利息支出:在贷款期限相同的情况下,等额本金的利息支出相对较少,因为随着本金的减少,剩余贷款产生的利息也相应减少。而等额本息的利息支出则相对较高,因为每月偿还的利息逐渐减少,导致总利息支出相对较高。\n' +
'\n' +
'总之,等额本金和等额本息在贷款期限相同的情况下,等额本金由于利息支出相对较少,更适合于资金充裕、有提前还款打算的借款人;而等额本息每月还款额固定,更适合于每月收入较高的借款人。',
tool_calls: [],
invalid_tool_calls: [],
additional_kwargs: {},
response_metadata: {}
},
lc_namespace: [ 'langchain_core', 'messages' ],
content: 'undefined等额本金和等额本息是两种常见的贷款还款方式,它们之间的主要区别在于计息方式、每月还款额和利息支出等方面。\n' +
'\n' +
'1. 计息方式:等额本金是一种按月递减的计息方式,每月偿还相同数额的本金和剩余贷款在该月产生的利息。而等额本息则是每月偿还相同金额的利息,根据贷款金额和贷款期限计算月供,本金和利息在每月还款中占的比例逐月变化。\n' +
'2. 每月还款额:由于等额本息每月偿还的利息占每月还款总额的比例逐渐减少,导致每月还款额逐渐增加,而等额本金每月偿还的本金相同,因此每月还款额逐渐减少。\n' +
'3. 利息支出:在贷款期限相同的情况下,等额本金的利息支出相对较少,因为随着本金的减少,剩余贷款产生的利息也相应减少。而等额本息的利息支出则相对较高,因为每月偿还的利息逐渐减少,导致总利息支出相对较高。\n' +
'\n' +
'总之,等额本金和等额本息在贷款期限相同的情况下,等额本金由于利息支出相对较少,更适合于资金充裕、有提前还款打算的借款人;而等额本息每月还款额固定,更适合于每月收入较高的借款人。',
name: undefined,
additional_kwargs: {},
response_metadata: { tokenUsage: [Object] },
tool_calls: [],
invalid_tool_calls: []
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/token_usage_tracking_callback.ts | import { ChatOpenAI } from "@langchain/openai";
const chatModel = new ChatOpenAI({
model: "gpt-4-turbo",
callbacks: [
{
handleLLMEnd(output) {
console.log(JSON.stringify(output, null, 2));
},
},
],
});
await chatModel.invoke("Tell me a joke.");
/*
{
"generations": [
[
{
"text": "Why did the scarecrow win an award?\n\nBecause he was outstanding in his field!",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Why did the scarecrow win an award?\n\nBecause he was outstanding in his field!",
"tool_calls": [],
"invalid_tool_calls": [],
"additional_kwargs": {},
"response_metadata": {
"tokenUsage": {
"completionTokens": 17,
"promptTokens": 12,
"totalTokens": 29
},
"finish_reason": "stop"
}
}
},
"generationInfo": {
"finish_reason": "stop"
}
}
]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 17,
"promptTokens": 12,
"totalTokens": 29
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_minimax.ts | import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import { HumanMessage } from "@langchain/core/messages";
// Use abab5.5
const abab5_5 = new ChatMinimax({
model: "abab5.5-chat",
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
});
const messages = [
new HumanMessage({
content: "Hello",
}),
];
const res = await abab5_5.invoke(messages);
console.log(res);
/*
AIChatMessage {
text: 'Hello! How may I assist you today?',
name: undefined,
additional_kwargs: {}
}
}
*/
// use abab5
const abab5 = new ChatMinimax({
proVersion: false,
model: "abab5-chat",
minimaxGroupId: process.env.MINIMAX_GROUP_ID, // In Node.js defaults to process.env.MINIMAX_GROUP_ID
minimaxApiKey: process.env.MINIMAX_API_KEY, // In Node.js defaults to process.env.MINIMAX_API_KEY
});
const result = await abab5.invoke([
new HumanMessage({
content: "Hello",
name: "XiaoMing",
}),
]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
content: 'Hello! Can I help you with anything?',
additional_kwargs: { function_call: undefined }
},
lc_namespace: [ 'langchain', 'schema' ],
content: 'Hello! Can I help you with anything?',
name: undefined,
additional_kwargs: { function_call: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_tools_converse.ts | import { ChatBedrockConverse } from "@langchain/aws";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
const model = new ChatBedrockConverse({
model: "anthropic.claude-3-sonnet-20240229-v1:0",
region: "us-east-1",
credentials: {
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
},
});
// Demo tool: returns a canned weather string for the requested city.
// NOTE(review): if `state` is omitted the template interpolates "undefined"
// into the string — presumably acceptable for this demo; confirm if reused.
const weatherTool = tool(
  ({ city, state }) => `The weather in ${city}, ${state} is 72°F and sunny`,
  {
    name: "weather_tool",
    description: "Get the weather for a city",
    schema: z.object({
      city: z.string().describe("The city to get the weather for"),
      state: z.string().describe("The state to get the weather for").optional(),
    }),
  }
);
const modelWithTools = model.bindTools([weatherTool]);
// Optionally, you can bind tools via the `.bind` method:
// const modelWithTools = model.bind({
// tools: [weatherTool]
// });
const res = await modelWithTools.invoke("What's the weather in New York?");
console.log(res);
/*
AIMessage {
content: [
{
type: 'text',
text: "Okay, let's get the weather for New York City."
}
],
response_metadata: { ... },
id: '49a97da0-e971-4d7f-9f04-2495e068c15e',
  tool_calls: [
    {
      id: 'tooluse_O6Q1Ghm7SmKA9mn2ZKmBzg',
      name: 'weather_tool',
      args: {
        'city': 'New York',
      },
    },
  ],
  usage_metadata: { input_tokens: 289, output_tokens: 68, total_tokens: 357 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_parallel_tool_calls.ts | import { ChatOpenAI } from "@langchain/openai";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
const model = new ChatOpenAI({
temperature: 0,
model: "gpt-4o",
});
// Define your tools
const calculatorSchema = z
.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
})
.describe("A tool to perform basic arithmetic operations");
const weatherSchema = z
.object({
city: z.string(),
})
.describe("A tool to get the weather in a city");
// Bind tools to the model
const modelWithTools = model.bindTools([
{
type: "function",
function: {
name: "calculator",
description: calculatorSchema.description,
parameters: zodToJsonSchema(calculatorSchema),
},
},
{
type: "function",
function: {
name: "weather",
description: weatherSchema.description,
parameters: zodToJsonSchema(weatherSchema),
},
},
]);
// Invoke the model with `parallel_tool_calls` set to `true`
const response = await modelWithTools.invoke(
["What is the weather in san francisco and what is 23716 times 27342?"],
{
parallel_tool_calls: true,
}
);
console.log(response.tool_calls);
// We can see it called two tools
/*
[
{
name: 'weather',
args: { city: 'san francisco' },
id: 'call_c1KymEIix7mdlFtgLSnTXmDc'
},
{
name: 'calculator',
args: { operation: 'multiply', number1: 23716, number2: 27342 },
id: 'call_ANLYclAmXQ4TwUCLXakbPr3Z'
}
]
*/
// Invoke the model with `parallel_tool_calls` set to `false`
const response2 = await modelWithTools.invoke(
["What is the weather in san francisco and what is 23716 times 27342?"],
{
parallel_tool_calls: false,
}
);
console.log(response2.tool_calls);
// We can see it called one tool
/*
[
{
name: 'weather',
args: { city: 'san francisco' },
id: 'call_Rk34XffawJjgZ2BCK9E4CwlT'
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_webllm.ts | // Must be run in a web environment, e.g. a web worker
import { ChatWebLLM } from "@langchain/community/chat_models/webllm";
import { HumanMessage } from "@langchain/core/messages";
// Initialize the ChatWebLLM model with the model record and chat options.
// Note that if the appConfig field is set, the list of model records
// must include the selected model record for the engine.
// You can import a list of models available by default here:
// https://github.com/mlc-ai/web-llm/blob/main/src/config.ts
//
// Or by importing it via:
// import { prebuiltAppConfig } from "@mlc-ai/web-llm";
const model = new ChatWebLLM({
model: "Phi-3-mini-4k-instruct-q4f16_1-MLC",
chatOptions: {
temperature: 0.5,
},
});
await model.initialize((progress: Record<string, unknown>) => {
console.log(progress);
});
// Call the model with a message and await the response.
const response = await model.invoke([
new HumanMessage({ content: "What is 1 + 1?" }),
]);
console.log(response);
/*
AIMessage {
content: ' 2\n',
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_ollama_tools.ts | import { tool } from "@langchain/core/tools";
import { ChatOllama } from "@langchain/ollama";
import { z } from "zod";
const weatherTool = tool((_) => "Da weather is weatherin", {
name: "get_current_weather",
description: "Get the current weather in a given location",
schema: z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA"),
}),
});
// Define the model
const model = new ChatOllama({
model: "llama3-groq-tool-use",
});
// Bind the tool to the model
const modelWithTools = model.bindTools([weatherTool]);
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool."
);
console.log(result);
/*
AIMessage {
"content": "",
"tool_calls": [
{
"name": "get_current_weather",
"args": {
"location": "San Francisco, CA"
},
"type": "tool_call"
}
],
"usage_metadata": {
"input_tokens": 177,
"output_tokens": 30,
"total_tokens": 207
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_ollama_json_mode.ts | import { ChatOllama } from "@langchain/ollama";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
`You are an expert translator. Format all responses as JSON objects with two keys: "original" and "translated".`,
],
["human", `Translate "{input}" into {language}.`],
]);
const model = new ChatOllama({
baseUrl: "http://localhost:11434", // Default value
model: "llama3",
format: "json",
});
const chain = prompt.pipe(model);
const result = await chain.invoke({
input: "I love programming",
language: "German",
});
console.log(result);
/*
AIMessage {
"content": "{\n\"original\": \"I love programming\",\n\"translated\": \"Ich liebe Programmieren\"\n}",
"response_metadata": { ... },
"usage_metadata": {
"input_tokens": 47,
"output_tokens": 20,
"total_tokens": 67
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_wso.ts | import { BedrockChat } from "@langchain/community/chat_models/bedrock";
// Or, from web environments:
// import { BedrockChat } from "@langchain/community/chat_models/bedrock/web";
import { z } from "zod";
const model = new BedrockChat({
region: process.env.BEDROCK_AWS_REGION,
model: "anthropic.claude-3-sonnet-20240229-v1:0",
maxRetries: 0,
credentials: {
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
},
});
const weatherSchema = z
.object({
city: z.string().describe("The city to get the weather for"),
state: z.string().describe("The state to get the weather for").optional(),
})
.describe("Get the weather for a city");
const modelWithStructuredOutput = model.withStructuredOutput(weatherSchema, {
name: "weather_tool", // Optional, defaults to 'extract'
});
const res = await modelWithStructuredOutput.invoke(
"What's the weather in New York?"
);
console.log(res);
/*
{ city: 'New York', state: 'NY' }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/anthropic_tools/extraction.ts | import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatAnthropicTools } from "@langchain/anthropic/experimental";
import { PromptTemplate } from "@langchain/core/prompts";
import { JsonOutputToolsParser } from "@langchain/core/output_parsers/openai_tools";
const EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned in the following passage together with their properties.
Passage:
{input}
`;
const prompt = PromptTemplate.fromTemplate(EXTRACTION_TEMPLATE);
// Use Zod for easier schema declaration
const schema = z.object({
name: z.string().describe("The name of a person"),
height: z.number().describe("The person's height"),
hairColor: z.optional(z.string()).describe("The person's hair color"),
});
const model = new ChatAnthropicTools({
temperature: 0.1,
model: "claude-3-sonnet-20240229",
}).bind({
tools: [
{
type: "function",
function: {
name: "person",
description: "Extracts the relevant people from the passage.",
parameters: zodToJsonSchema(schema),
},
},
],
// Can also set to "auto" to let the model choose a tool
tool_choice: {
type: "function",
function: {
name: "person",
},
},
});
// Use a JsonOutputToolsParser to get the parsed JSON response directly.
// `.pipe()` composes Runnables synchronously and returns a Runnable (not a
// Promise), so no `await` is needed here — matches the ollama_functions example.
const chain = prompt.pipe(model).pipe(new JsonOutputToolsParser());
const response = await chain.invoke({
input:
"Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.",
});
console.log(JSON.stringify(response, null, 2));
/*
[
{
"type": "person",
"args": {
"name": "Alex",
"height": 5,
"hairColor": "blonde"
}
},
{
"type": "person",
"args": {
"name": "Claudia",
"height": 6,
"hairColor": "brunette"
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/anthropic_tools/tool_calling.ts | import { ChatAnthropicTools } from "@langchain/anthropic/experimental";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatAnthropicTools({
temperature: 0.1,
model: "claude-3-sonnet-20240229",
}).bind({
tools: [
{
type: "function",
function: {
name: "get_current_weather",
description: "Get the current weather in a given location",
parameters: {
type: "object",
properties: {
location: {
type: "string",
description: "The city and state, e.g. San Francisco, CA",
},
unit: { type: "string", enum: ["celsius", "fahrenheit"] },
},
required: ["location"],
},
},
},
],
// You can set the `function_call` arg to force the model to use a function
tool_choice: {
type: "function",
function: {
name: "get_current_weather",
},
},
});
const response = await model.invoke([
new HumanMessage({
content: "What's the weather in Boston?",
}),
]);
console.log(response);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { tool_calls: [Array] } },
lc_namespace: [ 'langchain_core', 'messages' ],
content: '',
name: undefined,
additional_kwargs: { tool_calls: [ [Object] ] }
}
*/
console.log(response.additional_kwargs.tool_calls);
/*
[
{
id: '0',
type: 'function',
function: {
name: 'get_current_weather',
arguments: '{"location":"Boston, MA","unit":"fahrenheit"}'
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/ollama_functions/extraction.ts | import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { OllamaFunctions } from "@langchain/community/experimental/chat_models/ollama_functions";
import { PromptTemplate } from "@langchain/core/prompts";
import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
const EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned in the following passage together with their properties.
Passage:
{input}
`;
const prompt = PromptTemplate.fromTemplate(EXTRACTION_TEMPLATE);
// Use Zod for easier schema declaration
const schema = z.object({
people: z.array(
z.object({
name: z.string().describe("The name of a person"),
height: z.number().describe("The person's height"),
hairColor: z.optional(z.string()).describe("The person's hair color"),
})
),
});
const model = new OllamaFunctions({
  temperature: 0.1,
  model: "mistral",
}).bind({
  functions: [
    {
      name: "information_extraction",
      description: "Extracts the relevant information from the passage.",
      // `zodToJsonSchema` already emits a complete JSON Schema object
      // ({ type: "object", properties: {...}, required: [...] }), so pass it
      // directly as `parameters`. Nesting it under a `properties` key would
      // make the model see bogus parameters named "type"/"properties"/etc.
      // (This matches the anthropic_tools extraction example.)
      parameters: zodToJsonSchema(schema),
    },
  ],
  // Force the model to call the extraction function on every invocation.
  function_call: {
    name: "information_extraction",
  },
});
// Use a JsonOutputFunctionsParser to get the parsed JSON response directly.
const chain = prompt.pipe(model).pipe(new JsonOutputFunctionsParser());
const response = await chain.invoke({
input:
"Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him. Claudia has orange hair and Alex is blonde.",
});
console.log(JSON.stringify(response, null, 2));
/*
{
"people": [
{
"name": "Alex",
"height": 5,
"hairColor": "blonde"
},
{
"name": "Claudia",
"height": {
"$num": 1,
"add": [
{
"name": "Alex",
"prop": "height"
}
]
},
"hairColor": "orange"
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/ollama_functions/custom_prompt.ts | import { OllamaFunctions } from "@langchain/community/experimental/chat_models/ollama_functions";
import { HumanMessage } from "@langchain/core/messages";

// Example: overriding OllamaFunctions' default tool-formatting system prompt.
// Custom system prompt to format tools. You must encourage the model
// to wrap output in a JSON object with "tool" and "tool_input" properties.
// Note: the {{ / }} braces are literal braces escaped for the prompt template.
const toolSystemPromptTemplate = `You have access to the following tools:
{tools}
To use a tool, respond with a JSON object with the following structure:
{{
"tool": <name of the called tool>,
"tool_input": <parameters for the tool matching the above JSON schema>
}}`;

// Low temperature keeps the JSON tool-call output deterministic.
const model = new OllamaFunctions({
  temperature: 0.1,
  model: "mistral",
  toolSystemPromptTemplate,
}).bind({
  // JSON Schema description of the single available function.
  functions: [
    {
      name: "get_current_weather",
      description: "Get the current weather in a given location",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The city and state, e.g. San Francisco, CA",
          },
          unit: { type: "string", enum: ["celsius", "fahrenheit"] },
        },
        required: ["location"],
      },
    },
  ],
  // You can set the `function_call` arg to force the model to use a function
  function_call: {
    name: "get_current_weather",
  },
});

// The function call arrives in `additional_kwargs.function_call`,
// not in `content` (see the expected output below).
const response = await model.invoke([
  new HumanMessage({
    content: "What's the weather in Boston?",
  }),
]);
console.log(response);
/*
AIMessage {
content: '',
additional_kwargs: {
function_call: {
name: 'get_current_weather',
arguments: '{"location":"Boston, MA","unit":"fahrenheit"}'
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/ollama_functions/function_calling.ts | import { OllamaFunctions } from "@langchain/community/experimental/chat_models/ollama_functions";
import { HumanMessage } from "@langchain/core/messages";

// Low temperature keeps the structured function-call output deterministic.
const model = new OllamaFunctions({
  temperature: 0.1,
  model: "mistral",
}).bind({
  // JSON Schema description of the single available function.
  functions: [
    {
      name: "get_current_weather",
      description: "Get the current weather in a given location",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The city and state, e.g. San Francisco, CA",
          },
          unit: { type: "string", enum: ["celsius", "fahrenheit"] },
        },
        required: ["location"],
      },
    },
  ],
  // You can set the `function_call` arg to force the model to use a function
  function_call: {
    name: "get_current_weather",
  },
});

// The function call arrives in `additional_kwargs.function_call`,
// not in `content` (see the expected output below).
const response = await model.invoke([
  new HumanMessage({
    content: "What's the weather in Boston?",
  }),
]);
console.log(response);
/*
AIMessage {
content: '',
additional_kwargs: {
function_call: {
name: 'get_current_weather',
arguments: '{"location":"Boston, MA","unit":"fahrenheit"}'
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/tool_calling.ts | import { ChatCohere } from "@langchain/cohere";
import { HumanMessage } from "@langchain/core/messages";
import { z } from "zod";
import { tool } from "@langchain/core/tools";

// apiKey falls back to COHERE_API_KEY when omitted.
const model = new ChatCohere({
  apiKey: process.env.COHERE_API_KEY, // Default
});

// Demo tool that adds 5 to the input; the zod schema supplies the tool's
// parameter description shown to the model.
const magicFunctionTool = tool(
  async ({ num }) => {
    return `The magic function of ${num} is ${num + 5}`;
  },
  {
    name: "magic_function",
    description: "Apply a magic function to the input number",
    schema: z.object({
      num: z.number().describe("The number to apply the magic function for"),
    }),
  }
);

const tools = [magicFunctionTool];
// Attach the tools so the model may emit tool calls (`response.tool_calls`).
const modelWithTools = model.bindTools(tools);

const messages = [new HumanMessage("What is the magic function of number 5?")];
const response = await modelWithTools.invoke(messages);
console.log(response);
/*
AIMessage {
content: 'I will use the magic_function tool to answer this question.',
name: undefined,
additional_kwargs: {
response_id: 'd0b189e5-3dbf-493c-93f8-99ed4b01d96d',
generationId: '8982a68f-c64c-48f8-bf12-0b4bea0018b6',
chatHistory: [ [Object], [Object] ],
finishReason: 'COMPLETE',
meta: { apiVersion: [Object], billedUnits: [Object], tokens: [Object] },
toolCalls: [ [Object] ]
},
response_metadata: {
estimatedTokenUsage: { completionTokens: 54, promptTokens: 920, totalTokens: 974 },
response_id: 'd0b189e5-3dbf-493c-93f8-99ed4b01d96d',
generationId: '8982a68f-c64c-48f8-bf12-0b4bea0018b6',
chatHistory: [ [Object], [Object] ],
finishReason: 'COMPLETE',
meta: { apiVersion: [Object], billedUnits: [Object], tokens: [Object] },
toolCalls: [ [Object] ]
},
tool_calls: [
{
name: 'magic_function',
args: [Object],
id: '4ec98550-ba9a-4043-adfe-566230e5'
}
],
invalid_tool_calls: [],
usage_metadata: { input_tokens: 920, output_tokens: 54, total_tokens: 974 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/stateful_conversation.ts | import { ChatCohere } from "@langchain/cohere";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatCohere({
  apiKey: process.env.COHERE_API_KEY, // Default
});

// Random suffix keeps demo runs from colliding on the same server-side
// conversation; Cohere keeps chat history under this id between calls.
const conversationId = `demo_test_id-${Math.random()}`;
const response = await model.invoke(
  [new HumanMessage("Tell me a joke about bears.")],
  {
    conversationId,
  }
);
console.log("response: ", response.content);
/*
response: Why did the bear go to the dentist?
Because she had bear teeth!
Hope you found that joke about bears to be a little bit tooth-arious!
Would you like me to tell you another one? I could also provide you with a list of jokes about bears if you prefer.
Just let me know if you have any other jokes or topics you'd like to hear about!
*/
// Reusing the same conversationId gives the model access to the earlier
// exchange without resending it.
const response2 = await model.invoke(
  [new HumanMessage("What was the subject of my last question?")],
  {
    conversationId,
  }
);
console.log("response2: ", response2.content);
/*
response2: Your last question was about bears. You asked me to tell you a joke about bears, which I am programmed to assist with.
Would you like me to assist you with anything else bear-related? I can provide you with facts about bears, stories about bears, or even list other topics that might be of interest to you.
Please let me know if you have any other questions and I will do my best to provide you with a response.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/rag.ts | import { ChatCohere } from "@langchain/cohere";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatCohere({
  apiKey: process.env.COHERE_API_KEY, // Default
});

// Inline documents for Cohere's RAG mode; the model grounds its answer in
// these snippets.
// NOTE(review): "Polar berars" is a typo in the demo data title — left as-is
// because it is runtime data in a published example; confirm before changing.
const documents = [
  {
    title: "Harrison's work",
    snippet: "Harrison worked at Kensho as an engineer.",
  },
  {
    title: "Harrison's work duration",
    snippet: "Harrison worked at Kensho for 3 years.",
  },
  {
    title: "Polar berars in the Appalachian Mountains",
    snippet:
      "Polar bears have surprisingly adapted to the Appalachian Mountains, thriving in the diverse, forested terrain despite their traditional arctic habitat. This unique situation has sparked significant interest and study in climate adaptability and wildlife behavior.",
  },
];

// Documents are passed per-request via call options.
const response = await model.invoke(
  [new HumanMessage("Where did Harrison work and for how long?")],
  {
    documents,
  }
);
console.log("response: ", response.content);
/*
response: Harrison worked as an engineer at Kensho for about 3 years.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/chat_cohere.ts | import { ChatCohere } from "@langchain/cohere";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const model = new ChatCohere({
apiKey: process.env.COHERE_API_KEY, // Default
});
const prompt = ChatPromptTemplate.fromMessages([
["ai", "You are a helpful assistant"],
["human", "{input}"],
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({
input: "Hello there friend!",
});
console.log("response", response);
/*
response AIMessage {
content: 'Hello there! How can I help you today?',
name: undefined,
additional_kwargs: {
response_id: '51ff9e7e-7419-43db-a8e6-17db54805695',
generationId: 'f9b507f5-5296-40c5-834c-b1c09e24a0f6',
chatHistory: [ [Object], [Object], [Object] ],
finishReason: 'COMPLETE',
meta: { apiVersion: [Object], billedUnits: [Object], tokens: [Object] }
},
response_metadata: {
estimatedTokenUsage: { completionTokens: 10, promptTokens: 78, totalTokens: 88 },
response_id: '51ff9e7e-7419-43db-a8e6-17db54805695',
generationId: 'f9b507f5-5296-40c5-834c-b1c09e24a0f6',
chatHistory: [ [Object], [Object], [Object] ],
finishReason: 'COMPLETE',
meta: { apiVersion: [Object], billedUnits: [Object], tokens: [Object] }
},
id: undefined,
tool_calls: [],
invalid_tool_calls: [],
usage_metadata: { input_tokens: 78, output_tokens: 10, total_tokens: 88 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/chat_stream_cohere.ts | import { ChatCohere } from "@langchain/cohere";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
const model = new ChatCohere({
apiKey: process.env.COHERE_API_KEY, // Default
});
const prompt = ChatPromptTemplate.fromMessages([
["ai", "You are a helpful assistant"],
["human", "{input}"],
]);
const outputParser = new StringOutputParser();
const chain = prompt.pipe(model).pipe(outputParser);
const response = await chain.stream({
input: "Why is the sky blue? Be concise with your answer.",
});
let streamTokens = "";
let streamIters = 0;
for await (const item of response) {
streamTokens += item;
streamIters += 1;
}
console.log("stream tokens:", streamTokens);
console.log("stream iters:", streamIters);
/*
stream item:
stream item: Hello! I'm here to help answer any questions you
stream item: might have or assist you with any task you'd like to
stream item: accomplish. I can provide information
stream item: on a wide range of topics
stream item: , from math and science to history and literature. I can
stream item: also help you manage your schedule, set reminders, and
stream item: much more. Is there something specific you need help with? Let
stream item: me know!
stream item:
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/cohere/connectors.ts | import { ChatCohere } from "@langchain/cohere";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatCohere({
apiKey: process.env.COHERE_API_KEY, // Default
});
const response = await model.invoke(
[new HumanMessage("How tall are the largest pengiuns?")],
{
connectors: [{ id: "web-search" }],
}
);
console.log("response: ", JSON.stringify(response, null, 2));
/*
response: {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessage"
],
"kwargs": {
"content": "The tallest penguin species currently in existence is the Emperor Penguin, with a height of 110cm to the top of their head or 115cm to the tip of their beak. This is equivalent to being approximately 3 feet and 7 inches tall.\n\nA fossil of an Anthropornis penguin was found in New Zealand and is suspected to have been even taller at 1.7 metres, though this is uncertain as the fossil is only known from preserved arm and leg bones. The height of a closely related species, Kumimanu biceae, has been estimated at 1.77 metres.\n\nDid you know that because larger-bodied penguins can hold their breath for longer, the colossus penguin could have stayed underwater for 40 minutes or more?",
"additional_kwargs": {
"response_id": "a3567a59-2377-439d-894f-0309f7fea1de",
"generationId": "65dc5b1b-6099-44c4-8338-50eed0d427c5",
"token_count": {
"prompt_tokens": 1394,
"response_tokens": 149,
"total_tokens": 1543,
"billed_tokens": 159
},
"meta": {
"api_version": {
"version": "1"
},
"billed_units": {
"input_tokens": 10,
"output_tokens": 149
}
},
"citations": [
{
"start": 58,
"end": 73,
"text": "Emperor Penguin",
"documentIds": [
"web-search_3:2",
"web-search_4:10"
]
},
{
"start": 92,
"end": 157,
"text": "110cm to the top of their head or 115cm to the tip of their beak.",
"documentIds": [
"web-search_4:10"
]
},
{
"start": 200,
"end": 225,
"text": "3 feet and 7 inches tall.",
"documentIds": [
"web-search_3:2",
"web-search_4:10"
]
},
{
"start": 242,
"end": 262,
"text": "Anthropornis penguin",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 276,
"end": 287,
"text": "New Zealand",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 333,
"end": 343,
"text": "1.7 metres",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 403,
"end": 431,
"text": "preserved arm and leg bones.",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 473,
"end": 488,
"text": "Kumimanu biceae",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 512,
"end": 524,
"text": "1.77 metres.",
"documentIds": [
"web-search_9:4"
]
},
{
"start": 613,
"end": 629,
"text": "colossus penguin",
"documentIds": [
"web-search_3:2"
]
},
{
"start": 663,
"end": 681,
"text": "40 minutes or more",
"documentIds": [
"web-search_3:2"
]
}
],
"documents": [
{
"id": "web-search_3:2",
"snippet": " By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\n\nInterestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\n\nThe fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula.",
"title": "Giant 6-Foot-8 Penguin Discovered in Antarctica",
"url": "https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169"
},
{
"id": "web-search_4:10",
"snippet": "\n\nWhat is the Tallest Penguin?\n\nThe tallest penguin is the Emperor Penguin which is 110cm to the top of their head or 115cm to the tip of their beak.\n\nHow Tall Are Emperor Penguins in Feet?\n\nAn Emperor Penguin is about 3 feet and 7 inches to the top of its head. They are the largest penguin species currently in existence.\n\nHow Much Do Penguins Weigh in Pounds?\n\nPenguins weigh between 2.5lbs for the smallest species, the Little Penguin, up to 82lbs for the largest species, the Emperor Penguin.\n\nDr. Jackie Symmons is a professional ecologist with a Ph.D. in Ecology and Wildlife Management from Bangor University and over 25 years of experience delivering conservation projects.",
"title": "How Big Are Penguins? [Height & Weight of Every Species] - Polar Guidebook",
"url": "https://polarguidebook.com/how-big-are-penguins/"
},
{
"id": "web-search_9:4",
"snippet": "\n\nA fossil of an Anthropornis penguin found on the island may have been even taller, but this is likely to be an exception. The majority of these penguins were only 1.7 metres tall and weighed around 80 kilogrammes.\n\nWhile Palaeeudyptes klekowskii remains the tallest ever penguin, it is no longer the heaviest. At an estimated 150 kilogrammes, Kumimanu fordycei would have been around three times heavier than any living penguin.\n\nWhile it's uncertain how tall the species was, the height of a closely related species, Kumimanu biceae, has been estimated at 1.77 metres.\n\nThese measurements, however, are all open for debate. Many fossil penguins are only known from preserved arm and leg bones, rather than complete skeletons.",
"title": "The largest ever penguin species has been discovered in New Zealand | Natural History Museum",
"url": "https://www.nhm.ac.uk/discover/news/2023/february/largest-ever-penguin-species-discovered-new-zealand.html"
}
],
"searchResults": [
{
"searchQuery": {
"text": "largest penguin species height",
"generationId": "908fe321-5d27-48c4-bdb6-493be5687344"
},
"documentIds": [
"web-search_3:2",
"web-search_4:10",
"web-search_9:4"
],
"connector": {
"id": "web-search"
}
}
],
"tool_inputs": null,
"searchQueries": [
{
"text": "largest penguin species height",
"generationId": "908fe321-5d27-48c4-bdb6-493be5687344"
}
]
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/configurable/configurable_model_declaratively.ts | import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { initChatModel } from "langchain/chat_models/universal";

// Tool schemas declared with zod; the `.describe()` texts become the tool
// documentation the model sees.
const GetWeather = z
  .object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  })
  .describe("Get the current weather in a given location");
const weatherTool = tool(
  (_) => {
    // do something
    return "138 degrees";
  },
  {
    name: "GetWeather",
    schema: GetWeather,
  }
);

const GetPopulation = z
  .object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  })
  .describe("Get the current population in a given location");
const populationTool = tool(
  (_) => {
    // do something
    return "one hundred billion";
  },
  {
    name: "GetPopulation",
    schema: GetPopulation,
  }
);

// No model specified up front: the concrete model is chosen per-invocation
// via `configurable.model`. Tool bindings carry over to whichever model runs.
const llm = await initChatModel(undefined, { temperature: 0 });
const llmWithTools = llm.bindTools([weatherTool, populationTool]);

// First run routed to OpenAI's gpt-4o.
const toolCalls1 = (
  await llmWithTools.invoke("what's bigger in 2024 LA or NYC", {
    configurable: { model: "gpt-4o" },
  })
).tool_calls;
console.log("toolCalls1: ", JSON.stringify(toolCalls1, null, 2));
/*
toolCalls1: [
{
"name": "GetPopulation",
"args": {
"location": "Los Angeles, CA"
},
"type": "tool_call",
"id": "call_DXRBVE4xfLYZfhZOsW1qRbr5"
},
{
"name": "GetPopulation",
"args": {
"location": "New York, NY"
},
"type": "tool_call",
"id": "call_6ec3m4eWhwGz97sCbNt7kOvC"
}
]
*/
// Same chain, now routed to Anthropic's Claude purely via config.
const toolCalls2 = (
  await llmWithTools.invoke("what's bigger in 2024 LA or NYC", {
    configurable: { model: "claude-3-5-sonnet-20240620" },
  })
).tool_calls;
console.log("toolCalls2: ", JSON.stringify(toolCalls2, null, 2));
/*
toolCalls2: [
{
"name": "GetPopulation",
"args": {
"location": "Los Angeles, CA"
},
"id": "toolu_01K3jNU8jx18sJ9Y6Q9SooJ7",
"type": "tool_call"
},
{
"name": "GetPopulation",
"args": {
"location": "New York City, NY"
},
"id": "toolu_01UiANKaSwYykuF4hi3t5oNB",
"type": "tool_call"
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/configurable/configurable_model.ts | import { initChatModel } from "langchain/chat_models/universal";
// Provider-agnostic model: the concrete model is supplied at call time
// through `configurable.model`.
const configurableModel = await initChatModel(undefined, { temperature: 0 });
const gpt4Res = await configurableModel.invoke("what's your name", {
  configurable: { model: "gpt-4o" },
});
console.log("gpt4Res: ", gpt4Res.content);
/*
gpt4Res: I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?
*/
// Same instance, different provider — selected via config only.
const claudeRes = await configurableModel.invoke("what's your name", {
  configurable: { model: "claude-3-5-sonnet-20240620" },
});
console.log("claudeRes: ", claudeRes.content);
/*
claudeRes: My name is Claude. It's nice to meet you!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/configurable/configurable_model_with_defaults.ts | import { initChatModel } from "langchain/chat_models/universal";
// Default model is gpt-4o, but the listed fields can be overridden per call;
// configPrefix namespaces the config keys (e.g. `first_model`).
const firstLlm = await initChatModel("gpt-4o", {
  temperature: 0,
  configurableFields: ["model", "modelProvider", "temperature", "maxTokens"],
  configPrefix: "first", // useful when you have a chain with multiple models
});
const openaiRes = await firstLlm.invoke("what's your name");
console.log("openaiRes: ", openaiRes.content);
/*
openaiRes: I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?
*/
// Override model, temperature and maxTokens for this call only, using the
// "first_" prefix declared above.
const claudeRes = await firstLlm.invoke("what's your name", {
  configurable: {
    first_model: "claude-3-5-sonnet-20240620",
    first_temperature: 0.5,
    first_maxTokens: 100,
  },
});
console.log("claudeRes: ", claudeRes.content);
/*
claudeRes: My name is Claude. It's nice to meet you!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/configurable/basic.ts | import { initChatModel } from "langchain/chat_models/universal";
// Explicit modelProvider: each call returns that provider's chat model class.
// Returns a @langchain/openai ChatOpenAI instance.
const gpt4o = await initChatModel("gpt-4o", {
  modelProvider: "openai",
  temperature: 0,
});
// Returns a @langchain/anthropic ChatAnthropic instance.
const claudeOpus = await initChatModel("claude-3-opus-20240229", {
  modelProvider: "anthropic",
  temperature: 0,
});
// Returns a @langchain/google-vertexai ChatVertexAI instance.
const gemini15 = await initChatModel("gemini-1.5-pro", {
  modelProvider: "google-vertexai",
  temperature: 0,
});
// Since all model integrations implement the ChatModel interface, you can use them in the same way.
console.log(`GPT-4o: ${(await gpt4o.invoke("what's your name")).content}\n`);
console.log(
  `Claude Opus: ${(await claudeOpus.invoke("what's your name")).content}\n`
);
console.log(
  `Gemini 1.5: ${(await gemini15.invoke("what's your name")).content}\n`
);
/*
GPT-4o: I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I help you today?
Claude Opus: My name is Claude. It's nice to meet you!
Gemini 1.5: I don't have a name. I am a large language model, and I am not a person. I am a computer program that can generate text, translate languages, write different kinds of creative content, and answer your questions in an informative way.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models/chat | lc_public_repos/langchainjs/examples/src/models/chat/configurable/inferring_model_provider.ts | import { initChatModel } from "langchain/chat_models/universal";
// No modelProvider given: initChatModel infers the provider from the model
// name (presumably gpt-* -> openai, claude-* -> anthropic,
// gemini-* -> google — compare with the explicit example above; confirm
// against the initChatModel docs).
// The constants are intentionally unused; this example only demonstrates
// provider inference.
const gpt4o = await initChatModel("gpt-4o", {
  temperature: 0,
});
const claudeOpus = await initChatModel("claude-3-opus-20240229", {
  temperature: 0,
});
const gemini15 = await initChatModel("gemini-1.5-pro", {
  temperature: 0,
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/googlegenerativeai.ts | import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai";
import { TaskType } from "@google/generative-ai";
/*
 * Before running this, you should make sure you have created a
 * Google Cloud Project that has `generativelanguage` API enabled.
 *
 * You will also need to generate an API key and set
 * an environment variable GOOGLE_API_KEY
 *
 */
// taskType/title tune the embedding for document retrieval use cases.
const embeddings = new GoogleGenerativeAIEmbeddings({
  model: "embedding-001", // 768 dimensions
  taskType: TaskType.RETRIEVAL_DOCUMENT,
  title: "Document title",
});
const res = await embeddings.embedQuery("OK Google");
console.log(res, res.length);
/*
[
0.010467986, -0.052334797, -0.05164676, -0.0092885755, 0.037551474,
0.007278041, -0.0014511136, -0.0002727135, -0.01205141, -0.028824795,
0.022447161, 0.032513272, -0.0075029004, 0.013371749, 0.03725578,
-0.0179886, -0.032127254, -0.019804858, -0.035530213, -0.057539217,
0.030938378, 0.022367297, -0.024294581, 0.011045744, 0.0026335048,
-0.018090524, 0.0066266404, -0.05072178, -0.025432976, 0.04673682,
-0.044976745, 0.009511519, -0.030653704, 0.0066106077, -0.03870159,
-0.04239313, 0.016969211, -0.015911, 0.020452755, 0.033449557,
-0.002724189, -0.049285132, -0.016055783, -0.0016474632, 0.013622627,
-0.012853559, -0.00383113, 0.0047683385, 0.029007262, -0.082496256,
0.055966448, 0.011457588, 0.04426033, -0.043971397, 0.029413547,
0.012740723, 0.03243298, -0.005483601, -0.01973574, -0.027495336,
0.0031939305, 0.02392931, -0.011409592, 0.053490978, -0.03130516,
-0.037364446, -0.028803863, 0.019082755, -0.00075289875, 0.015987953,
0.005136402, -0.045040093, 0.051010687, -0.06252348, -0.09334517,
-0.11461444, -0.007226655, 0.034570504, 0.017628446, 0.02613834,
-0.0043784343, -0.022333296, -0.053109482, -0.018441308, -0.10350664,
0.048912525, -0.042917475, -0.0014399975, 0.023028672, 0.00041137074,
0.019345555, -0.023254089, 0.060004912, -0.07684076, -0.04034909,
0.05221485, -0.015773885, -0.029030964, 0.02586164, -0.0401004,
... 668 more items
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/googlevertexai_multimodal.ts | import fs from "fs";
import { GoogleVertexAIMultimodalEmbeddings } from "@langchain/community/experimental/multimodal_embeddings/googlevertexai";

const model = new GoogleVertexAIMultimodalEmbeddings();

// Load the image into a buffer to get the embedding of it
const img = fs.readFileSync("/path/to/file.jpg");
const imgEmbedding = await model.embedImageQuery(img);
console.log({ imgEmbedding });

// You can also get text embeddings from the same model
const textEmbedding = await model.embedQuery(
  "What would be a good company name for a company that makes colorful socks?"
);
console.log({ textEmbedding });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/googlevertexai_multimodal_advanced.ts | import fs from "fs";
import { GoogleVertexAIMultimodalEmbeddings } from "@langchain/community/experimental/multimodal_embeddings/googlevertexai";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { Document } from "@langchain/core/documents";

const embeddings = new GoogleVertexAIMultimodalEmbeddings();

// Seed the store with a few text entries embedded via the same model.
const vectorStore = await FaissStore.fromTexts(
  ["dog", "cat", "horse", "seagull"],
  [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }],
  embeddings
);

// Embed an image and insert its vector alongside the text vectors.
const img = fs.readFileSync("parrot.jpeg");
const vectors: number[] = await embeddings.embedImageQuery(img);
const document = new Document({
  pageContent: img.toString("base64"),
  // Metadata is optional but helps track what kind of document is being retrieved
  metadata: {
    id: 5,
    mediaType: "image",
  },
});
// Add the image embedding vectors to the vector store directly
await vectorStore.addVectors([vectors], [document]);

// Use a similar image to the one just added
const img2 = fs.readFileSync("parrot-icon.png");
const vectors2: number[] = await embeddings.embedImageQuery(img2);
// Use the lower level, direct API
const resultTwo = await vectorStore.similaritySearchVectorWithScore(
  vectors2,
  2
);
console.log(JSON.stringify(resultTwo, null, 2));
/*
[
[
Document {
pageContent: '<BASE64 ENCODED IMAGE DATA>'
metadata: {
id: 5,
mediaType: "image"
}
},
0.8931522965431213
],
[
Document {
pageContent: 'seagull',
metadata: {
id: 4
}
},
1.9188631772994995
]
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/openai_timeout.ts | import { OpenAIEmbeddings } from "@langchain/openai";
// Requests exceeding `timeout` (milliseconds) are aborted.
const embeddings = new OpenAIEmbeddings({
  timeout: 1000, // 1s timeout
});
/* Embed queries */
const res = await embeddings.embedQuery("Hello world");
console.log(res);
/* Embed documents */
const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
console.log({ documentRes });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/azure_openai-managed_identity.ts | import {
DefaultAzureCredential,
getBearerTokenProvider,
} from "@azure/identity";
import { AzureOpenAIEmbeddings } from "@langchain/openai";

// Keyless auth: a DefaultAzureCredential-backed bearer-token provider is
// passed instead of an API key (works with managed identity, az login, or
// environment credentials — whichever DefaultAzureCredential resolves).
const credentials = new DefaultAzureCredential();
const azureADTokenProvider = getBearerTokenProvider(
  credentials,
  "https://cognitiveservices.azure.com/.default"
);
const model = new AzureOpenAIEmbeddings({
  azureADTokenProvider,
  azureOpenAIApiInstanceName: "<your_instance_name>",
  azureOpenAIApiEmbeddingsDeploymentName: "<your_embeddings_deployment_name>",
  azureOpenAIApiVersion: "<api_version>",
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/nomic.ts | import { NomicEmbeddings } from "@langchain/nomic";
/* Embed queries */
const nomicEmbeddings = new NomicEmbeddings();
const res = await nomicEmbeddings.embedQuery("Hello world");
console.log(res);
/* Embed documents */
const documentRes = await nomicEmbeddings.embedDocuments([
"Hello world",
"Bye bye",
]);
console.log(documentRes);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/jina.ts | import { JinaEmbeddings } from "@langchain/community/embeddings/jina";
// Jina embeddings; both options shown are optional (apiKey defaults to the
// env var, model to the value shown).
const model = new JinaEmbeddings({
  apiKey: process.env.JINA_API_TOKEN,
  model: "jina-embeddings-v2-base-en", // Default value
});
const embeddings = await model.embedQuery(
  "Tell me a story about a dragon and a princess."
);
console.log(embeddings);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/tencent_hunyuan.ts | // in nodejs environment
import { TencentHunyuanEmbeddings } from "@langchain/community/embeddings/tencent_hunyuan";
// in browser environment
// import { TencentHunyuanEmbeddings } from "@langchain/community/embeddings/tencent_hunyuan/web";
/* Embed queries */
const embeddings = new TencentHunyuanEmbeddings();
const res = await embeddings.embedQuery("你好,世界!");
console.log(res);
/* Embed documents */
const documentRes = await embeddings.embedDocuments(["你好,世界!", "再见"]);
console.log({ documentRes });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/deepinfra.ts | import { DeepInfraEmbeddings } from "@langchain/community/embeddings/deepinfra";
// DeepInfra embeddings; batchSize and modelName are shown with their
// default values for documentation purposes.
const model = new DeepInfraEmbeddings({
  apiToken: process.env.DEEPINFRA_API_TOKEN,
  batchSize: 1024, // Default value
  modelName: "sentence-transformers/clip-ViT-B-32", // Default value
});
const embeddings = await model.embedQuery(
  "Tell me a story about a dragon and a princess."
);
console.log(embeddings);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/azure_openai-base_path.ts | import { AzureOpenAIEmbeddings } from "@langchain/openai";
// Configure via an explicit base path instead of an instance name — useful
// for proxies or non-default Azure endpoints (presumably the deployment name
// is appended to this path; verify against the AzureOpenAI docs).
const model = new AzureOpenAIEmbeddings({
  azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
  azureOpenAIApiEmbeddingsDeploymentName: "<your_embedding_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
  azureOpenAIApiVersion: "<api_version>", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
  azureOpenAIBasePath:
    "https://westeurope.api.microsoft.com/openai/deployments", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/minimax.ts | import { MinimaxEmbeddings } from "@langchain/community/embeddings/minimax";
// Entry point: embeds one query string, then a two-document batch,
// logging both results.
export const run = async () => {
  /* Embed queries */
  const embeddings = new MinimaxEmbeddings();
  const res = await embeddings.embedQuery("Hello world");
  console.log(res);
  /* Embed documents */
  const documentRes = await embeddings.embedDocuments([
    "Hello world",
    "Bye bye",
  ]);
  console.log({ documentRes });
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/fireworks.ts | import { FireworksEmbeddings } from "@langchain/community/embeddings/fireworks";
/* Embed queries */
const fireworksEmbeddings = new FireworksEmbeddings();
const res = await fireworksEmbeddings.embedQuery("Hello world");
console.log(res);
/* Embed documents */
const documentRes = await fireworksEmbeddings.embedDocuments([
"Hello world",
"Bye bye",
]);
console.log(documentRes);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/tensorflow.ts | import "@tensorflow/tfjs-backend-cpu";
import { TensorFlowEmbeddings } from "@langchain/community/embeddings/tensorflow";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { Document } from "@langchain/core/documents";

// In-process embeddings; requires a registered tfjs backend (see the
// `@tensorflow/tfjs-backend-cpu` import at the top of this file).
const embeddings = new TensorFlowEmbeddings();
const store = new MemoryVectorStore(embeddings);

const documents = [
  "A document",
  "Some other piece of text",
  "One more",
  "And another",
];

// Wrap each raw string in a Document before indexing.
await store.addDocuments(
  documents.map((pageContent) => new Document({ pageContent }))
);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/openai.ts | import { OpenAIEmbeddings } from "@langchain/openai";
// Entry point: embeds one query string, then a two-document batch,
// logging both results.
export const run = async () => {
  /* Embed queries */
  const embeddings = new OpenAIEmbeddings();
  const res = await embeddings.embedQuery("Hello world");
  console.log(res);
  /* Embed documents */
  const documentRes = await embeddings.embedDocuments([
    "Hello world",
    "Bye bye",
  ]);
  console.log({ documentRes });
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/cohere.ts | import { CohereEmbeddings } from "@langchain/cohere";
/* Embed queries */
// batchSize caps how many texts are sent per request.
const embeddings = new CohereEmbeddings({
  apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.COHERE_API_KEY
  batchSize: 48, // Default value if omitted is 48. Max value is 96
  model: "embed-english-v3.0",
});
const res = await embeddings.embedQuery("Hello world");
console.log(res);
/* Embed documents */
const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
console.log({ documentRes });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/hf_transformers.ts | import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers";
// In-process embeddings via transformers.js; presumably the ONNX model is
// downloaded/cached on first use — confirm against the integration docs.
const model = new HuggingFaceTransformersEmbeddings({
  model: "Xenova/all-MiniLM-L6-v2",
});
/* Embed queries */
const res = await model.embedQuery(
  "What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
/* Embed documents */
const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]);
console.log({ documentRes });
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.