index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/googlevertexai.ts | import { VertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
// import { VertexAI } from "@langchain/google-vertexai-web";
const model = new VertexAI({
temperature: 0.7,
});
const res = await model.invoke(
"What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
/*
{
res: '* Hue Hues\n' +
'* Sock Spectrum\n' +
'* Kaleidosocks\n' +
'* Threads of Joy\n' +
'* Vibrant Threads\n' +
'* Rainbow Soles\n' +
'* Colorful Canvases\n' +
'* Prismatic Pedals\n' +
'* Sock Canvas\n' +
'* Color Collective'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/portkey-chat.ts | import { PortkeyChat } from "@langchain/community/chat_models/portkey";
import { SystemMessage } from "@langchain/core/messages";
export const run = async () => {
const model = new PortkeyChat({
mode: "single",
llms: [
{
provider: "openai",
virtual_key: "open-ai-key-1234",
model: "gpt-3.5-turbo",
max_tokens: 2000,
},
],
});
const chatPrompt = [new SystemMessage("Question: Write a story")];
const res = await model.stream(chatPrompt);
for await (const i of res) {
if (typeof i.content !== "string") {
throw new Error(
`Content is not a string. Received: ${JSON.stringify(
i.content,
null,
2
)}`
);
}
process.stdout.write(i.content);
}
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/gradient_ai-adapter.ts | import { GradientLLM } from "@langchain/community/llms/gradient_ai";
// Note that inferenceParameters are optional
const model = new GradientLLM({
adapterId: process.env.GRADIENT_ADAPTER_ID,
inferenceParameters: {
maxGeneratedTokenCount: 20,
temperature: 0,
},
});
const res = await model.invoke(
"What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/openai.ts | import { OpenAI } from "@langchain/openai";
export const run = async () => {
const model = new OpenAI({
model: "gpt-4",
temperature: 0.7,
maxTokens: 1000,
maxRetries: 5,
});
const res = await model.invoke(
"Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/cohere.ts | import { Cohere } from "@langchain/cohere";
export const run = async () => {
const model = new Cohere({
temperature: 0.7,
maxTokens: 20,
maxRetries: 5,
});
const res = await model.invoke(
"Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/openai-chat.ts | import { OpenAIChat } from "@langchain/openai";
export const run = async () => {
const model = new OpenAIChat({
prefixMessages: [
{
role: "system",
content: "You are a helpful assistant that answers in pirate language",
},
],
maxTokens: 50,
});
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/portkey.ts | import { Portkey } from "@langchain/community/llms/portkey";
export const run = async () => {
const model = new Portkey({
mode: "single",
llms: [
{
provider: "openai",
virtual_key: "open-ai-key-1234",
model: "text-davinci-003",
max_tokens: 2000,
},
],
});
const res = await model.stream(
"Question: Write a story about a king\nAnswer:"
);
for await (const i of res) {
process.stdout.write(i);
}
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/llms/azure_openai.ts | import { AzureOpenAI } from "@langchain/openai";
export const run = async () => {
const model = new AzureOpenAI({
temperature: 0.7,
maxTokens: 1000,
maxRetries: 5,
});
const res = await model.invoke(
"Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/duckduckgo_search_agent.ts | import { DuckDuckGoSearch } from "@langchain/community/tools/duckduckgo_search";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { pull } from "langchain/hub";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
// Define the tools the agent will have access to.
const tools = [new DuckDuckGoSearch({ maxResults: 1 })];
// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/hwchase17/openai-functions-agent
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const llm = new ChatOpenAI({
model: "gpt-4-turbo-preview",
temperature: 0,
});
const agent = await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const result = await agentExecutor.invoke({
input: "What is Anthropic's estimated revenue for 2024?",
});
console.log(result);
/*
{
input: "What is Anthropic's estimated revenue for 2024?",
output: 'Anthropic has projected that it will generate more than $850 million in annualized revenue by the end of 2024. For more details, you can refer to the [Reuters article](https://www.reuters.com/technology/anthropic-forecasts-more-than-850-mln-annualized-revenue-rate-by-2024-end-report-2023-12-26/).'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/searxng_search.ts | import { ChatOpenAI } from "@langchain/openai";
import { AgentExecutor } from "langchain/agents";
import { BaseMessageChunk } from "@langchain/core/messages";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { RunnableSequence } from "@langchain/core/runnables";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { SearxngSearch } from "@langchain/community/tools/searxng_search";
const model = new ChatOpenAI({
maxTokens: 1000,
model: "gpt-4",
});
// `apiBase` will be automatically parsed from .env file, set "SEARXNG_API_BASE" in .env,
const tools = [
new SearxngSearch({
params: {
format: "json", // Do not change this, format other than "json" is will throw error
engines: "google",
},
// Custom Headers to support rapidAPI authentication Or any instance that requires custom headers
headers: {},
}),
];
const prefix = ChatPromptTemplate.fromMessages([
[
"ai",
"Answer the following questions as best you can. In your final answer, use a bulleted list markdown format.",
],
["human", "{input}"],
]);
// Replace this with your actual output parser.
const customOutputParser = (
input: BaseMessageChunk
): AgentAction | AgentFinish => ({
log: "test",
returnValues: {
output: input,
},
});
// Replace this placeholder agent with your actual implementation.
const agent = RunnableSequence.from([prefix, model, customOutputParser]);
const executor = AgentExecutor.fromAgentAndTools({
agent,
tools,
});
console.log("Loaded agent.");
const input = `What is Langchain? Describe in 50 words`;
console.log(`Executing with input "${input}"...`);
const result = await executor.invoke({ input });
console.log(result);
/**
* Langchain is a framework for developing applications powered by language models, such as chatbots, Generative Question-Answering, summarization, and more. It provides a standard interface, integrations with other tools, and end-to-end chains for common applications. Langchain enables data-aware and powerful applications.
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/gmail.ts | import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { OpenAI } from "@langchain/openai";
import {
GmailCreateDraft,
GmailGetMessage,
GmailGetThread,
GmailSearch,
GmailSendMessage,
} from "@langchain/community/tools/gmail";
import { StructuredTool } from "@langchain/core/tools";
export async function run() {
const model = new OpenAI({
temperature: 0,
apiKey: process.env.OPENAI_API_KEY,
});
// These are the default parameters for the Gmail tools
// const gmailParams = {
// credentials: {
// clientEmail: process.env.GMAIL_CLIENT_EMAIL,
// privateKey: process.env.GMAIL_PRIVATE_KEY,
// },
// scopes: ["https://mail.google.com/"],
// };
// For custom parameters, uncomment the code above, replace the values with your own, and pass it to the tools below
const tools: StructuredTool[] = [
new GmailCreateDraft(),
new GmailGetMessage(),
new GmailGetThread(),
new GmailSearch(),
new GmailSendMessage(),
];
const gmailAgent = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "structured-chat-zero-shot-react-description",
verbose: true,
});
const createInput = `Create a gmail draft for me to edit of a letter from the perspective of a sentient parrot who is looking to collaborate on some research with her estranged friend, a cat. Under no circumstances may you send the message, however.`;
const createResult = await gmailAgent.invoke({ input: createInput });
// Create Result {
// output: 'I have created a draft email for you to edit. The draft Id is r5681294731961864018.'
// }
console.log("Create Result", createResult);
const viewInput = `Could you search in my drafts for the latest email?`;
const viewResult = await gmailAgent.invoke({ input: viewInput });
// View Result {
// output: "The latest email in your drafts is from hopefulparrot@gmail.com with the subject 'Collaboration Opportunity'. The body of the email reads: 'Dear [Friend], I hope this letter finds you well. I am writing to you in the hopes of rekindling our friendship and to discuss the possibility of collaborating on some research together. I know that we have had our differences in the past, but I believe that we can put them aside and work together for the greater good. I look forward to hearing from you. Sincerely, [Parrot]'"
// }
console.log("View Result", viewResult);
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/tavily_search.ts | import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { pull } from "langchain/hub";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
// Define the tools the agent will have access to.
const tools = [new TavilySearchResults({ maxResults: 1 })];
// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/hwchase17/openai-functions-agent
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const llm = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const result = await agentExecutor.invoke({
input: "what is the weather in wailea?",
});
console.log(result);
/*
{
input: 'what is the weather in wailea?',
output: "The current weather in Wailea, HI is 64°F with clear skies. The high for today is 82°F and the low is 66°F. If you'd like more detailed information, you can visit [The Weather Channel](https://weather.com/weather/today/l/Wailea+HI?canonicalCityId=ffa9df9f7220c7e22cbcca3dc0a6c402d9c740c755955db833ea32a645b2bcab)."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/searchapi_google_news.ts | import { ChatOpenAI } from "@langchain/openai";
import { AgentExecutor } from "langchain/agents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { AgentFinish, AgentAction } from "@langchain/core/agents";
import { BaseMessageChunk } from "@langchain/core/messages";
import { SearchApi } from "@langchain/community/tools/searchapi";
const model = new ChatOpenAI({
temperature: 0,
});
const tools = [
new SearchApi(process.env.SEARCHAPI_API_KEY, {
engine: "google_news",
}),
];
const prefix = ChatPromptTemplate.fromMessages([
[
"ai",
"Answer the following questions as best you can. In your final answer, use a bulleted list markdown format.",
],
["human", "{input}"],
]);
// Replace this with your actual output parser.
const customOutputParser = (
input: BaseMessageChunk
): AgentAction | AgentFinish => ({
log: "test",
returnValues: {
output: input,
},
});
// Replace this placeholder agent with your actual implementation.
const agent = RunnableSequence.from([prefix, model, customOutputParser]);
const executor = AgentExecutor.fromAgentAndTools({
agent,
tools,
});
const res = await executor.invoke({
input: "What's happening in Ukraine today?",
});
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/connery.ts | import { ConneryService } from "@langchain/community/tools/connery";
import { ChatOpenAI } from "@langchain/openai";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
// Specify your Connery Runner credentials.
process.env.CONNERY_RUNNER_URL = "";
process.env.CONNERY_RUNNER_API_KEY = "";
// Specify OpenAI API key.
process.env.OPENAI_API_KEY = "";
// Specify your email address to receive the emails from examples below.
const recepientEmail = "test@example.com";
// Get the SendEmail action from the Connery Runner by ID.
const conneryService = new ConneryService();
const sendEmailAction = await conneryService.getAction(
"CABC80BB79C15067CA983495324AE709"
);
// Run the action manually.
const manualRunResult = await sendEmailAction.invoke({
recipient: recepientEmail,
subject: "Test email",
body: "This is a test email sent by Connery.",
});
console.log(manualRunResult);
// Run the action using the OpenAI Functions agent.
const llm = new ChatOpenAI({ temperature: 0 });
const agent = await initializeAgentExecutorWithOptions([sendEmailAction], llm, {
agentType: "openai-functions",
verbose: true,
});
const agentRunResult = await agent.invoke({
input: `Send an email to the ${recepientEmail} and say that I will be late for the meeting.`,
});
console.log(agentRunResult);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/exa_agent.ts | import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import Exa from "exa-js";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
import { createRetrieverTool } from "langchain/tools/retriever";
import { ExaRetriever } from "@langchain/exa";
// @ts-expect-error Some TS Config's will cause this to give a TypeScript error, even though it works.
const client: Exa.default = new Exa(process.env.EXASEARCH_API_KEY);
const exaRetriever = new ExaRetriever({
client,
searchArgs: {
numResults: 2,
},
});
// Convert the ExaRetriever into a tool
const searchTool = createRetrieverTool(exaRetriever, {
name: "search",
description: "Get the contents of a webpage given a string search query.",
});
const tools = [searchTool];
const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
`You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources.`,
],
["human", "{input}"],
new MessagesPlaceholder("agent_scratchpad"),
]);
const agentExecutor = new AgentExecutor({
agent: await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
}),
tools,
});
console.log(
await agentExecutor.invoke({
input: "Summarize for me a fascinating article about cats.",
})
);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/wikipedia.ts | import { WikipediaQueryRun } from "@langchain/community/tools/wikipedia_query_run";
const tool = new WikipediaQueryRun({
topKResults: 3,
maxDocContentLength: 4000,
});
const res = await tool.invoke("Langchain");
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/wolframalpha.ts | import { WolframAlphaTool } from "@langchain/community/tools/wolframalpha";
const tool = new WolframAlphaTool({
appid: "YOUR_APP_ID",
});
const res = await tool.invoke("What is 2 * 2?");
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/webbrowser.ts | import { WebBrowser } from "langchain/tools/webbrowser";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
export async function run() {
// this will not work with Azure OpenAI API yet
// Azure OpenAI API does not support embedding with multiple inputs yet
// Too many inputs. The max number of inputs is 1. We hope to increase the number of inputs per request soon. Please contact us through an Azure support request at: https://go.microsoft.com/fwlink/?linkid=2213926 for further questions.
// So we will fail fast, when Azure OpenAI API is used
if (process.env.AZURE_OPENAI_API_KEY) {
throw new Error(
"Azure OpenAI API does not support embedding with multiple inputs yet"
);
}
const model = new ChatOpenAI({ temperature: 0 });
const embeddings = new OpenAIEmbeddings(
process.env.AZURE_OPENAI_API_KEY
? { azureOpenAIApiDeploymentName: "Embeddings2" }
: {}
);
const browser = new WebBrowser({ model, embeddings });
const result = await browser.invoke(
`"https://www.themarginalian.org/2015/04/09/find-your-bliss-joseph-campbell-power-of-myth","who is joseph campbell"`
);
console.log(result);
/*
Joseph Campbell was a mythologist and writer who discussed spirituality, psychological archetypes, cultural myths, and the mythology of self. He sat down with Bill Moyers for a lengthy conversation at George Lucas’s Skywalker Ranch in California, which continued the following year at the American Museum of Natural History in New York. The resulting 24 hours of raw footage were edited down to six one-hour episodes and broadcast on PBS in 1988, shortly after Campbell’s death, in what became one of the most popular in the history of public television.
Relevant Links:
- [The Holstee Manifesto](http://holstee.com/manifesto-bp)
- [The Silent Music of the Mind: Remembering Oliver Sacks](https://www.themarginalian.org/2015/08/31/remembering-oliver-sacks)
- [Joseph Campbell series](http://billmoyers.com/spotlight/download-joseph-campbell-and-the-power-of-myth-audio/)
- [Bill Moyers](https://www.themarginalian.org/tag/bill-moyers/)
- [books](https://www.themarginalian.org/tag/books/)
*/
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/exa_search.ts | import { ExaSearchResults } from "@langchain/exa";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import Exa from "exa-js";
import { pull } from "langchain/hub";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
// Define the tools the agent will have access to.
const tools = [
new ExaSearchResults({
// @ts-expect-error Some TS Config's will cause this to give a TypeScript error, even though it works.
client: new Exa(process.env.EXASEARCH_API_KEY),
}),
];
// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/hwchase17/openai-functions-agent
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const llm = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const result = await agentExecutor.invoke({
input: "what is the weather in wailea?",
});
console.log(result);
/*
{
input: 'what is the weather in wailea?',
output: 'I found a weather forecast for Wailea-Makena on Windfinder.com. You can check the forecast [here](https://www.windfinder.com/forecast/wailea-makena).'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/discord.ts | import {
DiscordGetMessagesTool,
DiscordChannelSearchTool,
DiscordSendMessagesTool,
DiscordGetGuildsTool,
DiscordGetTextChannelsTool,
} from "@langchain/community/tools/discord";
// Get messages from a channel given channel ID
const getMessageTool = new DiscordGetMessagesTool();
const messageResults = await getMessageTool.invoke("1153400523718938780");
console.log(messageResults);
// Get guilds/servers
const getGuildsTool = new DiscordGetGuildsTool();
const guildResults = await getGuildsTool.invoke("");
console.log(guildResults);
// Search results in a given channel (case-insensitive)
const searchTool = new DiscordChannelSearchTool();
const searchResults = await searchTool.invoke("Test");
console.log(searchResults);
// Get all text channels of a server
const getChannelsTool = new DiscordGetTextChannelsTool();
const channelResults = await getChannelsTool.invoke("1153400523718938775");
console.log(channelResults);
// Send a message
const sendMessageTool = new DiscordSendMessagesTool();
const sendMessageResults = await sendMessageTool.invoke("test message");
console.log(sendMessageResults);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/google_places.ts | import { GooglePlacesAPI } from "@langchain/community/tools/google_places";
import { OpenAI } from "@langchain/openai";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
export async function run() {
const model = new OpenAI({
temperature: 0,
});
const tools = [new GooglePlacesAPI()];
const executor = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "zero-shot-react-description",
verbose: true,
});
const res = await executor.invoke({
input: "Where is the University of Toronto - Scarborough? ",
});
console.log(res.output);
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/stackexchange.ts | import { StackExchangeAPI } from "@langchain/community/tools/stackexchange";
// Get results from StackExchange API
const stackExchangeTool = new StackExchangeAPI();
const result = await stackExchangeTool.invoke("zsh: command not found: python");
console.log(result);
// Get results from StackExchange API with title query
const stackExchangeTitleTool = new StackExchangeAPI({
queryType: "title",
});
const titleResult = await stackExchangeTitleTool.invoke(
"zsh: command not found: python"
);
console.log(titleResult);
// Get results from StackExchange API with bad query
const stackExchangeBadTool = new StackExchangeAPI();
const badResult = await stackExchangeBadTool.invoke(
"sjefbsmnazdkhbazkbdoaencopebfoubaef"
);
console.log(badResult);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/pyinterpreter.ts | import { OpenAI } from "@langchain/openai";
import { PythonInterpreterTool } from "@langchain/community/experimental/tools/pyinterpreter";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
const prompt = ChatPromptTemplate.fromTemplate(
`Generate python code that does {input}. Do not generate anything else.`
);
const model = new OpenAI({});
const interpreter = await PythonInterpreterTool.initialize({
indexURL: "../node_modules/pyodide",
});
// Note: In Deno, it may be easier to initialize the interpreter yourself:
// import pyodideModule from "npm:pyodide/pyodide.js";
// import { PythonInterpreterTool } from "npm:@langchain/community/experimental/tools/pyinterpreter";
// const pyodide = await pyodideModule.loadPyodide();
// const pythonTool = new PythonInterpreterTool({instance: pyodide})
const chain = prompt
.pipe(model)
.pipe(new StringOutputParser())
.pipe(interpreter);
const result = await chain.invoke({
input: `prints "Hello LangChain"`,
});
console.log(JSON.parse(result).stdout);
// To install python packages:
// This uses the loadPackages command.
// This works for packages built with pyodide.
await interpreter.addPackage("numpy");
// But for other packages, you will want to use micropip.
// See: https://pyodide.org/en/stable/usage/loading-packages.html
// for more information
await interpreter.addPackage("micropip");
// The following is roughly equivalent to:
// pyodide.runPython(`import ${pkgname}; ${pkgname}`);
const micropip = interpreter.pyodideInstance.pyimport("micropip");
await micropip.install("numpy");
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/google_routes.ts | import { GoogleRoutesAPI } from "@langchain/community/tools/google_routes";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
export async function run() {
const tools = [new GoogleRoutesAPI()];
const llm = new ChatOpenAI({
model: "gpt-3.5-turbo-0125",
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const result = await agentExecutor.invoke({
input: "How to go from the Eiffel Tower to the Louvre Museum by transit?",
});
console.log(result);
/* {
input: 'How to go from the Eiffel Tower to the Louvre Museum by transit?',
output: 'To travel from the Eiffel Tower to the Louvre Museum by transit, here is the route information:\n' +
'\n' +
'- Departure: Eiffel Tower\n' +
'- Arrival: Louvre Museum\n' +
'- Distance: 4.1 km\n' +
'- Duration: 18 minutes\n' +
'- Transit Fare: €2.15\n' +
'\n' +
'Travel Instructions:\n' +
"1. Walk to Pont d'Iéna\n" +
'2. Take bus 72 towards Gare de Lyon - Maison de La RATP\n' +
'3. Walk to your destination\n' +
'\n' +
'Departure Time: 22:03\n' +
'Arrival Time: 22:15\n' +
'\n' +
'Please follow these instructions to reach the Louvre Museum from the Eiffel Tower.'
} */
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/dalle_image_generation.ts | /* eslint-disable no-process-env */
import { DallEAPIWrapper } from "@langchain/openai";
const tool = new DallEAPIWrapper({
n: 1, // Default
model: "dall-e-3", // Default
apiKey: process.env.OPENAI_API_KEY, // Default
});
const imageURL = await tool.invoke("a painting of a cat");
console.log(imageURL);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/google_calendar.ts | import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { OpenAI } from "@langchain/openai";
import { Calculator } from "@langchain/community/tools/calculator";
import {
GoogleCalendarCreateTool,
GoogleCalendarViewTool,
} from "@langchain/community/tools/google_calendar";
export async function run() {
const model = new OpenAI({
temperature: 0,
apiKey: process.env.OPENAI_API_KEY,
});
const googleCalendarParams = {
credentials: {
clientEmail: process.env.GOOGLE_CALENDAR_CLIENT_EMAIL,
privateKey: process.env.GOOGLE_CALENDAR_PRIVATE_KEY,
calendarId: process.env.GOOGLE_CALENDAR_CALENDAR_ID,
},
scopes: [
"https://www.googleapis.com/auth/calendar",
"https://www.googleapis.com/auth/calendar.events",
],
model,
};
const tools = [
new Calculator(),
new GoogleCalendarCreateTool(googleCalendarParams),
new GoogleCalendarViewTool(googleCalendarParams),
];
const calendarAgent = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "zero-shot-react-description",
verbose: true,
});
const createInput = `Create a meeting with John Doe next Friday at 4pm - adding to the agenda of it the result of 99 + 99`;
const createResult = await calendarAgent.invoke({ input: createInput });
// Create Result {
// output: 'A meeting with John Doe on 29th September at 4pm has been created and the result of 99 + 99 has been added to the agenda.'
// }
console.log("Create Result", createResult);
const viewInput = `What meetings do I have this week?`;
const viewResult = await calendarAgent.invoke({ input: viewInput });
// View Result {
// output: "You have no meetings this week between 8am and 8pm."
// }
console.log("View Result", viewResult);
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/tools/duckduckgo_search_simple.ts | import { DuckDuckGoSearch } from "@langchain/community/tools/duckduckgo_search";
// Instantiate the DuckDuckGoSearch tool.
const tool = new DuckDuckGoSearch({ maxResults: 1 });
// Get the results of a query by calling .invoke on the tool.
const result = await tool.invoke(
"What is Anthropic's estimated revenue for 2024?"
);
console.log(result);
/*
[{
"title": "Anthropic forecasts more than $850 mln in annualized revenue rate by ...",
"link": "https://www.reuters.com/technology/anthropic-forecasts-more-than-850-mln-annualized-revenue-rate-by-2024-end-report-2023-12-26/",
"snippet": "Dec 26 (Reuters) - Artificial intelligence startup <b>Anthropic</b> has projected it will generate more than $850 million in annualized <b>revenue</b> by the end of <b>2024</b>, the Information reported on Tuesday ..."
}]
*/
|
0 | lc_public_repos/langchainjs/examples/src/tools | lc_public_repos/langchainjs/examples/src/tools/azure_dynamic_sessions/azure_dynamic_sessions.ts | import { SessionsPythonREPLTool } from "@langchain/azure-dynamic-sessions";
const tool = new SessionsPythonREPLTool({
poolManagementEndpoint:
process.env.AZURE_CONTAINER_APP_SESSION_POOL_MANAGEMENT_ENDPOINT || "",
});
const result = await tool.invoke("print('Hello, World!')\n1+2");
console.log(result);
// {
// stdout: "Hello, World!\n",
// stderr: "",
// result: 3,
// }
|
0 | lc_public_repos/langchainjs/examples/src/tools | lc_public_repos/langchainjs/examples/src/tools/azure_dynamic_sessions/azure_dynamic_sessions-agent.ts | import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { pull } from "langchain/hub";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
import { SessionsPythonREPLTool } from "@langchain/azure-dynamic-sessions";
import { AzureChatOpenAI } from "@langchain/openai";
const tools = [
new SessionsPythonREPLTool({
poolManagementEndpoint:
process.env.AZURE_CONTAINER_APP_SESSION_POOL_MANAGEMENT_ENDPOINT || "",
}),
];
// Note: you need a model deployment that supports function calling,
// like `gpt-35-turbo` version `1106`.
const llm = new AzureChatOpenAI({
temperature: 0,
});
// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/jacob/tool-calling-agent
const prompt = await pull<ChatPromptTemplate>("jacob/tool-calling-agent");
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const result = await agentExecutor.invoke({
input:
"Create a Python program that prints the Python version and return the result.",
});
console.log(result);
|
0 | lc_public_repos/langchainjs/examples/src/tools | lc_public_repos/langchainjs/examples/src/tools/azure_dynamic_sessions/.env.example | AZURE_CONTAINER_APP_SESSION_POOL_MANAGEMENT_ENDPOINT=<your_endpoint>
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/extraction/openai_tool_calling_extraction.ts | import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { JsonOutputToolsParser } from "@langchain/core/output_parsers/openai_tools";
const EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.`;
const prompt = ChatPromptTemplate.fromMessages([
["system", EXTRACTION_TEMPLATE],
["human", "{input}"],
]);
const person = z.object({
name: z.string().describe("The person's name"),
age: z.string().describe("The person's age"),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
temperature: 0,
}).bind({
tools: [
{
type: "function",
function: {
name: "person",
description: "A person",
parameters: zodToJsonSchema(person),
},
},
],
});
const parser = new JsonOutputToolsParser();
const chain = prompt.pipe(model).pipe(parser);
const res = await chain.invoke({
input: "jane is 2 and bob is 3",
});
console.log(res);
/*
[
{ name: 'person', arguments: { name: 'jane', age: '2' } },
{ name: 'person', arguments: { name: 'bob', age: '3' } }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/pinecone.ts | import { PineconeEmbeddings } from "@langchain/pinecone";
export const run = async () => {
const model = new PineconeEmbeddings();
console.log({ model }); // Prints out model metadata
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
};
await run();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/baidu_qianfan.ts | import { BaiduQianfanEmbeddings } from "@langchain/baidu-qianfan";
const embeddings = new BaiduQianfanEmbeddings();
const res = await embeddings.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/bedrock.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { BedrockEmbeddings } from "@langchain/aws";
const embeddings = new BedrockEmbeddings({
region: process.env.BEDROCK_AWS_REGION!,
credentials: {
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
},
model: "amazon.titan-embed-text-v1", // Default value
});
const res = await embeddings.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/openai_dimensions.ts | import { OpenAIEmbeddings } from "@langchain/openai";
const embeddings = new OpenAIEmbeddings({
model: "text-embedding-3-large",
});
const vectors = await embeddings.embedDocuments(["some text"]);
console.log(vectors[0].length);
const embeddings1024 = new OpenAIEmbeddings({
model: "text-embedding-3-large",
dimensions: 1024,
});
const vectors2 = await embeddings1024.embedDocuments(["some text"]);
console.log(vectors2[0].length);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/jina.ts | import { JinaEmbeddings } from "@langchain/community/embeddings/jina";
const model = new JinaEmbeddings({
apiKey: process.env.JINA_API_TOKEN,
model: "jina-embeddings-v2-base-en",
});
const embeddings = await model.embedQuery(
"Tell me a story about a dragon and a princess."
);
console.log(embeddings);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/gradient_ai.ts | import { GradientEmbeddings } from "@langchain/community/embeddings/gradient_ai";
const model = new GradientEmbeddings({});
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/alibaba_tongyi.ts | import { AlibabaTongyiEmbeddings } from "@langchain/community/embeddings/alibaba_tongyi";
const model = new AlibabaTongyiEmbeddings({});
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/max_concurrency.ts | import { OpenAIEmbeddings } from "@langchain/openai";
export const run = async () => {
const model = new OpenAIEmbeddings({
maxConcurrency: 1,
});
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/togetherai.ts | import { TogetherAIEmbeddings } from "@langchain/community/embeddings/togetherai";
const embeddings = new TogetherAIEmbeddings({
apiKey: process.env.TOGETHER_AI_API_KEY, // Default value
model: "togethercomputer/m2-bert-80M-8k-retrieval", // Default value
});
const res = await embeddings.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/llama_cpp_basic.ts | import { LlamaCppEmbeddings } from "@langchain/community/embeddings/llama_cpp";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const embeddings = await LlamaCppEmbeddings.initialize({
modelPath: llamaPath,
});
const res = embeddings.embedQuery("Hello Llama!");
console.log(res);
/*
[ 15043, 365, 29880, 3304, 29991 ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/deepinfra.ts | import { DeepInfraEmbeddings } from "@langchain/community/embeddings/deepinfra";
const model = new DeepInfraEmbeddings({
apiToken: process.env.DEEPINFRA_API_TOKEN,
batchSize: 1024, // Default value
modelName: "sentence-transformers/clip-ViT-B-32", // Default value
});
const embeddings = await model.embedQuery(
"Tell me a story about a dragon and a princess."
);
console.log(embeddings);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/llama_cpp_docs.ts | import { LlamaCppEmbeddings } from "@langchain/community/embeddings/llama_cpp";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const documents = ["Hello World!", "Bye Bye!"];
const embeddings = await LlamaCppEmbeddings.initialize({
modelPath: llamaPath,
});
const res = await embeddings.embedDocuments(documents);
console.log(res);
/*
[ [ 15043, 2787, 29991 ], [ 2648, 29872, 2648, 29872, 29991 ] ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/cache_backed_redis.ts | import { Redis } from "ioredis";
import { OpenAIEmbeddings } from "@langchain/openai";
import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { RedisByteStore } from "@langchain/community/storage/ioredis";
import { TextLoader } from "langchain/document_loaders/fs/text";
const underlyingEmbeddings = new OpenAIEmbeddings();
// Requires a Redis instance running at http://localhost:6379.
// See https://github.com/redis/ioredis for full config options.
const redisClient = new Redis();
const redisStore = new RedisByteStore({
client: redisClient,
});
const cacheBackedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
underlyingEmbeddings,
redisStore,
{
namespace: underlyingEmbeddings.modelName,
}
);
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
let time = Date.now();
const vectorstore = await FaissStore.fromDocuments(
documents,
cacheBackedEmbeddings
);
console.log(`Initial creation time: ${Date.now() - time}ms`);
/*
Initial creation time: 1808ms
*/
// The second time is much faster since the embeddings for the input docs have already been added to the cache
time = Date.now();
const vectorstore2 = await FaissStore.fromDocuments(
documents,
cacheBackedEmbeddings
);
console.log(`Cached creation time: ${Date.now() - time}ms`);
/*
Cached creation time: 33ms
*/
// Many keys logged with hashed values
const keys = [];
for await (const key of redisStore.yieldKeys()) {
keys.push(key);
}
console.log(keys.slice(0, 5));
/*
[
'text-embedding-ada-002fa9ac80e1bf226b7b4dfc03ea743289a65a727b2',
'text-embedding-ada-0027dbf9c4b36e12fe1768300f145f4640342daaf22',
'text-embedding-ada-002ea9b59e760e64bec6ee9097b5a06b0d91cb3ab64',
'text-embedding-ada-002fec5d021611e1527297c5e8f485876ea82dcb111',
'text-embedding-ada-002c00f818c345da13fed9f2697b4b689338143c8c7'
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/premai.ts | import { PremEmbeddings } from "@langchain/community/embeddings/premai";
const embeddings = new PremEmbeddings({
// In Node.js defaults to process.env.PREM_API_KEY
apiKey: "YOUR-API-KEY",
// In Node.js defaults to process.env.PREM_PROJECT_ID
project_id: "YOUR-PROJECT_ID",
model: "@cf/baai/bge-small-en-v1.5", // The model to generate the embeddings
});
const res = await embeddings.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/cache_backed_in_memory.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed";
import { InMemoryStore } from "@langchain/core/stores";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { TextLoader } from "langchain/document_loaders/fs/text";
const underlyingEmbeddings = new OpenAIEmbeddings();
const inMemoryStore = new InMemoryStore();
const cacheBackedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
underlyingEmbeddings,
inMemoryStore,
{
namespace: underlyingEmbeddings.modelName,
}
);
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
// No keys logged yet since the cache is empty
for await (const key of inMemoryStore.yieldKeys()) {
console.log(key);
}
let time = Date.now();
const vectorstore = await FaissStore.fromDocuments(
documents,
cacheBackedEmbeddings
);
console.log(`Initial creation time: ${Date.now() - time}ms`);
/*
Initial creation time: 1905ms
*/
// The second time is much faster since the embeddings for the input docs have already been added to the cache
time = Date.now();
const vectorstore2 = await FaissStore.fromDocuments(
documents,
cacheBackedEmbeddings
);
console.log(`Cached creation time: ${Date.now() - time}ms`);
/*
Cached creation time: 8ms
*/
// Many keys logged with hashed values
const keys = [];
for await (const key of inMemoryStore.yieldKeys()) {
keys.push(key);
}
console.log(keys.slice(0, 5));
/*
[
'text-embedding-ada-002ea9b59e760e64bec6ee9097b5a06b0d91cb3ab64',
'text-embedding-ada-0023b424f5ed1271a6f5601add17c1b58b7c992772e',
'text-embedding-ada-002fec5d021611e1527297c5e8f485876ea82dcb111',
'text-embedding-ada-00262f72e0c2d711c6b861714ee624b28af639fdb13',
'text-embedding-ada-00262d58882330038a4e6e25ea69a938f4391541874'
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/openai.ts | import { OpenAIEmbeddings } from "@langchain/openai";
const model = new OpenAIEmbeddings();
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/cohere.ts | import { CohereEmbeddings } from "@langchain/cohere";
export const run = async () => {
const model = new CohereEmbeddings({ model: "embed-english-v3.0" });
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/zhipuai.ts | import { ZhipuAIEmbeddings } from "@langchain/community/embeddings/zhipuai";
const model = new ZhipuAIEmbeddings({});
const res = await model.embedQuery(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/embeddings/azure_openai.ts | import { AzureOpenAIEmbeddings } from "@langchain/openai";
const model = new AzureOpenAIEmbeddings();
const res = await model.embedQuery(
"What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/embeddings | lc_public_repos/langchainjs/examples/src/embeddings/convex/cache_backed_convex.ts | "use node";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed";
import { OpenAIEmbeddings } from "@langchain/openai";
import { ConvexKVStore } from "@langchain/community/storage/convex";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { ConvexVectorStore } from "@langchain/community/vectorstores/convex";
import { action } from "./_generated/server.js";
export const ask = action({
args: {},
handler: async (ctx) => {
const underlyingEmbeddings = new OpenAIEmbeddings();
const cacheBackedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
underlyingEmbeddings,
new ConvexKVStore({ ctx }),
{
namespace: underlyingEmbeddings.modelName,
}
);
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
let time = Date.now();
const vectorstore = await ConvexVectorStore.fromDocuments(
documents,
cacheBackedEmbeddings,
{ ctx }
);
console.log(`Initial creation time: ${Date.now() - time}ms`);
/*
Initial creation time: 1808ms
*/
// The second time is much faster since the embeddings for the input docs have already been added to the cache
time = Date.now();
const vectorstore2 = await ConvexVectorStore.fromDocuments(
documents,
cacheBackedEmbeddings,
{ ctx }
);
console.log(`Cached creation time: ${Date.now() - time}ms`);
/*
Cached creation time: 33ms
*/
},
});
|
0 | lc_public_repos/langchainjs/examples/src/embeddings/convex | lc_public_repos/langchainjs/examples/src/embeddings/convex/_generated/server.d.ts | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
ActionBuilder,
HttpActionBuilder,
MutationBuilder,
QueryBuilder,
GenericActionCtx,
GenericMutationCtx,
GenericQueryCtx,
GenericDatabaseReader,
GenericDatabaseWriter,
} from "convex/server";
import type { DataModel } from "./dataModel.js";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const query: QueryBuilder<DataModel, "public">;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const internalQuery: QueryBuilder<DataModel, "internal">;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const mutation: MutationBuilder<DataModel, "public">;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const internalMutation: MutationBuilder<DataModel, "internal">;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export declare const action: ActionBuilder<DataModel, "public">;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export declare const internalAction: ActionBuilder<DataModel, "internal">;
/**
* Define an HTTP action.
*
* This function will be used to respond to HTTP requests received by a Convex
* deployment if the requests matches the path and method where this action
* is routed. Be sure to route your action in `convex/http.js`.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
*/
export declare const httpAction: HttpActionBuilder;
/**
* A set of services for use within Convex query functions.
*
* The query context is passed as the first argument to any Convex query
* function run on the server.
*
* This differs from the {@link MutationCtx} because all of the services are
* read-only.
*/
export type QueryCtx = GenericQueryCtx<DataModel>;
/**
* A set of services for use within Convex mutation functions.
*
* The mutation context is passed as the first argument to any Convex mutation
* function run on the server.
*/
export type MutationCtx = GenericMutationCtx<DataModel>;
/**
* A set of services for use within Convex action functions.
*
* The action context is passed as the first argument to any Convex action
* function run on the server.
*/
export type ActionCtx = GenericActionCtx<DataModel>;
/**
* An interface to read from the database within Convex query functions.
*
* The two entry points are {@link DatabaseReader.get}, which fetches a single
* document by its {@link Id}, or {@link DatabaseReader.query}, which starts
* building a query.
*/
export type DatabaseReader = GenericDatabaseReader<DataModel>;
/**
* An interface to read from and write to the database within Convex mutation
* functions.
*
* Convex guarantees that all writes within a single mutation are
* executed atomically, so you never have to worry about partial writes leaving
* your data in an inconsistent state. See [the Convex Guide](https://docs.convex.dev/understanding/convex-fundamentals/functions#atomicity-and-optimistic-concurrency-control)
* for the guarantees Convex provides your functions.
*/
export type DatabaseWriter = GenericDatabaseWriter<DataModel>;
|
0 | lc_public_repos/langchainjs/examples/src/embeddings/convex | lc_public_repos/langchainjs/examples/src/embeddings/convex/_generated/api.js | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { anyApi } from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export const api = anyApi;
export const internal = anyApi;
|
0 | lc_public_repos/langchainjs/examples/src/embeddings/convex | lc_public_repos/langchainjs/examples/src/embeddings/convex/_generated/server.js | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
actionGeneric,
httpActionGeneric,
queryGeneric,
mutationGeneric,
internalActionGeneric,
internalMutationGeneric,
internalQueryGeneric,
} from "convex/server";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const query = queryGeneric;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const internalQuery = internalQueryGeneric;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const mutation = mutationGeneric;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const internalMutation = internalMutationGeneric;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export const action = actionGeneric;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export const internalAction = internalActionGeneric;
/**
* Define a Convex HTTP action.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument, and a `Request` object
* as its second.
* @returns The wrapped endpoint function. Route a URL path to this function in `convex/http.js`.
*/
export const httpAction = httpActionGeneric;
|
0 | lc_public_repos/langchainjs/examples/src/embeddings/convex | lc_public_repos/langchainjs/examples/src/embeddings/convex/_generated/dataModel.d.ts | /* eslint-disable */
/**
* Generated data model types.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { AnyDataModel } from "convex/server";
import type { GenericId } from "convex/values";
/**
* No `schema.ts` file found!
*
* This generated code has permissive types like `Doc = any` because
* Convex doesn't know your schema. If you'd like more type safety, see
* https://docs.convex.dev/using/schemas for instructions on how to add a
* schema file.
*
* After you change a schema, rerun codegen with `npx convex dev`.
*/
/**
* The names of all of your Convex tables.
*/
export type TableNames = string;
/**
* The type of a document stored in Convex.
*/
export type Doc = any;
/**
* An identifier for a document in Convex.
*
* Convex documents are uniquely identified by their `Id`, which is accessible
* on the `_id` field. To learn more, see [Document IDs](https://docs.convex.dev/using/document-ids).
*
* Documents can be loaded using `db.get(id)` in query and mutation functions.
*
* IDs are just strings at runtime, but this type can be used to distinguish them from other
* strings when type checking.
*/
export type Id<TableName extends TableNames = TableNames> =
GenericId<TableName>;
/**
* A type describing your Convex data model.
*
* This type includes information about what tables you have, the type of
* documents stored in those tables, and the indexes defined on them.
*
* This type is used to parameterize methods like `queryGeneric` and
* `mutationGeneric` to make them type-safe.
*/
export type DataModel = AnyDataModel;
|
0 | lc_public_repos/langchainjs/examples/src/embeddings/convex | lc_public_repos/langchainjs/examples/src/embeddings/convex/_generated/api.d.ts | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import type {
ApiFromModules,
FilterApi,
FunctionReference,
} from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
declare const fullApi: ApiFromModules<{}>;
export declare const api: FilterApi<
typeof fullApi,
FunctionReference<any, "public">
>;
export declare const internal: FilterApi<
typeof fullApi,
FunctionReference<any, "internal">
>;
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/googlegenerativeai.ts | import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HarmBlockThreshold, HarmCategory } from "@google/generative-ai";
/*
* Before running this, you should make sure you have created a
* Google Cloud Project that has `generativelanguage` API enabled.
*
* You will also need to generate an API key and set
* an environment variable GOOGLE_API_KEY
*
*/
// Text
const model = new ChatGoogleGenerativeAI({
model: "gemini-pro",
maxOutputTokens: 2048,
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
},
],
});
// Batch and stream are also supported
const res = await model.invoke([
[
"human",
"What would be a good company name for a company that makes colorful socks?",
],
]);
console.log(res);
/*
AIMessage {
content: '1. Rainbow Soles\n' +
'2. Toe-tally Colorful\n' +
'3. Bright Sock Creations\n' +
'4. Hue Knew Socks\n' +
'5. The Happy Sock Factory\n' +
'6. Color Pop Hosiery\n' +
'7. Sock It to Me!\n' +
'8. Mismatched Masterpieces\n' +
'9. Threads of Joy\n' +
'10. Funky Feet Emporium\n' +
'11. Colorful Threads\n' +
'12. Sole Mates\n' +
'13. Colorful Soles\n' +
'14. Sock Appeal\n' +
'15. Happy Feet Unlimited\n' +
'16. The Sock Stop\n' +
'17. The Sock Drawer\n' +
'18. Sole-diers\n' +
'19. Footloose Footwear\n' +
'20. Step into Color',
name: 'model',
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp_stream_multi.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
import { SystemMessage, HumanMessage } from "@langchain/core/messages";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const llamaCpp = await ChatLlamaCpp.initialize({
modelPath: llamaPath,
temperature: 0.7,
});
const stream = await llamaCpp.stream([
new SystemMessage(
"You are a pirate, responses must be very verbose and in pirate dialect."
),
new HumanMessage("Tell me about Llamas?"),
]);
for await (const chunk of stream) {
console.log(chunk.content);
}
/*
Ar
rr
r
,
me
heart
y
!
Ye
be
ask
in
'
about
llam
as
,
e
h
?
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_callbacks.ts | import { ChatOpenAI } from "@langchain/openai";
// See https://cookbook.openai.com/examples/using_logprobs for details
const model = new ChatOpenAI({
logprobs: true,
// topLogprobs: 5,
});
const result = await model.invoke("Hi there!", {
callbacks: [
{
handleLLMEnd(output) {
console.log("GENERATION OUTPUT:", JSON.stringify(output, null, 2));
},
},
],
});
console.log("FINAL OUTPUT", result);
/*
GENERATION OUTPUT: {
"generations": [
[
{
"text": "Hello! How can I assist you today?",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Hello! How can I assist you today?",
"additional_kwargs": {}
}
},
"generationInfo": {
"finish_reason": "stop",
"logprobs": {
"content": [
{
"token": "Hello",
"logprob": -0.0010195904,
"bytes": [
72,
101,
108,
108,
111
],
"top_logprobs": []
},
{
"token": "!",
"logprob": -0.0004447316,
"bytes": [
33
],
"top_logprobs": []
},
{
"token": " How",
"logprob": -0.00006682846,
"bytes": [
32,
72,
111,
119
],
"top_logprobs": []
},
...
]
}
}
}
]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 9,
"promptTokens": 10,
"totalTokens": 19
}
}
}
FINAL OUTPUT AIMessage {
content: 'Hello! How can I assist you today?',
additional_kwargs: { function_call: undefined, tool_calls: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/llm_caching.ts | import { OpenAI } from "@langchain/openai";
// To make the caching really obvious, lets use a slower model.
const model = new OpenAI({
model: "gpt-3.5-turbo-instruct",
cache: true,
});
console.time();
// The first time, it is not yet in cache, so it should take longer
const res = await model.invoke("Tell me a long joke!");
console.log(res);
console.timeEnd();
console.time();
// The second time it is, so it goes faster
const res2 = await model.invoke("Tell me a long joke!");
console.log(res2);
console.timeEnd();
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_mistralai_agents.ts | import { z } from "zod";
import { ChatMistralAI } from "@langchain/mistralai";
import { tool } from "@langchain/core/tools";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
// Builds a tool-calling agent backed by Mistral AI and runs one query.
const llm = new ChatMistralAI({
  temperature: 0,
  model: "mistral-large-latest",
});

// Prompt template must have "input" and "agent_scratchpad" input variables
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["placeholder", "{chat_history}"],
  ["human", "{input}"],
  ["placeholder", "{agent_scratchpad}"],
]);

// Mocked tool — always reports "28 °C" regardless of the location argument.
const currentWeatherTool = tool(async () => "28 °C", {
  name: "get_current_weather",
  description: "Get the current weather in a given location",
  schema: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  }),
});

const agent = await createToolCallingAgent({
  llm,
  tools: [currentWeatherTool],
  prompt,
});

const agentExecutor = new AgentExecutor({
  agent,
  tools: [currentWeatherTool],
});

const input = "What's the weather like in Paris?";
const { output } = await agentExecutor.invoke({ input });
console.log(output);
/*
  The current weather in Paris is 28 °C.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_qianfan.ts | import { ChatBaiduWenxin } from "@langchain/community/chat_models/baiduwenxin";
import { HumanMessage } from "@langchain/core/messages";
// Default model is ERNIE-Bot-turbo
const ernieTurbo = new ChatBaiduWenxin({
baiduApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.BAIDU_API_KEY
baiduSecretKey: "YOUR-SECRET-KEY", // In Node.js defaults to process.env.BAIDU_SECRET_KEY
});
// Use ERNIE-Bot
const ernie = new ChatBaiduWenxin({
model: "ERNIE-Bot", // Available models are shown above
temperature: 1,
baiduApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.BAIDU_API_KEY
baiduSecretKey: "YOUR-SECRET-KEY", // In Node.js defaults to process.env.BAIDU_SECRET_KEY
});
const messages = [new HumanMessage("Hello")];
let res = await ernieTurbo.invoke(messages);
/*
AIChatMessage {
text: 'Hello! How may I assist you today?',
name: undefined,
additional_kwargs: {}
}
}
*/
res = await ernie.invoke(messages);
/*
AIChatMessage {
text: 'Hello! How may I assist you today?',
name: undefined,
additional_kwargs: {}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_functions.ts | import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import { HumanMessage } from "@langchain/core/messages";
const functionSchema = {
name: "get_weather",
description: " Get weather information.",
parameters: {
type: "object",
properties: {
location: {
type: "string",
description: " The location to get the weather",
},
},
required: ["location"],
},
};
// Bind function arguments to the model.
// All subsequent invoke calls will use the bound parameters.
// "functions.parameters" must be formatted as JSON Schema
const model = new ChatMinimax({
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
}).bind({
functions: [functionSchema],
});
const result = await model.invoke([
new HumanMessage({
content: " What is the weather like in NewYork tomorrow?",
name: "I",
}),
]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { function_call: [Object] } },
lc_namespace: [ 'langchain', 'schema' ],
content: '',
name: undefined,
additional_kwargs: {
function_call: { name: 'get_weather', arguments: '{"location": "NewYork"}' }
}
}
*/
// Alternatively, you can pass function call arguments as an additional argument as a one-off:
const minimax = new ChatMinimax({
model: "abab5.5-chat",
botSetting: [
{
bot_name: "MM Assistant",
content: "MM Assistant is an AI Assistant developed by minimax.",
},
],
});
const result2 = await minimax.invoke(
[new HumanMessage("What is the weather like in NewYork tomorrow?")],
{
functions: [functionSchema],
}
);
console.log(result2);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { function_call: [Object] } },
lc_namespace: [ 'langchain', 'schema' ],
content: '',
name: undefined,
additional_kwargs: {
function_call: { name: 'get_weather', arguments: '{"location": "NewYork"}' }
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/token_usage_tracking.ts | import { ChatOpenAI } from "@langchain/openai";
// Demonstrates reading provider-reported token usage from the
// `response_metadata` field of an invocation result.
const chatModel = new ChatOpenAI({
  model: "gpt-4-turbo",
});

const res = await chatModel.invoke("Tell me a joke.");
// `response_metadata` carries token usage and the finish reason.
console.log(res.response_metadata);
/*
  {
    tokenUsage: { completionTokens: 15, promptTokens: 12, totalTokens: 27 },
    finish_reason: 'stop'
  }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_togetherai.ts | import { ChatTogetherAI } from "@langchain/community/chat_models/togetherai";
import { HumanMessage } from "@langchain/core/messages";
const model = new ChatTogetherAI({
temperature: 0.9,
// In Node.js defaults to process.env.TOGETHER_AI_API_KEY
apiKey: "YOUR-API-KEY",
});
console.log(await model.invoke([new HumanMessage("Hello there!")]));
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_tencent_hunyuan.ts | // in nodejs environment
import { ChatTencentHunyuan } from "@langchain/community/chat_models/tencent_hunyuan";
// in browser environment
// import { ChatTencentHunyuan } from "@langchain/community/chat_models/tencent_hunyuan/web";
import { HumanMessage } from "@langchain/core/messages";
import type { LLMResult } from "@langchain/core/outputs";
const messages = [new HumanMessage("Hello")];
// Default model is hunyuan-pro
const hunyuanPro = new ChatTencentHunyuan({
streaming: false,
temperature: 1,
});
let res = await hunyuanPro.invoke(messages);
console.log(res);
/*
AIMessage {
content: 'Hello! How can I help you today?Is there anything I can do for you?',
name: undefined,
additional_kwargs: {},
response_metadata: {
tokenUsage: { totalTokens: 20, promptTokens: 1, completionTokens: 19 }
},
tool_calls: [],
invalid_tool_calls: []
}
*/
// Use hunyuan-lite
const hunyuanLite = new ChatTencentHunyuan({
model: "hunyuan-lite",
streaming: false,
});
res = await hunyuanLite.invoke(messages);
console.log(res);
/*
AIMessage {
content: '你好!很高兴为你提供服务~有什么我可以帮助你的吗?',
name: undefined,
additional_kwargs: {},
response_metadata: {
tokenUsage: { totalTokens: 14, promptTokens: 1, completionTokens: 13 }
},
tool_calls: [],
invalid_tool_calls: []
}
*/
// Use hunyuan-lite with streaming
const hunyuanLiteStream = new ChatTencentHunyuan({
model: "hunyuan-lite",
streaming: true,
temperature: 1,
});
hunyuanLiteStream.invoke(messages, {
callbacks: [
{
handleLLMEnd(output: LLMResult) {
console.log(output);
/*
{
generations: [
[
[Object], [Object],
[Object], [Object],
[Object], [Object],
[Object], [Object],
[Object]
]
],
llmOutput: {
tokenUsage: { totalTokens: 9, promptTokens: 1, completionTokens: 8 }
}
}
*/
},
handleLLMNewToken(token: string) {
console.log(`token: ${token}`);
/*
token: 你好
token: !
token: 很高兴
token: 能
token: 为您
token: 解答
token: 问题
token: 和建议
token: 方案
token: .
token: 如果您
token: 有其他
token: 需要帮助
token: 的地方
token: ,
token:
token: 随时
token: 告诉我
token: 哦
token: ~
token:
*/
},
},
],
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/minimax_chain.ts | import { LLMChain } from "langchain/chains";
import { ChatMinimax } from "@langchain/community/chat_models/minimax";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
// We can also construct an LLMChain from a ChatPromptTemplate and a chat model.
const chat = new ChatMinimax({ temperature: 0.01 });
const chatPrompt = ChatPromptTemplate.fromMessages([
SystemMessagePromptTemplate.fromTemplate(
"You are a helpful assistant that translates {input_language} to {output_language}."
),
HumanMessagePromptTemplate.fromTemplate("{text}"),
]);
const chainB = new LLMChain({
prompt: chatPrompt,
llm: chat,
});
const resB = await chainB.invoke({
input_language: "English",
output_language: "Chinese",
text: "I love programming.",
});
console.log({ resB });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_timeout.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
// Demonstrates passing a per-call timeout to a chat model invocation.
const chat = new ChatOpenAI({ temperature: 1 });

const response = await chat.invoke(
  [
    new HumanMessage(
      "What is a good name for a company that makes colorful socks?"
    ),
  ],
  // Abort the request if no response arrives within one second.
  { timeout: 1000 } // 1s timeout
);
console.log(response);
// AIMessage { text: '\n\nRainbow Sox Co.' }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp_stream.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await ChatLlamaCpp.initialize({
modelPath: llamaPath,
temperature: 0.7,
});
const stream = await model.stream("Tell me a short story about a happy Llama.");
for await (const chunk of stream) {
console.log(chunk.content);
}
/*
Once
upon
a
time
,
in
a
green
and
sunny
field
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_ollama_wso.ts | import { ChatOllama } from "@langchain/ollama";
import { z } from "zod";
// Define the model
const model = new ChatOllama({
model: "llama3-groq-tool-use",
});
// Define the tool schema you'd like the model to use.
const schema = z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA"),
});
// Pass the schema to the withStructuredOutput method to bind it to the model.
const modelWithTools = model.withStructuredOutput(schema, {
name: "get_current_weather",
});
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool."
);
console.log(result);
/*
{ location: 'San Francisco, CA' }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_mistralai_wsa.ts | import { ChatMistralAI } from "@langchain/mistralai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
const calculatorSchema = z
.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
})
.describe("A simple calculator tool");
const model = new ChatMistralAI({
apiKey: process.env.MISTRAL_API_KEY,
model: "mistral-large-latest",
});
// Pass the schema and tool name to the withStructuredOutput method
const modelWithTool = model.withStructuredOutput(calculatorSchema);
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(modelWithTool);
const response = await chain.invoke({
input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
/**
* You can supply a "name" field to give the LLM additional context
* around what you are trying to generate. You can also pass
* 'includeRaw' to get the raw message back from the model too.
*/
const includeRawModel = model.withStructuredOutput(calculatorSchema, {
name: "calculator",
includeRaw: true,
});
const includeRawChain = prompt.pipe(includeRawModel);
const includeRawResponse = await includeRawChain.invoke({
input: "What is 2 + 2?",
});
console.log(JSON.stringify(includeRawResponse, null, 2));
/*
{
"raw": {
"kwargs": {
"content": "",
"additional_kwargs": {
"tool_calls": [
{
"id": "null",
"type": "function",
"function": {
"name": "calculator",
"arguments": "{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}"
}
}
]
}
}
},
"parsed": {
"operation": "add",
"number1": 2,
"number2": 2
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_fake.ts | import { FakeListChatModel } from "@langchain/core/utils/testing";
import { HumanMessage } from "@langchain/core/messages";
import { StringOutputParser } from "@langchain/core/output_parsers";
/**
 * The FakeListChatModel can be used to simulate ordered predefined responses.
 * Each invocation returns the next response from the list, in order.
 */
const chat = new FakeListChatModel({
  responses: ["I'll callback later.", "You 'console' them!"],
});

// Fixed typo in the prompt strings: "JavasSript" -> "JavaScript".
const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
const secondMessage = new HumanMessage(
  "How do you cheer up a JavaScript developer?"
);

const firstResponse = await chat.invoke([firstMessage]);
const secondResponse = await chat.invoke([secondMessage]);
console.log({ firstResponse });
console.log({ secondResponse });

/**
 * The FakeListChatModel can also be used to simulate streamed responses.
 */
const stream = await chat
  .pipe(new StringOutputParser())
  .stream(`You want to hear a JavaScript joke?`);
const chunks = [];
for await (const chunk of stream) {
  chunks.push(chunk);
}
console.log(chunks.join(""));

/**
 * The FakeListChatModel can also be used to simulate delays in either
 * synchronous or streamed responses.
 */
const slowChat = new FakeListChatModel({
  responses: ["Because Oct 31 equals Dec 25", "You 'console' them!"],
  sleep: 1000, // artificial delay before each response (presumably ms — confirm)
});

const thirdMessage = new HumanMessage(
  "Why do programmers always mix up Halloween and Christmas?"
);
const slowResponse = await slowChat.invoke([thirdMessage]);
console.log({ slowResponse });

const slowStream = await slowChat
  .pipe(new StringOutputParser())
  .stream("How do you cheer up a JavaScript developer?");
const slowChunks = [];
for await (const chunk of slowStream) {
  slowChunks.push(chunk);
}
console.log(slowChunks.join(""));
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_ollama_multimodal.ts | import { ChatOllama } from "@langchain/ollama";
import { HumanMessage } from "@langchain/core/messages";
import * as fs from "node:fs/promises";
const imageData = await fs.readFile("./hotdog.jpg");
const chat = new ChatOllama({
model: "llava",
baseUrl: "http://127.0.0.1:11434",
});
const res = await chat.invoke([
new HumanMessage({
content: [
{
type: "text",
text: "What is in this image?",
},
{
type: "image_url",
image_url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
},
],
}),
]);
console.log(res);
/*
AIMessage {
content: ' The image shows a hot dog with ketchup on it, placed on top of a bun. It appears to be a close-up view, possibly taken in a kitchen setting or at an outdoor event.',
name: undefined,
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_tools_wsa_json.ts | import { ChatAnthropicTools } from "@langchain/anthropic/experimental";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const calculatorJsonSchema = {
type: "object",
properties: {
operation: {
type: "string",
enum: ["add", "subtract", "multiply", "divide"],
description: "The type of operation to execute.",
},
number1: { type: "number", description: "The first number to operate on." },
number2: {
type: "number",
description: "The second number to operate on.",
},
},
required: ["operation", "number1", "number2"],
description: "A simple calculator tool",
};
const model = new ChatAnthropicTools({
model: "claude-3-sonnet-20240229",
temperature: 0.1,
});
// Pass the schema and optionally, the tool name to the withStructuredOutput method
const modelWithTool = model.withStructuredOutput(calculatorJsonSchema, {
name: "calculator",
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant who always needs to use a calculator.",
],
["human", "{input}"],
]);
// Chain your prompt and model together
const chain = prompt.pipe(modelWithTool);
const response = await chain.invoke({
input: "What is 2 + 2?",
});
console.log(response);
/*
{ operation: 'add', number1: 2, number2: 2 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_azure_chat_openai.ts | import { AzureChatOpenAI } from "@langchain/openai";
const model = new AzureChatOpenAI({
prefixMessages: [
{
role: "system",
content: "You are a helpful assistant that answers in pirate language",
},
],
maxTokens: 50,
});
const res = await model.invoke(
"What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_tool_calls.ts | import { ChatOpenAI } from "@langchain/openai";
import { ToolMessage } from "@langchain/core/messages";
// Mocked weather lookup — in production this would be a database/API call.
// Returns a JSON string of shape { location, temperature, unit }.
function getCurrentWeather(location: string, _unit?: string) {
  const place = location.toLowerCase();
  let temperature = "22";
  let unit = "celsius";
  if (place.includes("tokyo")) {
    temperature = "10";
  } else if (place.includes("san francisco")) {
    temperature = "72";
    unit = "fahrenheit";
  }
  return JSON.stringify({ location, temperature, unit });
}
// Bind function to the model as a tool
const chat = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
maxTokens: 128,
}).bind({
tools: [
{
type: "function",
function: {
name: "get_current_weather",
description: "Get the current weather in a given location",
parameters: {
type: "object",
properties: {
location: {
type: "string",
description: "The city and state, e.g. San Francisco, CA",
},
unit: { type: "string", enum: ["celsius", "fahrenheit"] },
},
required: ["location"],
},
},
},
],
tool_choice: "auto",
});
// Ask initial question that requires multiple tool calls
const res = await chat.invoke([
["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
]);
console.log(res.additional_kwargs.tool_calls);
/*
[
{
id: 'call_IiOsjIZLWvnzSh8iI63GieUB',
type: 'function',
function: {
name: 'get_current_weather',
arguments: '{"location": "San Francisco", "unit": "celsius"}'
}
},
{
id: 'call_blQ3Oz28zSfvS6Bj6FPEUGA1',
type: 'function',
function: {
name: 'get_current_weather',
arguments: '{"location": "Tokyo", "unit": "celsius"}'
}
},
{
id: 'call_Kpa7FaGr3F1xziG8C6cDffsg',
type: 'function',
function: {
name: 'get_current_weather',
arguments: '{"location": "Paris", "unit": "celsius"}'
}
}
]
*/
// Format the results from calling the tool calls back to OpenAI as ToolMessages
const toolMessages = res.additional_kwargs.tool_calls?.map((toolCall) => {
const toolCallResult = getCurrentWeather(
JSON.parse(toolCall.function.arguments).location
);
return new ToolMessage({
tool_call_id: toolCall.id,
name: toolCall.function.name,
content: toolCallResult,
});
});
// Send the results back as the next step in the conversation
const finalResponse = await chat.invoke([
["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
res,
...(toolMessages ?? []),
]);
console.log(finalResponse);
/*
AIMessage {
content: 'The current weather in:\n' +
'- San Francisco is 72°F\n' +
'- Tokyo is 10°C\n' +
'- Paris is 22°C',
additional_kwargs: { function_call: undefined, tool_calls: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_bedrock_tools.ts | import { BedrockChat } from "@langchain/community/chat_models/bedrock";
// Or, from web environments:
// import { BedrockChat } from "@langchain/community/chat_models/bedrock/web";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
const model = new BedrockChat({
region: process.env.BEDROCK_AWS_REGION,
model: "anthropic.claude-3-sonnet-20240229-v1:0",
maxRetries: 0,
credentials: {
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
},
});
const weatherSchema = z
.object({
city: z.string().describe("The city to get the weather for"),
state: z.string().describe("The state to get the weather for").optional(),
})
.describe("Get the weather for a city");
const modelWithTools = model.bindTools([
{
name: "weather_tool",
description: weatherSchema.description,
input_schema: zodToJsonSchema(weatherSchema),
},
]);
// Optionally, you can bind tools via the `.bind` method:
// const modelWithTools = model.bind({
// tools: [
// {
// name: "weather_tool",
// description: weatherSchema.description,
// input_schema: zodToJsonSchema(weatherSchema),
// },
// ],
// });
const res = await modelWithTools.invoke("What's the weather in New York?");
console.log(res);
/*
AIMessage {
additional_kwargs: { id: 'msg_bdrk_01JF7hb4PNQPywP4gnBbgpHi' },
response_metadata: {
stop_reason: 'tool_use',
usage: { input_tokens: 300, output_tokens: 85 }
},
tool_calls: [
{
name: 'weather_tool',
args: {
city: 'New York',
state: 'NY'
},
id: 'toolu_bdrk_01AtEZRTCKioFXqhoNcpgaV7'
}
],
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/openai_functions.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
const extractionFunctionSchema = {
name: "extractor",
description: "Extracts fields from the input.",
parameters: {
type: "object",
properties: {
tone: {
type: "string",
enum: ["positive", "negative"],
description: "The overall tone of the input",
},
word_count: {
type: "number",
description: "The number of words in the input",
},
chat_response: {
type: "string",
description: "A response to the human's input",
},
},
required: ["tone", "word_count", "chat_response"],
},
};
const model = new ChatOpenAI({
model: "gpt-4",
}).bind({
functions: [extractionFunctionSchema],
function_call: { name: "extractor" },
});
const result = await model.invoke([new HumanMessage("What a beautiful day!")]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { function_call: [Object] } },
lc_namespace: [ 'langchain', 'schema' ],
content: '',
name: undefined,
additional_kwargs: {
function_call: {
name: 'extractor',
arguments: '{\n' +
' "tone": "positive",\n' +
' "word_count": 4,\n' +
` "chat_response": "I'm glad you're enjoying the day! What makes it so beautiful for you?"\n` +
'}'
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/openai_functions_zod.ts | import { ChatOpenAI } from "@langchain/openai";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { HumanMessage } from "@langchain/core/messages";
const extractionFunctionSchema = {
name: "extractor",
description: "Extracts fields from the input.",
parameters: zodToJsonSchema(
z.object({
tone: z
.enum(["positive", "negative"])
.describe("The overall tone of the input"),
entity: z.string().describe("The entity mentioned in the input"),
word_count: z.number().describe("The number of words in the input"),
chat_response: z.string().describe("A response to the human's input"),
final_punctuation: z
.optional(z.string())
.describe("The final punctuation mark in the input, if any."),
})
),
};
const model = new ChatOpenAI({
model: "gpt-4",
}).bind({
functions: [extractionFunctionSchema],
function_call: { name: "extractor" },
});
const result = await model.invoke([new HumanMessage("What a beautiful day!")]);
console.log(result);
/*
AIMessage {
lc_serializable: true,
lc_kwargs: { content: '', additional_kwargs: { function_call: [Object] } },
lc_namespace: [ 'langchain', 'schema' ],
content: '',
name: undefined,
additional_kwargs: {
function_call: {
name: 'extractor',
arguments: '{\n' +
'"tone": "positive",\n' +
'"entity": "day",\n' +
'"word_count": 4,\n' +
`"chat_response": "I'm glad you're enjoying the day!",\n` +
'"final_punctuation": "!"\n' +
'}'
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/googlegenerativeai_multimodal.ts | import fs from "fs";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";
// Multi-modal
const vision = new ChatGoogleGenerativeAI({
model: "gemini-pro-vision",
maxOutputTokens: 2048,
});
const image = fs.readFileSync("./hotdog.jpg").toString("base64");
const input2 = [
new HumanMessage({
content: [
{
type: "text",
text: "Describe the following image.",
},
{
type: "image_url",
image_url: `data:image/png;base64,${image}`,
},
],
}),
];
const res2 = await vision.invoke(input2);
console.log(res2);
/*
AIMessage {
content: ' The image shows a hot dog in a bun. The hot dog is grilled and has a dark brown color. The bun is toasted and has a light brown color. The hot dog is in the center of the bun.',
name: 'model',
additional_kwargs: {}
}
*/
// Multi-modal streaming
const res3 = await vision.stream(input2);
for await (const chunk of res3) {
console.log(chunk);
}
/*
AIMessageChunk {
content: ' The image shows a hot dog in a bun. The hot dog is grilled and has grill marks on it. The bun is toasted and has a light golden',
name: 'model',
additional_kwargs: {}
}
AIMessageChunk {
content: ' brown color. The hot dog is in the center of the bun.',
name: 'model',
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_llama_cpp.ts | import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";
import { HumanMessage } from "@langchain/core/messages";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await ChatLlamaCpp.initialize({ modelPath: llamaPath });
const response = await model.invoke([
new HumanMessage({ content: "My name is John." }),
]);
console.log({ response });
/*
AIMessage {
lc_serializable: true,
lc_kwargs: {
content: 'Hello John.',
additional_kwargs: {}
},
lc_namespace: [ 'langchain', 'schema' ],
content: 'Hello John.',
name: undefined,
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_cloudflare_workersai.ts | import { ChatCloudflareWorkersAI } from "@langchain/cloudflare";
const model = new ChatCloudflareWorkersAI({
model: "@cf/meta/llama-2-7b-chat-int8", // Default value
cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
cloudflareApiToken: process.env.CLOUDFLARE_API_TOKEN,
// Pass a custom base URL to use Cloudflare AI Gateway
// baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,
});
const response = await model.invoke([
["system", "You are a helpful assistant that translates English to German."],
["human", `Translate "I love programming".`],
]);
console.log(response);
/*
AIMessage {
content: `Sure! Here's the translation of "I love programming" into German:\n` +
'\n' +
'"Ich liebe Programmieren."\n' +
'\n' +
'In this sentence, "Ich" means "I," "liebe" means "love," and "Programmieren" means "programming."',
additional_kwargs: {}
}
*/
const stream = await model.stream([
["system", "You are a helpful assistant that translates English to German."],
["human", `Translate "I love programming".`],
]);
for await (const chunk of stream) {
console.log(chunk);
}
/*
AIMessageChunk {
content: 'S',
additional_kwargs: {}
}
AIMessageChunk {
content: 'ure',
additional_kwargs: {}
}
AIMessageChunk {
content: '!',
additional_kwargs: {}
}
AIMessageChunk {
content: ' Here',
additional_kwargs: {}
}
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic.ts | import { ChatAnthropic } from "@langchain/anthropic";
const model = new ChatAnthropic({
temperature: 0.9,
model: "claude-3-5-sonnet-20240620",
// In Node.js defaults to process.env.ANTHROPIC_API_KEY,
// apiKey: "YOUR-API-KEY",
maxTokens: 1024,
});
const res = await model.invoke("Why is the sky blue?");
console.log(res);
/*
AIMessage {
content: "The sky appears blue due to a phenomenon called Rayleigh scattering. Here's a brief explanation:\n" +
'\n' +
'1. Sunlight contains all colors of the visible spectrum.\n' +
'\n' +
"2. As sunlight enters Earth's atmosphere, it collides with gas molecules and other particles.\n" +
'\n' +
'3. These collisions cause the light to scatter in all directions.\n' +
'\n' +
'4. Blue light has a shorter wavelength and higher frequency than other colors in the visible spectrum, so it scatters more easily than other colors.\n' +
'\n' +
'5. This scattered blue light reaches our eyes from all directions, making the sky appear blue.\n' +
'\n' +
'6. Other colors, like red and yellow, have longer wavelengths and pass through the atmosphere more directly, which is why we see them primarily during sunrise and sunset when sunlight travels through more of the atmosphere to reach our eyes.\n' +
'\n' +
'This effect is more pronounced during the day when the sun is high in the sky. At sunrise and sunset, when sunlight travels through more of the atmosphere, we see more red and orange colors because the blue light has been scattered away by the time it reaches our eyes.',
response_metadata: {
id: 'msg_013zKN9RXhpyCeHNsgwHjHsi',
model: 'claude-3-5-sonnet-20240620',
stop_reason: 'end_turn',
stop_sequence: null,
usage: { input_tokens: 13, output_tokens: 233 }
},
tool_calls: [],
invalid_tool_calls: [],
usage_metadata: { input_tokens: 13, output_tokens: 233, total_tokens: 246 }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_vertexai_agents.ts | import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatVertexAI } from "@langchain/google-vertexai";
// Uncomment this if you're running inside a web/edge environment.
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
// NOTE(review): annotated as `any`, presumably to work around a generics
// mismatch between ChatVertexAI and createToolCallingAgent — confirm the
// escape hatch is still required and narrow the type if possible.
const llm: any = new ChatVertexAI({
  temperature: 0,
});
// Prompt template must have "input" and "agent_scratchpad input variables"
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
// Mocked tool
const currentWeatherTool = tool(async () => "28 °C", {
name: "get_current_weather",
description: "Get the current weather in a given location",
schema: z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA"),
}),
});
const agent = await createToolCallingAgent({
llm,
tools: [currentWeatherTool],
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools: [currentWeatherTool],
});
const input = "What's the weather like in Paris?";
const { output } = await agentExecutor.invoke({ input });
console.log(output);
/*
It's 28 degrees Celsius in Paris.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/custom_advanced.ts | import { AIMessage, BaseMessage } from "@langchain/core/messages";
import { ChatResult } from "@langchain/core/outputs";
import {
BaseChatModel,
BaseChatModelCallOptions,
BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
export interface AdvancedCustomChatModelOptions
extends BaseChatModelCallOptions {}
export interface AdvancedCustomChatModelParams extends BaseChatModelParams {
n: number;
}
/**
 * Example custom chat model that echoes back the first `n` characters of
 * the first input message, reporting `n` as the token usage.
 */
export class AdvancedCustomChatModel extends BaseChatModel<AdvancedCustomChatModelOptions> {
  // Maximum number of characters echoed back from the first message.
  n: number;

  static lc_name(): string {
    return "AdvancedCustomChatModel";
  }

  constructor(fields: AdvancedCustomChatModelParams) {
    super(fields);
    this.n = fields.n;
  }

  /**
   * Core generation hook. Throws when no messages are passed or when the
   * first message's content is not a plain string (i.e. multimodal input).
   */
  async _generate(
    messages: BaseMessage[],
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    const [first] = messages;
    if (first === undefined) {
      throw new Error("No messages provided.");
    }
    const raw = first.content;
    if (typeof raw !== "string") {
      throw new Error("Multimodal messages are not supported.");
    }
    const text = raw.slice(0, this.n);
    return {
      generations: [{ message: new AIMessage({ content: text }), text }],
      llmOutput: { tokenUsage: { usedTokens: this.n } },
    };
  }

  _llmType(): string {
    return "advanced_custom_chat_model";
  }
}
// Echo model configured to return the first 4 characters of the input.
const chatModel = new AdvancedCustomChatModel({ n: 4 });
console.log(await chatModel.invoke([["human", "I am an LLM"]]));
// Stream lifecycle events and print only the final chat-model-end event.
const eventStream = chatModel.streamEvents([["human", "I am an LLM"]], {
  version: "v2",
});
for await (const event of eventStream) {
  if (event.event === "on_chat_model_end") {
    console.log(JSON.stringify(event, null, 2));
  }
}
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_togetherai_tools.ts | import { ChatTogetherAI } from "@langchain/community/chat_models/togetherai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
import { Calculator } from "@langchain/community/tools/calculator";
// Use a pre-built tool, converted into the OpenAI tool-call schema.
const calculatorTool = convertToOpenAITool(new Calculator());
const modelWithCalculator = new ChatTogetherAI({
  temperature: 0,
  // This is the default env variable name it will look for if none is passed.
  apiKey: process.env.TOGETHER_AI_API_KEY,
  // Together JSON mode/tool calling only supports a select number of models
  model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
}).bind({
  // Bind the tool to the model.
  tools: [calculatorTool],
  tool_choice: calculatorTool, // Specify what tool the model should use
});
const prompt = ChatPromptTemplate.fromMessages([
  // Fixed typo in the system prompt: "mathmatician" -> "mathematician".
  ["system", "You are a super not-so-smart mathematician."],
  ["human", "Help me out, how can I add {math}?"],
]);
// Use LCEL to chain the prompt to the model.
const response = await prompt.pipe(modelWithCalculator).invoke({
  math: "2 plus 3",
});
console.log(JSON.stringify(response.additional_kwargs.tool_calls));
/**
[
{
"id": "call_f4lzeeuho939vs4dilwd7267",
"type":"function",
"function": {
"name":"calculator",
"arguments": "{\"input\":\"2 + 3\"}"
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai.ts | import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
// Basic model configured with an explicit API key.
const model = new ChatOpenAI({
  temperature: 0.9,
  apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY
});
// You can also pass tools or functions to the model, learn more here
// https://platform.openai.com/docs/guides/gpt/function-calling
const modelForFunctionCalling = new ChatOpenAI({
  model: "gpt-4",
  temperature: 0, // Deterministic output keeps the function arguments stable.
});
await modelForFunctionCalling.invoke(
  [new HumanMessage("What is the weather in New York?")],
  {
    // JSON-Schema description of the callable function.
    functions: [
      {
        name: "get_current_weather",
        description: "Get the current weather in a given location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The city and state, e.g. San Francisco, CA",
            },
            unit: { type: "string", enum: ["celsius", "fahrenheit"] },
          },
          required: ["location"],
        },
      },
    ],
    // You can set the `function_call` arg to force the model to use a function
    function_call: {
      name: "get_current_weather",
    },
  }
);
/*
AIMessage {
text: '',
name: undefined,
additional_kwargs: {
function_call: {
name: 'get_current_weather',
arguments: '{\n "location": "New York"\n}'
}
}
}
*/
// Coerce response type with JSON mode.
// Requires "gpt-4-1106-preview" or later
const jsonModeModel = new ChatOpenAI({
  model: "gpt-4-1106-preview",
  maxTokens: 128,
}).bind({
  // Forces the model to emit syntactically valid JSON.
  response_format: {
    type: "json_object",
  },
});
// Must be invoked with a system message containing the string "JSON":
// https://platform.openai.com/docs/guides/text-generation/json-mode
const res = await jsonModeModel.invoke([
  ["system", "Only return JSON"],
  ["human", "Hi there!"],
]);
console.log(res);
/*
AIMessage {
content: '{\n "response": "How can I assist you today?"\n}',
name: undefined,
additional_kwargs: { function_call: undefined, tool_calls: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_invoke_info.ts | import { ChatOpenAI } from "@langchain/openai";
// See https://cookbook.openai.com/examples/using_logprobs for details
// See https://cookbook.openai.com/examples/using_logprobs for details
const model = new ChatOpenAI({
  logprobs: true, // Ask the API to return per-token log-probabilities.
  // topLogprobs: 5,
});
const responseMessage = await model.invoke("Hi there!");
// The logprobs come back on `response_metadata` (see expected output below).
console.log(JSON.stringify(responseMessage, null, 2));
/*
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Hello! How can I assist you today?",
"additional_kwargs": {},
"response_metadata": {
"tokenUsage": {
"completionTokens": 9,
"promptTokens": 10,
"totalTokens": 19
},
"finish_reason": "stop",
"logprobs": {
"content": [
{
"token": "Hello",
"logprob": -0.0006793116,
"bytes": [
72,
101,
108,
108,
111
],
"top_logprobs": []
},
{
"token": "!",
"logprob": -0.00011725161,
"bytes": [
33
],
"top_logprobs": []
},
{
"token": " How",
"logprob": -0.000038457987,
"bytes": [
32,
72,
111,
119
],
"top_logprobs": []
},
{
"token": " can",
"logprob": -0.00094290765,
"bytes": [
32,
99,
97,
110
],
"top_logprobs": []
},
{
"token": " I",
"logprob": -0.0000013856493,
"bytes": [
32,
73
],
"top_logprobs": []
},
{
"token": " assist",
"logprob": -0.14702488,
"bytes": [
32,
97,
115,
115,
105,
115,
116
],
"top_logprobs": []
},
{
"token": " you",
"logprob": -0.000001147242,
"bytes": [
32,
121,
111,
117
],
"top_logprobs": []
},
{
"token": " today",
"logprob": -0.000067901296,
"bytes": [
32,
116,
111,
100,
97,
121
],
"top_logprobs": []
},
{
"token": "?",
"logprob": -0.000014974867,
"bytes": [
63
],
"top_logprobs": []
}
]
}
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_azure_openai_base_path.ts | import { AzureChatOpenAI } from "@langchain/openai";
// Each Azure setting below falls back to the matching env variable in Node.js.
const model = new AzureChatOpenAI({
  azureOpenAIApiKey: "<your_key>", // process.env.AZURE_OPENAI_API_KEY
  azureOpenAIApiDeploymentName: "<your_deployment_name>", // process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
  azureOpenAIApiVersion: "<api_version>", // process.env.AZURE_OPENAI_API_VERSION
  azureOpenAIBasePath:
    "https://westeurope.api.microsoft.com/openai/deployments", // process.env.AZURE_OPENAI_BASE_PATH
  temperature: 0.9,
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_iflytek_xinghuo.ts | import { ChatIflytekXinghuo } from "@langchain/community/chat_models/iflytek_xinghuo";
import { HumanMessage } from "@langchain/core/messages";
// Credentials are read from the standard iFlytek environment variables.
const model = new ChatIflytekXinghuo();
// Two independent single-turn calls, each printed as it returns.
const greeting = await model.invoke([new HumanMessage("Nice to meet you!")]);
console.log(greeting);
const followUp = await model.invoke([new HumanMessage("Hello")]);
console.log(followUp);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_googlevertexai-multimodal.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatVertexAI } from "@langchain/google-vertexai";
import fs from "node:fs";
const model = new ChatVertexAI({
  model: "gemini-pro-vision",
  temperature: 0.7,
});
// Read the local image and base64-encode it for inline transmission.
const base64Image = fs.readFileSync("./hotdog.jpg").toString("base64");
// Multimodal prompt: one human turn carrying a text part and an image part.
const prompt = ChatPromptTemplate.fromMessages([
  [
    "human",
    [
      {
        type: "text",
        text: "Describe the following image.",
      },
      {
        type: "image_url",
        image_url: "data:image/png;base64,{image_base64}",
      },
    ],
  ],
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({ image_base64: base64Image });
console.log(response.content);
/*
This is an image of a hot dog. The hot dog is on a white background. The hot dog is a grilled sausage in a bun.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_anthropic_legacy.ts | import { ChatAnthropic } from "@langchain/anthropic";
// Legacy-style configuration; `maxTokensToSample` caps the completion length.
const model = new ChatAnthropic({
  maxTokensToSample: 1024,
  temperature: 0.9,
  apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ANTHROPIC_API_KEY
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_premai.ts | import { ChatPrem } from "@langchain/community/chat_models/premai";
import { HumanMessage } from "@langchain/core/messages";
// Credentials fall back to PREM_API_KEY / PREM_PROJECT_ID env vars in Node.js.
const model = new ChatPrem({
  project_id: "YOUR-PROJECT_ID",
  apiKey: "YOUR-API-KEY",
});
console.log(await model.invoke([new HumanMessage("Hello there!")]));
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_groq.ts | import { ChatGroq } from "@langchain/groq";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const model = new ChatGroq({
  apiKey: process.env.GROQ_API_KEY,
});
// Minimal system + human prompt, piped straight into the model.
const chatPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["human", "{input}"],
]);
const answer = await chatPrompt.pipe(model).invoke({
  input: "Hello",
});
console.log("response", answer);
/**
response AIMessage {
content: "Hello! I'm happy to assist you in any way I can. Is there something specific you need help with or a question you have?",
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/usage_metadata_anthropic.ts | import { ChatAnthropic } from "@langchain/anthropic";
const chatModel = new ChatAnthropic({
  model: "claude-3-haiku-20240307",
});
// `usage_metadata` reports input/output/total token counts for the call.
const response = await chatModel.invoke("Tell me a joke.");
console.log(response.usage_metadata);
/*
{ input_tokens: 12, output_tokens: 98, total_tokens: 110 }
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_ollama.ts | import { ChatOllama } from "@langchain/ollama";
import { StringOutputParser } from "@langchain/core/output_parsers";
const model = new ChatOllama({
  baseUrl: "http://localhost:11434", // Default value
  model: "llama3",
});
// Stream the completion through a string parser and accumulate the chunks.
const stream = await model
  .pipe(new StringOutputParser())
  .stream(`Translate "I love programming" into German.`);
let output = "";
for await (const piece of stream) {
  output += piece;
}
console.log(output);
/*
The translation of "I love programming" into German is:
Ich liebe Programmieren.
Here's a breakdown of the translation:
* Ich = I
* liebe = love
* Programmieren = programming (note: this word is the infinitive form, which is often used in informal contexts or when speaking about one's profession or hobby)
If you want to make it sound more formal or use the correct verb conjugation for "I", you can say:
Ich bin ein großer Fan von Programmieren.
This translates to:
I am a big fan of programming.
In this sentence, "bin" is the first person singular present tense of the verb "sein", which means "to be". The phrase "ein großer Fan" means "a big fan".
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/chat_mistralai.ts | import { ChatMistralAI } from "@langchain/mistralai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const model = new ChatMistralAI({
  model: "mistral-small",
  apiKey: process.env.MISTRAL_API_KEY,
});
// Simple system + human prompt, piped straight into the model.
const chatPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["human", "{input}"],
]);
const answer = await chatPrompt.pipe(model).invoke({
  input: "Hello",
});
console.log("response", answer);
/**
response AIMessage {
lc_namespace: [ 'langchain_core', 'messages' ],
content: "Hello! I'm here to help answer any questions you might have or provide information on a variety of topics. How can I assist you today?\n" +
'\n' +
'Here are some common tasks I can help with:\n' +
'\n' +
'* Setting alarms or reminders\n' +
'* Sending emails or messages\n' +
'* Making phone calls\n' +
'* Providing weather information\n' +
'* Creating to-do lists\n' +
'* Offering suggestions for restaurants, movies, or other local activities\n' +
'* Providing definitions and explanations for words or concepts\n' +
'* Translating text into different languages\n' +
'* Playing music or podcasts\n' +
'* Setting timers\n' +
'* Providing directions or traffic information\n' +
'* And much more!\n' +
'\n' +
"Let me know how I can help you specifically, and I'll do my best to make your day easier and more productive!\n" +
'\n' +
'Best regards,\n' +
'Your helpful assistant.',
name: undefined,
additional_kwargs: {}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_alitongyi.ts | import { ChatAlibabaTongyi } from "@langchain/community/chat_models/alibaba_tongyi";
import { HumanMessage } from "@langchain/core/messages";
// Default model is qwen-turbo
const qwenTurbo = new ChatAlibabaTongyi({
  alibabaApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ALIBABA_API_KEY
});
// Use qwen-plus
const qwenPlus = new ChatAlibabaTongyi({
  temperature: 1,
  model: "qwen-plus", // Available models: qwen-turbo, qwen-plus, qwen-max
  alibabaApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ALIBABA_API_KEY
});
// Send the same single-turn message to both models.
const messages = [new HumanMessage("Hello")];
const res = await qwenTurbo.invoke(messages);
/*
AIMessage {
  content: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
const res2 = await qwenPlus.invoke(messages);
/*
AIMessage {
text: "Hello! How can I help you today? Is there something you would like to talk about or ask about? I'm here to assist you with any questions you may have.",
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/integration_openai_custom_base.ts | import { ChatOpenAI } from "@langchain/openai";
// Point the OpenAI client at a custom, OpenAI-compatible endpoint.
const model = new ChatOpenAI({
  configuration: {
    baseURL: "https://your_custom_url.com",
  },
  temperature: 0.9,
});
const reply = await model.invoke("Hi there!");
console.log(reply);
/*
AIMessage {
content: 'Hello! How can I assist you today?',
additional_kwargs: { function_call: undefined }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/chat/caching.ts | import { ChatOpenAI } from "@langchain/openai";
// To make the caching really obvious, lets use a slower model.
const model = new ChatOpenAI({
  cache: true,
  model: "gpt-4",
});
// First call: cache miss, so it round-trips to the API and takes longer.
console.time();
const firstResponse = await model.invoke("Tell me a joke!");
console.log(firstResponse);
console.timeEnd();
// Second call: the identical prompt hits the cache and returns quickly.
console.time();
const secondResponse = await model.invoke("Tell me a joke!");
console.log(secondResponse);
console.timeEnd();
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.