index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/puppeteer_screenshot_web.ts | import { PuppeteerWebBaseLoader } from "@langchain/community/document_loaders/web/puppeteer";
const loaderWithOptions = new PuppeteerWebBaseLoader("https://langchain.com", {
launchOptions: {
headless: true,
},
gotoOptions: {
waitUntil: "domcontentloaded",
},
});
const screenshot = await loaderWithOptions.screenshot();
console.log({ screenshot });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/puppeteer_web.ts | import { PuppeteerWebBaseLoader } from "@langchain/community/document_loaders/web/puppeteer";
const loaderWithOptions = new PuppeteerWebBaseLoader(
"https://www.tabnews.com.br/",
{
launchOptions: {
headless: true,
},
gotoOptions: {
waitUntil: "domcontentloaded",
},
/** Pass custom evaluate , in this case you get page and browser instances */
async evaluate(page, browser) {
await page.waitForResponse("https://www.tabnews.com.br/va/view");
const result = await page.evaluate(() => document.body.innerHTML);
await browser.close();
return result;
},
}
);
const docsFromLoaderWithOptions = await loaderWithOptions.load();
console.log({ docsFromLoaderWithOptions });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/figma.ts | import { FigmaFileLoader } from "@langchain/community/document_loaders/web/figma";
const loader = new FigmaFileLoader({
accessToken: "FIGMA_ACCESS_TOKEN", // or load it from process.env.FIGMA_ACCESS_TOKEN
nodeIds: ["id1", "id2", "id3"],
fileKey: "key",
});
const docs = await loader.load();
console.log({ docs });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/github.ts | import { GithubRepoLoader } from "@langchain/community/document_loaders/web/github";
export const run = async () => {
const loader = new GithubRepoLoader(
"https://github.com/langchain-ai/langchainjs",
{
branch: "main",
recursive: false,
unknown: "warn",
maxConcurrency: 5, // Defaults to 2
}
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/browserbase.ts | import { BrowserbaseLoader } from "@langchain/community/document_loaders/web/browserbase";
const loader = new BrowserbaseLoader(["https://example.com"], {
textContent: true,
});
const docs = await loader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/apify_dataset_existing.ts | import { ApifyDatasetLoader } from "@langchain/community/document_loaders/web/apify_dataset";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
const APIFY_API_TOKEN = "YOUR-APIFY-API-TOKEN"; // or set as process.env.APIFY_API_TOKEN
const OPENAI_API_KEY = "YOUR-OPENAI-API-KEY"; // or set as process.env.OPENAI_API_KEY
/*
* datasetMappingFunction is a function that maps your Apify dataset format to LangChain documents.
* In the below example, the Apify dataset format looks like this:
* {
* "url": "https://apify.com",
* "text": "Apify is the best web scraping and automation platform."
* }
*/
const loader = new ApifyDatasetLoader("your-dataset-id", {
datasetMappingFunction: (item) =>
new Document({
pageContent: (item.text || "") as string,
metadata: { source: item.url },
}),
clientOptions: {
token: APIFY_API_TOKEN,
},
});
const docs = await loader.load();
const vectorStore = await HNSWLib.fromDocuments(
docs,
new OpenAIEmbeddings({ apiKey: OPENAI_API_KEY })
);
const model = new ChatOpenAI({
temperature: 0,
apiKey: OPENAI_API_KEY,
});
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
[
"system",
"Answer the user's questions based on the below context:\n\n{context}",
],
["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
llm: model,
prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
retriever: vectorStore.asRetriever(),
combineDocsChain,
});
const res = await chain.invoke({ input: "What is LangChain?" });
console.log(res.answer);
console.log(res.context.map((doc) => doc.metadata.source));
/*
LangChain is a framework for developing applications powered by language models.
[
'https://js.langchain.com/docs/',
'https://js.langchain.com/docs/modules/chains/',
'https://js.langchain.com/docs/modules/chains/llmchain/',
'https://js.langchain.com/docs/category/functions-4'
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/youtube.ts | import { YoutubeLoader } from "@langchain/community/document_loaders/web/youtube";
const loader = YoutubeLoader.createFromUrl("https://youtu.be/bZQun8Y4L2A", {
language: "en",
addVideoInfo: true,
});
const docs = await loader.load();
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/srt.ts | import { SRTLoader } from "@langchain/community/document_loaders/fs/srt";
export const run = async () => {
const loader = new SRTLoader(
"src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt"
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/spider.ts | import { SpiderLoader } from "@langchain/community/document_loaders/web/spider";
const loader = new SpiderLoader({
url: "https://spider.cloud", // The URL to scrape
apiKey: process.env.SPIDER_API_KEY, // Optional, defaults to `SPIDER_API_KEY` in your env.
mode: "scrape", // The mode to run the crawler in. Can be "scrape" for single urls or "crawl" for deeper scraping following subpages
// params: {
// // optional parameters based on Spider API docs
// // For API documentation, visit https://spider.cloud/docs/api
// },
});
const docs = await loader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/github_custom_instance.ts | import { GithubRepoLoader } from "@langchain/community/document_loaders/web/github";
export const run = async () => {
const loader = new GithubRepoLoader(
"https://github.your.company/org/repo-name",
{
baseUrl: "https://github.your.company",
apiUrl: "https://github.your.company/api/v3",
accessToken: "ghp_A1B2C3D4E5F6a7b8c9d0",
branch: "main",
recursive: true,
unknown: "warn",
}
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/college_confidential.ts | import { CollegeConfidentialLoader } from "@langchain/community/document_loaders/web/college_confidential";
export const run = async () => {
const loader = new CollegeConfidentialLoader(
"https://www.collegeconfidential.com/colleges/brown-university/"
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/searchapi.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { TokenTextSplitter } from "@langchain/textsplitters";
import { SearchApiLoader } from "@langchain/community/document_loaders/web/searchapi";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
// Initialize the necessary components
const llm = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
});
const embeddings = new OpenAIEmbeddings();
const apiKey = "Your SearchApi API key";
// Define your question and query
const question = "Your question here";
const query = "Your query here";
// Use SearchApiLoader to load web search results
const loader = new SearchApiLoader({ q: query, apiKey, engine: "google" });
const docs = await loader.load();
const textSplitter = new TokenTextSplitter({
chunkSize: 800,
chunkOverlap: 100,
});
const splitDocs = await textSplitter.splitDocuments(docs);
// Use MemoryVectorStore to store the loaded documents in memory
const vectorStore = await MemoryVectorStore.fromDocuments(
splitDocs,
embeddings
);
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
[
"system",
"Answer the user's questions based on the below context:\n\n{context}",
],
["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
llm,
prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
retriever: vectorStore.asRetriever(),
combineDocsChain,
});
const res = await chain.invoke({
input: question,
});
console.log(res.answer);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/sonix_audio_transcription.ts | import { SonixAudioTranscriptionLoader } from "@langchain/community/document_loaders/web/sonix_audio";
const loader = new SonixAudioTranscriptionLoader({
sonixAuthKey: "SONIX_AUTH_KEY",
request: {
audioFilePath: "LOCAL_AUDIO_FILE_PATH",
fileName: "FILE_NAME",
language: "en",
},
});
const docs = await loader.load();
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/azure_blob_storage_container.ts | import { AzureBlobStorageContainerLoader } from "@langchain/community/document_loaders/web/azure_blob_storage_container";
const loader = new AzureBlobStorageContainerLoader({
azureConfig: {
connectionString: "",
container: "container_name",
},
unstructuredConfig: {
apiUrl: "http://localhost:8000/general/v0/general",
apiKey: "", // this will be soon required
},
});
const docs = await loader.load();
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/firecrawl.ts | import { FireCrawlLoader } from "@langchain/community/document_loaders/web/firecrawl";
const loader = new FireCrawlLoader({
url: "https://firecrawl.dev", // The URL to scrape
apiKey: process.env.FIRECRAWL_API_KEY, // Optional, defaults to `FIRECRAWL_API_KEY` in your env.
mode: "scrape", // The mode to run the crawler in. Can be "scrape" for single urls or "crawl" for all accessible subpages
params: {
// optional parameters based on Firecrawl API docs
// For API documentation, visit https://docs.firecrawl.dev
},
});
const docs = await loader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/s3.ts | import { S3Loader } from "@langchain/community/document_loaders/web/s3";
const loader = new S3Loader({
bucket: "my-document-bucket-123",
key: "AccountingOverview.pdf",
s3Config: {
region: "us-east-1",
credentials: {
accessKeyId: "AKIAIOSFODNN7EXAMPLE",
secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
},
},
unstructuredAPIURL: "http://localhost:8000/general/v0/general",
unstructuredAPIKey: "", // this will be soon required
});
const docs = await loader.load();
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/github_stream.ts | import { GithubRepoLoader } from "@langchain/community/document_loaders/web/github";
export const run = async () => {
const loader = new GithubRepoLoader(
"https://github.com/langchain-ai/langchainjs",
{
branch: "main",
recursive: false,
unknown: "warn",
maxConcurrency: 3, // Defaults to 2
}
);
const docs = [];
for await (const doc of loader.loadAsStream()) {
docs.push(doc);
}
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/sitemap.ts | import { SitemapLoader } from "@langchain/community/document_loaders/web/sitemap";
const loader = new SitemapLoader("https://www.langchain.com/");
const docs = await loader.load();
console.log(docs.length);
/**
26
*/
console.log(docs[0]);
/**
Document {
pageContent: '\n' +
' \n' +
'\n' +
' \n' +
' \n' +
' Blog ArticleApr 8, 2022As the internet continues to develop and grow exponentially, jobs related to the industry do too, particularly those that relate to web design and development. The prediction is that by 2029, the job outlook for these two fields will grow by 8%—significantly faster than average. Whether you’re seeking salaried employment or aiming to work in a freelance capacity, a career in web design can offer a variety of employment arrangements, competitive salaries, and opportunities to utilize both technical and creative skill sets.What does a career in web design involve?A career in website design can involve the design, creation, and coding of a range of website types. Other tasks will typically include liaising with clients and discussing website specifications, incorporating feedback, working on graphic design and image editing, and enabling multimedia features such as audio and video. Requiring a range of creative and technical skills, web designers may be involved in work across a range of industries, including software companies, IT consultancies, web design companies, corporate organizations, and more. In contrast with web developers, web designers tend to play a more creative role, crafting the overall vision and design of a site, and determining how to best incorporate the necessary functionality. However, there can be significant overlap between the roles.Full-stack, back-end, and front-end web developmentThe U.S. Bureau of Labor Statistics (BLS) Occupational Outlook Handbook tends to group web developers and digital designers into one category. However, they define them separately, stating that web developers create and maintain websites and are responsible for the technical aspects including performance and capacity. Web or digital designers, on the other hand, are responsible for the look and functionality of websites and interfaces. They develop, create, and test the layout, functions, and navigation for usability. 
Web developers can focus on the back-end, front-end, or full-stack development, and typically utilize a range of programming languages, libraries, and frameworks to do so. Web designers may work more closely with front-end engineers to establish the user-end functionality and appearance of a site.Are web designers in demand in 2022?In our ever-increasingly digital environment, there is a constant need for websites—and therefore for web designers and developers. With 17.4 billion websites in existence as of January 2020, the demand for web developers is only expected to rise.Web designers with significant coding experience are typically in higher demand, and can usually expect a higher salary. Like all jobs, there are likely to be a range of opportunities, some of which are better paid than others. But certain skill sets are basic to web design, most of which are key to how to become a web designer in 2022.const removeHiddenBreakpointLayers = function ie(e){function t(){for(let{hash:r,mediaQuery:i}of e){if(!i)continue;if(window.matchMedia(i).matches)return r}return e[0]?.hash}let o=t();if(o)for(let r of document.querySelectorAll(".hidden-"+o))r.parentNode?.removeChild(r);for(let r of document.querySelectorAll(".ssr-variant")){for(;r.firstChild;)r.parentNode?.insertBefore(r.firstChild,r);r.parentNode?.removeChild(r)}for(let r of document.querySelectorAll("[data-framer-original-sizes]")){let i=r.getAttribute("data-framer-original-sizes");i===""?r.removeAttribute("sizes"):r.setAttribute("sizes",i),r.removeAttribute("data-framer-original-sizes")}};removeHiddenBreakpointLayers([{"hash":"1ksv3g6"}])\n' +
'\n' +
' \n' +
' \n' +
' \n' +
' \n' +
' \n' +
'\n' +
'\n',
metadata: {
changefreq: '',
lastmod: '',
priority: '',
source: 'https://www.langchain.com/blog-detail/starting-a-career-in-design'
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/serpapi.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { SerpAPILoader } from "@langchain/community/document_loaders/web/serpapi";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
// Initialize the necessary components
const llm = new ChatOpenAI();
const embeddings = new OpenAIEmbeddings();
const apiKey = "Your SerpAPI API key";
// Define your question and query
const question = "Your question here";
const query = "Your query here";
// Use SerpAPILoader to load web search results
const loader = new SerpAPILoader({ q: query, apiKey });
const docs = await loader.load();
// Use MemoryVectorStore to store the loaded documents in memory
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
[
"system",
"Answer the user's questions based on the below context:\n\n{context}",
],
["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
llm,
prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
retriever: vectorStore.asRetriever(),
combineDocsChain,
});
const res = await chain.invoke({
input: question,
});
console.log(res.answer);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/confluence.ts | import { ConfluencePagesLoader } from "@langchain/community/document_loaders/web/confluence";
const username = process.env.CONFLUENCE_USERNAME;
const accessToken = process.env.CONFLUENCE_ACCESS_TOKEN;
const personalAccessToken = process.env.CONFLUENCE_PAT;
if (username && accessToken) {
const loader = new ConfluencePagesLoader({
baseUrl: "https://example.atlassian.net/wiki",
spaceKey: "~EXAMPLE362906de5d343d49dcdbae5dEXAMPLE",
username,
accessToken,
});
const documents = await loader.load();
console.log(documents);
} else if (personalAccessToken) {
const loader = new ConfluencePagesLoader({
baseUrl: "https://example.atlassian.net/wiki",
spaceKey: "~EXAMPLE362906de5d343d49dcdbae5dEXAMPLE",
personalAccessToken,
});
const documents = await loader.load();
console.log(documents);
} else {
console.log(
"You need either a username and access token, or a personal access token (PAT), to use this example."
);
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/text.ts | import { TextLoader } from "langchain/document_loaders/fs/text";
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/airtable_load.ts | import { AirtableLoader } from "@langchain/community/document_loaders/web/airtable";
import { Document } from "@langchain/core/documents";
// Default airtable loader
const loader = new AirtableLoader({
tableId: "YOUR_TABLE_ID",
baseId: "YOUR_BASE_ID",
});
try {
const documents: Document[] = await loader.load();
console.log("Loaded documents:", documents);
} catch (error) {
console.error("Error loading documents:", error);
}
// Lazy airtable loader
const loaderLazy = new AirtableLoader({
tableId: "YOUR_TABLE_ID",
baseId: "YOUR_BASE_ID",
});
try {
console.log("Lazily loading documents:");
for await (const document of loader.loadLazy()) {
console.log("Loaded document:", document);
}
} catch (error) {
console.error("Error loading documents lazily:", error);
}
// Airtable loader with specific view
const loaderView = new AirtableLoader({
tableId: "YOUR_TABLE_ID",
baseId: "YOUR_BASE_ID",
kwargs: { view: "YOUR_VIEW_NAME" },
});
try {
const documents: Document[] = await loader.load();
console.log("Loaded documents with view:", documents);
} catch (error) {
console.error("Error loading documents with view:", error);
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/assemblyai_subtitles.ts | import { AudioSubtitleLoader } from "@langchain/community/document_loaders/web/assemblyai";
// You can also use a local file path and the loader will upload it to AssemblyAI for you.
const audioUrl = "https://storage.googleapis.com/aai-docs-samples/espn.m4a";
const loader = new AudioSubtitleLoader(
{
audio: audioUrl,
// any other parameters as documented here: https://www.assemblyai.com/docs/api-reference/transcripts/submit
},
"srt", // srt or vtt
{
apiKey: "<ASSEMBLYAI_API_KEY>", // or set the `ASSEMBLYAI_API_KEY` env variable
}
);
const docs = await loader.load();
console.dir(docs, { depth: Infinity });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/pdf.ts | import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
export const run = async () => {
const loader = new PDFLoader("src/document_loaders/example_data/bitcoin.pdf");
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/unstructured.ts | import { UnstructuredLoader } from "@langchain/community/document_loaders/fs/unstructured";
const options = {
apiKey: "MY_API_KEY",
};
const loader = new UnstructuredLoader(
"src/document_loaders/example_data/notion.md",
options
);
const docs = await loader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/gitbook.ts | import { GitbookLoader } from "@langchain/community/document_loaders/web/gitbook";
export const run = async () => {
const loader = new GitbookLoader("https://docs.gitbook.com");
const docs = await loader.load(); // load single path
console.log(docs);
const allPathsLoader = new GitbookLoader("https://docs.gitbook.com", {
shouldLoadAllPaths: true,
});
const docsAllPaths = await allPathsLoader.load(); // loads all paths of the given gitbook
console.log(docsAllPaths);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/github_ignore_paths.ts | import { GithubRepoLoader } from "@langchain/community/document_loaders/web/github";
export const run = async () => {
const loader = new GithubRepoLoader(
"https://github.com/langchain-ai/langchainjs",
{ branch: "main", recursive: false, unknown: "warn", ignorePaths: ["*.md"] }
);
const docs = await loader.load();
console.log({ docs });
// Will not include any .md files
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/github_submodules.ts | import { GithubRepoLoader } from "@langchain/community/document_loaders/web/github";
export const run = async () => {
const loader = new GithubRepoLoader(
"https://github.com/langchain-ai/langchainjs",
{
branch: "main",
recursive: true,
processSubmodules: true,
unknown: "warn",
}
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/notion_markdown.ts | import { NotionLoader } from "@langchain/community/document_loaders/fs/notion";
export const run = async () => {
/** Provide the directory path of your notion folder */
const directoryPath = "Notion_DB";
const loader = new NotionLoader(directoryPath);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/assemblyai_audio_transcription.ts | import {
AudioTranscriptLoader,
// AudioTranscriptParagraphsLoader,
// AudioTranscriptSentencesLoader
} from "@langchain/community/document_loaders/web/assemblyai";
// You can also use a local file path and the loader will upload it to AssemblyAI for you.
const audioUrl = "https://storage.googleapis.com/aai-docs-samples/espn.m4a";
// Use `AudioTranscriptParagraphsLoader` or `AudioTranscriptSentencesLoader` for splitting the transcript into paragraphs or sentences
const loader = new AudioTranscriptLoader(
{
audio: audioUrl,
// any other parameters as documented here: https://www.assemblyai.com/docs/api-reference/transcripts/submit
},
{
apiKey: "<ASSEMBLYAI_API_KEY>", // or set the `ASSEMBLYAI_API_KEY` env variable
}
);
const docs = await loader.load();
console.dir(docs, { depth: Infinity });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/ppt.ts | import { PPTXLoader } from "@langchain/community/document_loaders/fs/pptx";
export const run = async () => {
const loader = new PPTXLoader(
"src/document_loaders/example_data/theikuntest.pptx"
);
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/taskade.ts | import { TaskadeProjectLoader } from "@langchain/community/document_loaders/web/taskade";
const loader = new TaskadeProjectLoader({
personalAccessToken: "TASKADE_PERSONAL_ACCESS_TOKEN", // or load it from process.env.TASKADE_PERSONAL_ACCESS_TOKEN
projectId: "projectId",
});
const docs = await loader.load();
console.log({ docs });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/sort_xyz_blockchain.ts | import { SortXYZBlockchainLoader } from "@langchain/community/document_loaders/web/sort_xyz_blockchain";
import { OpenAI } from "@langchain/openai";
/**
* See https://docs.sort.xyz/docs/api-keys to get your free Sort API key.
* See https://docs.sort.xyz for more information on the available queries.
* See https://docs.sort.xyz/reference for more information about Sort's REST API.
*/
/**
* Run the example.
*/
export const run = async () => {
// Initialize the OpenAI model. Use OPENAI_API_KEY from .env in /examples
const model = new OpenAI({ temperature: 0.9 });
const apiKey = "YOUR_SORTXYZ_API_KEY";
const contractAddress =
"0x887F3909C14DAbd9e9510128cA6cBb448E932d7f".toLowerCase();
/*
Load NFT metadata from the Ethereum blockchain. Hint: to load by a specific ID, see SQL query example below.
*/
const nftMetadataLoader = new SortXYZBlockchainLoader({
apiKey,
query: {
type: "NFTMetadata",
blockchain: "ethereum",
contractAddress,
},
});
const nftMetadataDocs = await nftMetadataLoader.load();
const nftPrompt =
"Describe the character with the attributes from the following json document in a 4 sentence story. ";
const nftResponse = await model.invoke(
nftPrompt + JSON.stringify(nftMetadataDocs[0], null, 2)
);
console.log(`user > ${nftPrompt}`);
console.log(`chatgpt > ${nftResponse}`);
/*
Load the latest transactions for a contract address from the Ethereum blockchain.
*/
const latestTransactionsLoader = new SortXYZBlockchainLoader({
apiKey,
query: {
type: "latestTransactions",
blockchain: "ethereum",
contractAddress,
},
});
const latestTransactionsDocs = await latestTransactionsLoader.load();
const latestPrompt =
"Describe the following json documents in only 4 sentences per document. Include as much detail as possible. ";
const latestResponse = await model.invoke(
latestPrompt + JSON.stringify(latestTransactionsDocs[0], null, 2)
);
console.log(`\n\nuser > ${nftPrompt}`);
console.log(`chatgpt > ${latestResponse}`);
/*
Load metadata for a specific NFT by using raw SQL and the NFT index. See https://docs.sort.xyz for forumulating SQL.
*/
const sqlQueryLoader = new SortXYZBlockchainLoader({
apiKey,
query: `SELECT * FROM ethereum.nft_metadata WHERE contract_address = '${contractAddress}' AND token_id = 1 LIMIT 1`,
});
const sqlDocs = await sqlQueryLoader.load();
const sqlPrompt =
"Describe the character with the attributes from the following json document in an ad for a new coffee shop. ";
const sqlResponse = await model.invoke(
sqlPrompt + JSON.stringify(sqlDocs[0], null, 2)
);
console.log(`\n\nuser > ${sqlPrompt}`);
console.log(`chatgpt > ${sqlResponse}`);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/web_pdf.ts | import { WebPDFLoader } from "@langchain/community/document_loaders/web/pdf";
const blob = new Blob(); // e.g. from a file input
const loader = new WebPDFLoader(blob);
const docs = await loader.load();
console.log({ docs });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/hn.ts | import { HNLoader } from "@langchain/community/document_loaders/web/hn";
export const run = async () => {
const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881");
const docs = await loader.load();
console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/example.txt | Foo
Bar
Baz
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/notion.md | # Testing the notion markdownloader
# 🦜️🔗 LangChain.js
⚡ Building applications with LLMs through composability ⚡
**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
## Quick Install
`yarn add langchain`
```typescript
import { OpenAI } from "langchain/llms/openai";
```
## 🤔 What is this?
Large language models (LLMs) are emerging as a transformative technology, enabling
developers to build applications that they previously could not.
But using these LLMs in isolation is often not enough to
create a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
This library is aimed at assisting in the development of those types of applications.
## Relationship with Python LangChain
This is built to integrate as seamlessly as possible with the [LangChain Python package](https://github.com/langchain-ai/langchain). Specifically, this means all objects (prompts, LLMs, chains, etc) are designed in a way where they can be serialized and shared between languages.
The [LangChainHub](https://github.com/hwchase17/langchain-hub) is a central place for the serialized versions of these prompts, chains, and agents.
## 📖 Documentation
For full documentation of prompts, chains, agents and more, please see [here](https://js.langchain.com/docs/introduction).
## 💁 Contributing
As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
Check out [our contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute.
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt | 1
00:00:17,580 --> 00:00:21,920
<i>Corruption discovered
at the core of the Banking Clan!</i>
2
00:00:21,950 --> 00:00:24,620
<i>Reunited, Rush Clovis
and Senator Amidala</i>
3
00:00:24,660 --> 00:00:27,830
<i>discover the full extent
of the deception.</i>
4
00:00:27,870 --> 00:00:30,960
<i>Anakin Skywalker is sent to the rescue!</i>
5
00:00:31,000 --> 00:00:35,050
<i>He refuses to trust Clovis and
asks Padmé not to work with him.</i>
6
00:00:35,090 --> 00:00:39,050
<i>Determined to save the banks,
she refuses her husband's request,</i>
7
00:00:39,090 --> 00:00:42,800
<i>throwing their
relationship into turmoil.</i>
8
00:00:42,840 --> 00:00:45,890
<i>Voted for by both the
Separatists and the Republic,</i>
9
00:00:45,930 --> 00:00:50,260
<i>Rush Clovis is elected new leader
of the Galactic Banking Clan.</i>
10
00:00:50,310 --> 00:00:53,320
<i>Now, all attention is focused on Scipio</i>
11
00:00:53,350 --> 00:00:56,350
<i>as the important
transfer of power begins.</i>
12
00:01:20,410 --> 00:01:24,330
Welcome back to Scipio, Rush Clovis.
13
00:01:24,370 --> 00:01:27,240
Our Separatist government
has great hopes for you.
14
00:01:27,290 --> 00:01:30,080
Thank you, Senator.
15
00:01:30,120 --> 00:01:31,750
Only you and Senator Amidala
16
00:01:31,790 --> 00:01:34,330
will be allowed to monitor
the exchange proceedings.
17
00:01:34,380 --> 00:01:36,050
No forces on either side
18
00:01:36,080 --> 00:01:38,540
will be allowed
into the Neutral Zone.
19
00:01:38,590 --> 00:01:40,750
Senator Amidala,
we will be right here
20
00:01:40,800 --> 00:01:41,850
if you should need us.
21
00:01:41,880 --> 00:01:43,210
Thank you, Commander.
22
00:02:06,600 --> 00:02:09,190
It is with great disappointment
23
00:02:09,230 --> 00:02:13,020
that I implement
the following verdict.
24
00:02:13,070 --> 00:02:15,490
By decree of the Muun people,
25
00:02:15,530 --> 00:02:18,570
the five representatives
standing before me
26
00:02:18,610 --> 00:02:21,280
are found guilty
of embezzlement.
27
00:02:21,320 --> 00:02:24,450
They shall be imprisoned
forthwith,
28
00:02:24,490 --> 00:02:27,660
and control of the banks
shall transfer immediately
29
00:02:27,700 --> 00:02:29,580
to Rush Clovis
30
00:02:29,620 --> 00:02:33,080
under the guidance
of the Muun government.
31
00:02:41,210 --> 00:02:43,250
We are grateful to you, Clovis,
32
00:02:43,290 --> 00:02:46,630
for everything you have done
for the Muun people.
33
00:02:46,670 --> 00:02:48,340
To have lost the banks
34
00:02:48,380 --> 00:02:51,010
would have been
an historic disaster.
35
00:02:51,050 --> 00:02:52,510
I would like you to know
36
00:02:52,550 --> 00:02:54,840
I have no interest
in controlling the banks.
37
00:02:54,880 --> 00:02:57,930
I am simply here
to reestablish order.
38
00:03:01,890 --> 00:03:06,060
Do you think our friend
is up to the task?
39
00:03:06,100 --> 00:03:07,850
There are few men
I have met in my career
40
00:03:07,890 --> 00:03:10,680
who are more dedicated
to a cause than Clovis.
41
00:03:10,730 --> 00:03:12,850
Once he decides
what he is fighting for,
42
00:03:12,890 --> 00:03:15,360
little will stop him
from achieving it.
43
00:03:15,400 --> 00:03:17,520
Let us hope you are right
44
00:03:17,570 --> 00:03:19,910
for all our sakes.
45
00:03:39,330 --> 00:03:41,540
Ah, Clovis.
46
00:03:41,580 --> 00:03:44,160
How are you liking
your new office?
47
00:03:44,210 --> 00:03:48,040
I must say, you look very
comfortable behind that desk.
48
00:03:48,080 --> 00:03:51,080
Count Dooku,
what do I owe the pleasure?
49
00:03:51,130 --> 00:03:53,420
Come, come, my boy.
50
00:03:53,460 --> 00:03:55,920
You don't think I'd let
such an important day pass
51
00:03:55,960 --> 00:03:58,630
without wishing you
the best of luck.
52
00:03:58,680 --> 00:04:01,930
Thank you, but luck
has nothing to do with it.
53
00:04:01,970 --> 00:04:04,260
The transfer has occurred
without a hitch.
54
00:04:04,300 --> 00:04:06,010
Well, of course it has.
55
00:04:06,050 --> 00:04:09,430
The Separatists are fully
behind your appointment.
56
00:04:09,470 --> 00:04:14,430
After all, aren't we the ones
who put you there?
57
00:04:14,480 --> 00:04:16,100
For your support, I am grateful,
58
00:04:16,140 --> 00:04:17,480
but I now must lead
59
00:04:17,520 --> 00:04:21,270
without allegiance
towards either side.
60
00:04:22,690 --> 00:04:24,570
Is that so?
61
00:04:24,610 --> 00:04:28,030
Quite the idealist you have become
in so short a time.
62
00:04:28,070 --> 00:04:30,490
What do you want, Dooku?
63
00:04:30,530 --> 00:04:32,700
To collect on my investment.
64
00:04:32,740 --> 00:04:34,620
How do you think the Republic
would like to know
65
00:04:34,660 --> 00:04:37,250
that it was I
who supplied Rush Clovis
66
00:04:37,280 --> 00:04:38,950
with all the information
he needed
67
00:04:38,990 --> 00:04:41,030
to topple the leaders of the bank?
68
00:04:41,080 --> 00:04:42,910
I will tell them myself.
69
00:04:42,950 --> 00:04:44,700
Oh, but you can't.
70
00:04:44,750 --> 00:04:46,800
I put you in power.
71
00:04:46,830 --> 00:04:49,290
You belong to me,
72
00:04:49,330 --> 00:04:51,120
and if you want to stay in control,
73
00:04:51,170 --> 00:04:52,840
you will do as I say.
74
00:04:52,880 --> 00:04:56,050
The banks will remain unbiased.
75
00:04:56,090 --> 00:04:57,850
Then I'm afraid the Separatists
76
00:04:57,880 --> 00:05:01,260
will be unable to pay
the interest on our loans.
77
00:05:01,300 --> 00:05:03,300
But the banks will collapse,
and then...
78
00:05:03,340 --> 00:05:06,840
Not if you raise
interest rates on the Republic.
79
00:05:06,880 --> 00:05:07,970
What?
80
00:05:08,010 --> 00:05:09,880
You know I can't do that.
81
00:05:09,930 --> 00:05:12,600
Oh, but you can, and you will,
82
00:05:12,640 --> 00:05:15,430
or everything that you
fought so hard for
83
00:05:15,470 --> 00:05:17,350
will be destroyed.
84
00:05:31,110 --> 00:05:33,860
By the new order
of the Traxus Division
85
00:05:33,900 --> 00:05:36,240
and in an attempt
to stabilize the banks,
86
00:05:36,280 --> 00:05:39,450
it is essential that interest
rates on loans to the Republic
87
00:05:39,490 --> 00:05:41,910
be raised immediately.
88
00:05:41,950 --> 00:05:43,490
What?
89
00:05:43,530 --> 00:05:44,950
But you can't do that!
90
00:05:44,990 --> 00:05:46,700
Clovis.
91
00:05:46,740 --> 00:05:47,910
Clovis!
92
00:05:47,950 --> 00:05:49,950
What are you doing?
93
00:06:03,960 --> 00:06:05,670
This is an outrage!
94
00:06:05,710 --> 00:06:07,920
We warned you this would happen!
95
00:06:07,960 --> 00:06:10,260
And what of the Separatists?
96
00:06:10,300 --> 00:06:11,760
From the little information.
97
00:06:11,800 --> 00:06:14,550
Senator Amidala
has been able to establish,
98
00:06:14,590 --> 00:06:18,430
there will be no raise
on their current loan.
99
00:06:18,470 --> 00:06:22,060
I knew from the beginning
that Clovis would do this.
100
00:06:28,980 --> 00:06:31,270
Hmm, correct you might have been
101
00:06:31,310 --> 00:06:32,690
about Clovis.
102
00:06:32,730 --> 00:06:34,150
It's incredibly foolish
103
00:06:34,190 --> 00:06:36,360
for him to make a move like this
so early.
104
00:06:36,400 --> 00:06:39,440
He will turn the whole Republic
against him.
105
00:06:39,480 --> 00:06:42,570
Not clear to us
are his objectives.
106
00:06:42,610 --> 00:06:44,820
Want this he might.
107
00:06:44,860 --> 00:06:46,820
Something's wrong.
108
00:06:46,860 --> 00:06:48,450
This doesn't make sense.
109
00:06:48,490 --> 00:06:51,950
I would like
to call for restraint
110
00:06:51,990 --> 00:06:55,740
and allow us time
to analyze the situation.
111
00:07:12,630 --> 00:07:14,760
You may begin your attack.
112
00:07:14,800 --> 00:07:17,420
It is time to make Rush Clovis
113
00:07:17,470 --> 00:07:19,800
look like a powerful Separatist.
114
00:07:19,840 --> 00:07:21,840
Right away, sir.
115
00:07:28,390 --> 00:07:29,930
It looks like
an invasion fleet, sir.
116
00:07:29,970 --> 00:07:31,970
We're caught out here
in the open.
117
00:07:36,650 --> 00:07:39,740
Get the men off this landing pad
and beyond the city gates!
118
00:07:51,070 --> 00:07:53,450
Senator Amidala,
come in, please.
119
00:07:53,490 --> 00:07:55,280
What is it, Commander Thorn?
120
00:07:55,320 --> 00:07:57,490
We're under attack
by the Separatist garrison.
121
00:07:57,530 --> 00:07:59,240
Looks to be a full invasion.
122
00:07:59,280 --> 00:08:00,660
Invasion?
123
00:08:00,700 --> 00:08:02,240
We can't get to you.
124
00:08:02,290 --> 00:08:05,160
I suggest you get to a ship
as soon as you can.
125
00:08:09,250 --> 00:08:10,290
Boom!
126
00:08:14,420 --> 00:08:15,670
Ahh!
127
00:08:28,640 --> 00:08:29,760
Let's move!
128
00:08:29,800 --> 00:08:31,050
Hurry!
129
00:08:54,740 --> 00:08:55,740
Ah!
130
00:08:59,360 --> 00:09:01,280
For the Republic!
131
00:09:04,370 --> 00:09:05,620
Ah!
132
00:09:34,300 --> 00:09:37,180
Our garrison has been attacked
by the Separatists,
133
00:09:37,220 --> 00:09:39,760
and it appears they are staging
an invasion of Scipio.
134
00:09:39,810 --> 00:09:41,220
An invasion?
135
00:09:41,270 --> 00:09:43,190
What do they hope to achieve?
136
00:09:43,230 --> 00:09:45,860
With this news, the Senate
will vote immediately
137
00:09:45,890 --> 00:09:47,230
to attack Scipio.
138
00:09:47,270 --> 00:09:50,230
It appears war has already
come to Scipio.
139
00:09:50,270 --> 00:09:52,440
I want you off that planet
immediately.
140
00:09:52,480 --> 00:09:53,940
I can't.
141
00:09:53,980 --> 00:09:56,270
Surely you can get to a ship.
142
00:09:56,320 --> 00:09:59,570
General Skywalker,
I'm afraid I'm trapped.
143
00:10:03,240 --> 00:10:04,240
Let me go!
144
00:10:05,700 --> 00:10:07,700
Invoke an emergency meeting
of the Senate.
145
00:10:07,740 --> 00:10:09,700
There is no time to lose.
146
00:10:11,740 --> 00:10:13,240
I feel it is only right
147
00:10:13,290 --> 00:10:15,990
that you should handle
this matter, my boy.
148
00:10:16,040 --> 00:10:18,200
A lot will be entrusted to you.
149
00:10:26,420 --> 00:10:28,130
Don't touch me!
150
00:10:29,880 --> 00:10:30,920
What have you done to her?
151
00:10:32,170 --> 00:10:34,840
Clovis, what is going on?
152
00:10:34,880 --> 00:10:36,880
I didn't want this, Padmé.
153
00:10:36,930 --> 00:10:39,090
Why don't you tell her
what you did want
154
00:10:39,140 --> 00:10:41,940
and how you got it.
155
00:10:41,970 --> 00:10:43,260
Dooku.
156
00:10:46,600 --> 00:10:48,720
Padmé, this is not what it seems.
157
00:10:48,770 --> 00:10:51,060
Hasn't she joined our cause?
158
00:10:51,100 --> 00:10:54,140
Clovis here told me
how instrumental you were
159
00:10:54,190 --> 00:10:55,350
in getting him to power.
160
00:10:55,400 --> 00:10:56,410
If I had known...
161
00:10:56,440 --> 00:10:57,810
Either you are with us
162
00:10:57,860 --> 00:10:59,530
or you are against us.
163
00:10:59,570 --> 00:11:00,740
Arrest her!
164
00:11:00,770 --> 00:11:02,440
We can't do this, Dooku.
165
00:11:02,480 --> 00:11:05,110
The Separatist Senate
will never approve.
166
00:11:06,280 --> 00:11:07,280
Hey!
167
00:11:11,990 --> 00:11:13,530
No. No.
168
00:11:13,570 --> 00:11:14,620
No!
169
00:11:16,580 --> 00:11:17,590
No!
170
00:11:19,660 --> 00:11:20,830
Are you insane?
171
00:11:20,870 --> 00:11:22,750
This was not part of the deal.
172
00:11:22,790 --> 00:11:24,250
What deal?
173
00:11:24,290 --> 00:11:26,250
What have you done here, Clovis?
174
00:11:26,290 --> 00:11:28,250
He's given us the banks.
175
00:11:28,290 --> 00:11:29,670
Gone are our debts,
176
00:11:29,710 --> 00:11:33,500
and gone is any credit
for the Republic.
177
00:11:33,540 --> 00:11:37,130
All of your idealism
was just a front.
178
00:11:37,170 --> 00:11:39,050
There was nothing I could do.
179
00:11:39,090 --> 00:11:42,880
Everyone has their price,
my dear.
180
00:11:49,890 --> 00:11:52,050
It is with grave news
181
00:11:52,100 --> 00:11:54,100
I come before you.
182
00:11:54,140 --> 00:11:57,350
Count Dooku and his
Separatist betrayers
183
00:11:57,390 --> 00:11:59,850
have manipulated us, my friends.
184
00:11:59,890 --> 00:12:02,230
The war must go to Scipio!
185
00:12:02,270 --> 00:12:04,900
Clovis has been
their puppet of deceit
186
00:12:04,940 --> 00:12:09,150
as the Separatists are
currently invading Scipio.
187
00:12:09,190 --> 00:12:11,990
We must stop them
and secure the planet!
188
00:12:12,030 --> 00:12:15,110
We have handed
the entire economic system
189
00:12:15,150 --> 00:12:17,030
over to Count Dooku.
190
00:12:17,070 --> 00:12:18,700
We are doomed!
191
00:12:18,740 --> 00:12:19,870
Invade!
192
00:12:23,240 --> 00:12:26,450
As Supreme Chancellor,
I must abide
193
00:12:26,490 --> 00:12:28,910
by the consensus of the Senate.
194
00:12:28,960 --> 00:12:32,170
We shall commence
a mercy mission to Scipio
195
00:12:32,210 --> 00:12:36,080
to be led by
General Anakin Skywalker.
196
00:12:36,130 --> 00:12:39,890
The banks will be secured
at all costs,
197
00:12:39,920 --> 00:12:43,170
and the Republic
will not crumble!
198
00:12:44,860 --> 00:12:45,856
Victory!
199
00:12:45,880 --> 00:12:48,800
We will take victory.
200
00:12:48,840 --> 00:12:50,760
War on Scipio!
201
00:12:53,300 --> 00:12:55,600
Great emotions
you will find on Scipio,
202
00:12:55,640 --> 00:12:59,560
will you not?
203
00:12:59,600 --> 00:13:02,310
I am worried
for Senator Amidala.
204
00:13:02,350 --> 00:13:03,890
I'm afraid we may be too late.
205
00:13:03,940 --> 00:13:06,530
Correct you were about Clovis,
206
00:13:06,560 --> 00:13:10,190
but let go of your selfishness
you must
207
00:13:10,230 --> 00:13:12,520
if you are to see clearly.
208
00:13:12,570 --> 00:13:16,230
Not all is as it seems.
209
00:13:16,280 --> 00:13:18,790
I understand, Master.
210
00:13:45,460 --> 00:13:48,750
Lord Tyranus, the Republic fleet
211
00:13:48,800 --> 00:13:51,300
will be arriving shortly.
212
00:13:51,340 --> 00:13:52,970
Very good, my lord.
213
00:13:53,010 --> 00:13:55,800
Clovis has blindly
played his part.
214
00:13:55,840 --> 00:13:57,970
It now appears he coordinated
215
00:13:58,010 --> 00:14:00,970
the entire Separatist takeover.
216
00:14:01,010 --> 00:14:03,510
And because of this treachery,
217
00:14:03,560 --> 00:14:06,640
the banks will be firmly placed
218
00:14:06,680 --> 00:14:11,140
under the control
of the Supreme Chancellor.
219
00:14:24,110 --> 00:14:26,440
Why are you doing this?
220
00:14:26,490 --> 00:14:28,990
You wouldn't understand.
221
00:14:29,030 --> 00:14:30,860
I had to strike a deal
with Dooku,
222
00:14:30,910 --> 00:14:31,860
but don't worry.
223
00:14:31,910 --> 00:14:33,570
I am the one in control.
224
00:14:33,620 --> 00:14:35,320
As soon as things
have settled down,
225
00:14:35,370 --> 00:14:38,240
I can get rid of him,
and I'll control it all again.
226
00:14:38,290 --> 00:14:39,450
Listen to yourself.
227
00:14:39,500 --> 00:14:41,260
The Republic is sending
its armada
228
00:14:41,290 --> 00:14:43,040
to take back the banks.
229
00:14:43,080 --> 00:14:46,710
You've brought war
right where there cannot be war.
230
00:14:46,750 --> 00:14:48,330
Your actions
have destroyed the banks
231
00:14:48,380 --> 00:14:50,220
once and for all!
232
00:15:00,720 --> 00:15:03,680
Rex, have you gotten a fix
on Senator Amidala's position?
233
00:15:03,720 --> 00:15:06,390
We'll have a better lock
once we get near the city,
234
00:15:06,430 --> 00:15:09,010
but initial scans suggest
she's still alive, sir.
235
00:15:09,060 --> 00:15:10,560
Good.
236
00:15:10,600 --> 00:15:12,100
Hawk, we're gonna need
air support
237
00:15:12,140 --> 00:15:13,220
once we're on the ground.
238
00:15:13,270 --> 00:15:14,430
You'll have it, General.
239
00:15:14,480 --> 00:15:16,560
Me and the boys
are ready to fly.
240
00:15:52,420 --> 00:15:53,670
My Lord,
241
00:15:53,710 --> 00:15:56,040
we have fully engaged
Republic forces,
242
00:15:56,090 --> 00:15:58,550
but we are suffering
heavy losses.
243
00:15:58,590 --> 00:16:01,050
We have accomplished
what we came here for.
244
00:16:01,090 --> 00:16:02,960
It is time to withdraw.
245
00:16:03,010 --> 00:16:06,090
But sir, our forces
are still engaged
246
00:16:06,130 --> 00:16:08,300
in battle on the planet.
247
00:16:08,340 --> 00:16:09,680
Leave them.
248
00:16:09,720 --> 00:16:12,090
As you wish, Count Dooku.
249
00:16:29,110 --> 00:16:32,690
Sir, a Republic attack fleet
has just entered orbit
250
00:16:32,730 --> 00:16:34,110
and is approaching the city.
251
00:16:36,070 --> 00:16:37,780
Get me Count Dooku.
252
00:16:37,820 --> 00:16:41,200
It appears Count Dooku
has left the planet's surface.
253
00:16:41,240 --> 00:16:42,740
What?
254
00:16:42,780 --> 00:16:45,700
And the Separatist forces
are in full retreat.
255
00:16:45,740 --> 00:16:47,740
We are alone.
256
00:17:16,800 --> 00:17:19,180
Rex, hold the droid forces here.
257
00:17:19,220 --> 00:17:20,930
I'm gonna push on and get Padmé.
258
00:17:20,970 --> 00:17:21,970
Copy that.
259
00:17:34,520 --> 00:17:37,270
Such plans I had.
260
00:17:37,320 --> 00:17:41,660
You know, I've spent so much
of my life misunderstood.
261
00:17:41,690 --> 00:17:43,860
What will they say about me now?
262
00:17:43,900 --> 00:17:46,150
What will I have left behind?
263
00:17:46,200 --> 00:17:49,200
Clovis, you have to
turn yourself in.
264
00:17:58,910 --> 00:18:00,620
It's over, Clovis.
265
00:18:11,840 --> 00:18:13,300
Stay away from me!
266
00:18:13,340 --> 00:18:14,920
I didn't do anything wrong!
267
00:18:14,960 --> 00:18:16,630
You have to believe me!
268
00:18:16,670 --> 00:18:19,010
You don't want to do this.
269
00:18:19,050 --> 00:18:20,760
You don't understand.
270
00:18:20,800 --> 00:18:22,300
You've all been deceived.
271
00:18:22,340 --> 00:18:23,720
Yeah, by you.
272
00:18:23,760 --> 00:18:24,890
No!
273
00:18:24,930 --> 00:18:25,930
By Dooku.
274
00:18:27,560 --> 00:18:29,070
I'm not the villain here.
275
00:18:29,100 --> 00:18:31,310
Tell him, Padmé.
276
00:18:31,350 --> 00:18:32,640
Let me go, Clovis.
277
00:19:12,710 --> 00:19:15,540
I can't hold both of you.
278
00:19:16,960 --> 00:19:18,590
Let me go.
279
00:19:18,630 --> 00:19:20,500
No, Anakin, don't.
280
00:19:24,720 --> 00:19:26,260
Try and climb.
281
00:19:28,720 --> 00:19:30,180
I am!
282
00:19:30,220 --> 00:19:32,010
I'm losing you!
283
00:19:33,010 --> 00:19:34,890
I'm sorry, Padmé.
284
00:19:36,640 --> 00:19:37,720
No.
285
00:19:51,900 --> 00:19:53,190
It's okay.
286
00:19:53,240 --> 00:19:54,410
You're okay.
287
00:19:54,440 --> 00:19:56,530
I'm sorry, Anakin.
288
00:19:56,570 --> 00:19:58,150
I'm sorry.
289
00:19:58,200 --> 00:19:59,950
It's over now.
290
00:19:59,990 --> 00:20:01,570
It's all over now.
291
00:20:06,830 --> 00:20:09,200
It is clear to the Banking Clan
292
00:20:09,250 --> 00:20:12,510
it was Rush Clovis who was
behind the corruption
293
00:20:12,540 --> 00:20:14,960
that almost caused our collapse.
294
00:20:15,000 --> 00:20:17,120
In hope of a better tomorrow,
295
00:20:17,170 --> 00:20:19,790
we cede control of the banks
296
00:20:19,840 --> 00:20:23,810
to the office of the Chancellor
of the Galactic Republic.
297
00:20:26,800 --> 00:20:30,340
It is with great humility
298
00:20:30,380 --> 00:20:34,680
that I take on
this immense responsibility.
299
00:20:34,720 --> 00:20:38,010
Rest assured,
when the Clone Wars end,
300
00:20:38,050 --> 00:20:40,060
I shall reinstate the banks
301
00:20:40,100 --> 00:20:42,220
as we once knew them,
302
00:20:42,270 --> 00:20:46,270
but during these
treacherous times,
303
00:20:46,310 --> 00:20:48,890
we cannot in good conscience
allow our money
304
00:20:48,940 --> 00:20:50,940
to fall under the manipulations
305
00:20:50,980 --> 00:20:53,900
of a madman like Count Dooku
306
00:20:53,940 --> 00:20:56,020
or Separatist control again.
307
00:21:00,030 --> 00:21:04,240
May there be prosperity
and stability
308
00:21:04,280 --> 00:21:06,320
in all our Republic lands.
309
00:21:06,360 --> 00:21:11,070
May our people be free and safe.
310
00:21:11,120 --> 00:21:12,240
Long live the banks!
311
00:21:13,660 --> 00:21:15,450
<i>Long live the banks!</i>
312
00:21:15,500 --> 00:21:17,380
<i>Long live the banks!</i>
313
00:21:17,410 --> 00:21:19,450
<i>Long live the banks!</i>
314
00:21:19,500 --> 00:21:23,500
<i>Long live the banks!
Long live the banks!</i>
315
00:21:23,540 --> 00:21:25,330
<i>Long live the banks!</i>
316
00:21:25,380 --> 00:21:29,130
<i>Long live the banks!
Long live the banks!</i>
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders/example_data | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/obsidian/bad_frontmatter.md | ---
anArray:
one
- two
- three
tags: 'onetag', 'twotag' ]
---
A document with frontmatter that isn't valid.
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders/example_data | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/obsidian/no_frontmatter.md | ### Description
#recipes #dessert #cookies
A document with HR elements that might trip up a front matter parser:
---
### Ingredients
- 3/4 cup (170g) **unsalted butter**, slightly softened to room temperature.
- 1 and 1/2 cups (180g) **confectioners’ sugar**
---
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders/example_data | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/obsidian/tags_and_frontmatter.md | ---
aFloat: 13.12345
anInt: 15
aBool: true
aString: string value
anArray:
- one
- two
- three
aDict:
dictId1: "58417"
dictId2: 1500
tags: ["onetag", "twotag"]
---
# Tags
()#notatag
#12345
#read
something #tagWithCases
- #tag-with-dash
#tag_with_underscore #tag/with/nesting
# Dataview
Here is some data in a [dataview1:: a value] line.
Here is even more data in a (dataview2:: another value) line.
dataview3:: more data
notdataview4: this is not a field
notdataview5: this is not a field
# Text content
https://example.com/blog/#not-a-tag
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders/example_data | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/obsidian/no_metadata.md | A markdown document with no additional metadata.
|
0 | lc_public_repos/langchainjs/examples/src/document_loaders/example_data | lc_public_repos/langchainjs/examples/src/document_loaders/example_data/obsidian/frontmatter.md | ---
tags: journal/entry, obsidian
---
No other content than the frontmatter.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/get_started/quickstart2.ts | /* eslint-disable import/first */
/* eslint-disable import/no-duplicates */
// LangChain.js quickstart, part 2: retrieval.
// Builds a retrieval-augmented chain over the LangSmith user guide, then
// upgrades it to a conversational (chat-history-aware) retrieval chain.
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
// Models used throughout: a chat model for generation, embeddings for indexing.
const chatModel = new ChatOpenAI({});
const embeddings = new OpenAIEmbeddings({});
// 1. Load the LangSmith user guide page as raw documents.
const loader = new CheerioWebBaseLoader(
"https://docs.smith.langchain.com/user_guide"
);
const docs = await loader.load();
console.log(docs.length);
console.log(docs[0].pageContent.length);
// 2. Split the page into smaller chunks suitable for embedding.
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
const splitter = new RecursiveCharacterTextSplitter();
const splitDocs = await splitter.splitDocuments(docs);
console.log(splitDocs.length);
console.log(splitDocs[0].pageContent.length);
// 3. Index the chunks in an in-memory vector store.
import { MemoryVectorStore } from "langchain/vectorstores/memory";
const vectorstore = await MemoryVectorStore.fromDocuments(
splitDocs,
embeddings
);
// 4. Chain that "stuffs" retrieved documents into the prompt's {context} slot.
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const prompt =
ChatPromptTemplate.fromTemplate(`Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}`);
const documentChain = await createStuffDocumentsChain({
llm: chatModel,
prompt,
});
// Sanity check: invoke the document chain with a hand-made context document.
import { Document } from "@langchain/core/documents";
console.log(
await documentChain.invoke({
input: "what is LangSmith?",
context: [
new Document({
pageContent:
"LangSmith is a platform for building production-grade LLM applications.",
}),
],
})
);
// 5. Full retrieval chain: fetch relevant chunks, then answer from them.
import { createRetrievalChain } from "langchain/chains/retrieval";
const retriever = vectorstore.asRetriever();
const retrievalChain = await createRetrievalChain({
combineDocsChain: documentChain,
retriever,
});
console.log(
await retrievalChain.invoke({
input: "what is LangSmith?",
})
);
// 6. History-aware retriever: rephrases the latest user input into a
// standalone search query using the conversation so far.
import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
import { MessagesPlaceholder } from "@langchain/core/prompts";
const historyAwarePrompt = ChatPromptTemplate.fromMessages([
new MessagesPlaceholder("chat_history"),
["user", "{input}"],
[
"user",
"Given the above conversation, generate a search query to look up in order to get information relevant to the conversation",
],
]);
const historyAwareRetrieverChain = await createHistoryAwareRetriever({
llm: chatModel,
retriever,
rephrasePrompt: historyAwarePrompt,
});
// Demonstrate the history-aware retriever with an in-flight conversation.
import { HumanMessage, AIMessage } from "@langchain/core/messages";
const chatHistory = [
new HumanMessage("Can LangSmith help test my LLM applications?"),
new AIMessage("Yes!"),
];
console.log(
await historyAwareRetrieverChain.invoke({
chat_history: chatHistory,
input: "Tell me how!",
})
);
// 7. Conversational retrieval chain: history-aware retrieval combined with
// an answer prompt that also sees the chat history.
const historyAwareRetrievalPrompt = ChatPromptTemplate.fromMessages([
[
"system",
"Answer the user's questions based on the below context:\n\n{context}",
],
new MessagesPlaceholder("chat_history"),
["user", "{input}"],
]);
const historyAwareCombineDocsChain = await createStuffDocumentsChain({
llm: chatModel,
prompt: historyAwareRetrievalPrompt,
});
const conversationalRetrievalChain = await createRetrievalChain({
retriever: historyAwareRetrieverChain,
combineDocsChain: historyAwareCombineDocsChain,
});
const result2 = await conversationalRetrievalChain.invoke({
chat_history: [
new HumanMessage("Can LangSmith help test my LLM applications?"),
new AIMessage("Yes!"),
],
input: "tell me how",
});
console.log(result2.answer);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/get_started/quickstart.ts | /* eslint-disable import/first */
// LangChain.js quickstart, part 1: prompts, models, and output parsers.
import { ChatOpenAI } from "@langchain/openai";
// Chat model with default settings.
const chatModel = new ChatOpenAI({});
// Invoke the bare model; the result is an AIMessage (sample output below).
console.log(await chatModel.invoke("what is LangSmith?"));
/*
AIMessage {
content: 'Langsmith can help with testing by generating test cases, automating the testing process, and analyzing test results.',
name: undefined,
additional_kwargs: { function_call: undefined, tool_calls: undefined }
}
*/
// Wrap the model in a prompt template that sets a system persona.
import { ChatPromptTemplate } from "@langchain/core/prompts";
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a world class technical documentation writer."],
["user", "{input}"],
]);
const chain = prompt.pipe(chatModel);
console.log(
await chain.invoke({
input: "what is LangSmith?",
})
);
// Add a string output parser so the chain yields plain text
// instead of an AIMessage object.
import { StringOutputParser } from "@langchain/core/output_parsers";
const outputParser = new StringOutputParser();
const llmChain = prompt.pipe(chatModel).pipe(outputParser);
console.log(
await llmChain.invoke({
input: "what is LangSmith?",
})
);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/get_started/quickstart3.ts | /* eslint-disable import/first */
// LangChain.js quickstart, part 3: agents.
// Reuses the retrieval setup from part 2, then exposes the retriever and a
// Tavily web search as tools to an OpenAI-functions agent.
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
const chatModel = new ChatOpenAI({});
const embeddings = new OpenAIEmbeddings({});
// Load and chunk the LangSmith user guide, then index it in memory.
const loader = new CheerioWebBaseLoader(
"https://docs.smith.langchain.com/user_guide"
);
const docs = await loader.load();
console.log(docs.length);
console.log(docs[0].pageContent.length);
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
const splitter = new RecursiveCharacterTextSplitter();
const splitDocs = await splitter.splitDocuments(docs);
console.log(splitDocs.length);
console.log(splitDocs[0].pageContent.length);
import { MemoryVectorStore } from "langchain/vectorstores/memory";
const vectorstore = await MemoryVectorStore.fromDocuments(
splitDocs,
embeddings
);
// Document-stuffing chain, used for the sanity-check invocation below.
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const prompt =
ChatPromptTemplate.fromTemplate(`Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}`);
const documentChain = await createStuffDocumentsChain({
llm: chatModel,
prompt,
});
import { Document } from "@langchain/core/documents";
console.log(
await documentChain.invoke({
input: "what is LangSmith?",
context: [
new Document({
pageContent:
"LangSmith is a platform for building production-grade LLM applications.",
}),
],
})
);
// Tool 1: the vector-store retriever, wrapped as an agent tool.
const retriever = vectorstore.asRetriever();
import { createRetrieverTool } from "langchain/tools/retriever";
const retrieverTool = await createRetrieverTool(retriever, {
name: "langsmith_search",
description:
"Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",
});
// Tool 2: Tavily web search results.
import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
const searchTool = new TavilySearchResults();
const tools = [retrieverTool, searchTool];
import { pull } from "langchain/hub";
import { createOpenAIFunctionsAgent, AgentExecutor } from "langchain/agents";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/hwchase17/openai-functions-agent
const agentPrompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const agentModel = new ChatOpenAI({
model: "gpt-3.5-turbo-1106",
temperature: 0,
});
// Assemble the agent and an executor that runs its tool-calling loop.
const agent = await createOpenAIFunctionsAgent({
llm: agentModel,
tools,
prompt: agentPrompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
verbose: true,
});
// A question about LangSmith (the agent is told to use the retriever tool).
const agentResult = await agentExecutor.invoke({
input: "how can LangSmith help with testing?",
});
console.log(agentResult);
// A question unrelated to LangSmith (can use the search tool instead).
const agentResult2 = await agentExecutor.invoke({
input: "what is the weather in SF?",
});
console.log(agentResult2);
// A follow-up question that relies on supplied chat history.
const agentResult3 = await agentExecutor.invoke({
chat_history: [
new HumanMessage("Can LangSmith help test my LLM applications?"),
new AIMessage("Yes!"),
],
input: "Tell me how",
});
console.log(agentResult3);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_custom_agent.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import lunary from "lunary";
// GPT-4 chat model; the Lunary handler traces every call made through it.
const lunaryTracer = new LunaryHandler();
const chat = new ChatOpenAI({
  model: "gpt-4",
  callbacks: [lunaryTracer],
});

/**
 * Translates an English sentence to French (with a hidden joke, per the
 * system prompt) and returns the model's reply content.
 */
async function TranslatorAgent(query: string) {
  const messages = [
    new SystemMessage(
      "You are a translator agent that hides jokes in each translation."
    ),
    new HumanMessage(
      `Translate this sentence from English to French: ${query}`
    ),
  ];
  const reply = await chat.invoke(messages);
  return reply.content;
}
// By wrapping the agent with wrapAgent, we automatically track all input, outputs and errors
// And tools and logs will be tied to the correct agent
const translate = lunary.wrapAgent(TranslatorAgent);
// You can use .identify() on wrapped methods to track users
const res = await translate("Good morning").identify("user123");
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_quickstart.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({
callbacks: [new LunaryHandler()],
});
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/background_await.ts | import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
// Wait for all pending background callback handlers (e.g. tracers) to
// finish before the process exits, so no events are lost.
await awaitAllCallbacks();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_langchain_agent.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { ChatOpenAI } from "@langchain/openai";
import { Calculator } from "@langchain/community/tools/calculator";
// Single tool the agent can call: a calculator.
const tools = [new Calculator()];

// Deterministic chat model, traced by Lunary at the model level.
const chat = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
  callbacks: [new LunaryHandler()],
});

// Build an OpenAI-functions agent executor over the tools.
const executor = await initializeAgentExecutorWithOptions(tools, chat, {
  agentType: "openai-functions",
});

// Per-call handler plus metadata lets Lunary group this run under the
// "SuperCalculator" agent name.
const runOptions = {
  callbacks: [new LunaryHandler()],
  metadata: { agentName: "SuperCalculator" },
};
const result = await executor.run(
  "What is the approximate result of 78 to the power of 5?",
  runOptions
);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_custom_app_id.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
// Configure the Lunary handler explicitly instead of relying on the
// app ID from the environment.
const handlerOptions = {
  appId: "app ID",
  // verbose: true,
  // apiUrl: 'custom self hosting url'
};
const handler = new LunaryHandler(handlerOptions);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/docs_verbose.ts | import { LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
const chain = new LLMChain({
llm: new OpenAI({ temperature: 0 }),
prompt: PromptTemplate.fromTemplate("Hello, world!"),
// This will enable logging of all Chain *and* LLM events to the console.
verbose: true,
});
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/custom_handler.ts | import { Serialized } from "@langchain/core/load/serializable";
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { ChainValues } from "@langchain/core/utils/types";
/**
 * Minimal custom callback handler that prints lifecycle events from
 * chains, agents and tools to the console.
 */
export class MyCallbackHandler extends BaseCallbackHandler {
  name = "MyCallbackHandler";

  // Announce the start of a chain by its serialized id.
  async handleChainStart(serialized: Serialized) {
    console.log(`Entering new ${serialized.id} chain...`);
  }

  // Announce chain completion; the output itself is ignored.
  async handleChainEnd(_output: ChainValues) {
    console.log("Finished chain.");
  }

  // Print the agent's reasoning for the chosen action.
  async handleAgentAction(agentAction: AgentAction) {
    console.log(agentAction.log);
  }

  // Print the raw output of a tool invocation.
  async handleToolEnd(toolOutput: string) {
    console.log(toolOutput);
  }

  // Print any free-form text emitted during the run.
  async handleText(text: string) {
    console.log(text);
  }

  // Print the agent's final log on completion.
  async handleAgentEnd(finish: AgentFinish) {
    console.log(finish.log);
  }
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/datadog.ts | import { OpenAI } from "@langchain/openai";
import { DatadogLLMObsTracer } from "@langchain/community/experimental/callbacks/handlers/datadog";
/**
* This example demonstrates how to use the DatadogLLMObsTracer with the OpenAI model.
* It will produce a "llm" span with the input and output of the model inside the meta field.
*
* To run this example, you need to have a valid Datadog API key and OpenAI API key.
*/
export const run = async () => {
  const model = new OpenAI({
    model: "gpt-4",
    temperature: 0.7,
    maxTokens: 1000,
    maxRetries: 5,
  });
  // The Datadog tracer is passed per-call, so only this invocation is
  // traced; `mlApp` names the ML application the span is reported under.
  const res = await model.invoke(
    "Question: What would be a good company name a company that makes colorful socks?\nAnswer:",
    {
      callbacks: [
        new DatadogLLMObsTracer({
          mlApp: "my-ml-app",
        }),
      ],
    }
  );
  console.log({ res });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_users.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { ChatOpenAI } from "@langchain/openai";
import { Calculator } from "@langchain/community/tools/calculator";
const tools = [new Calculator()];
const chat = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
  callbacks: [new LunaryHandler()],
});
const executor = await initializeAgentExecutorWithOptions(tools, chat, {
  agentType: "openai-functions",
});
// `userId`/`userProps` in the metadata attribute this run to a specific
// user inside Lunary; `agentName` groups runs under one agent.
const result = await executor.run(
  "What is the approximate result of 78 to the power of 5?",
  {
    callbacks: [new LunaryHandler()],
    metadata: {
      agentName: "SuperCalculator",
      userId: "user123",
      userProps: {
        name: "John Doe",
        email: "email@example.org",
      },
    },
  }
);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/lunary_tags.ts | import { LunaryHandler } from "@langchain/community/callbacks/handlers/lunary";
import { ChatOpenAI } from "@langchain/openai";
// Chat model with the Lunary handler attached at construction time.
const chat = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
  callbacks: [new LunaryHandler()],
});

// Tags passed at call time are attached to this specific run in Lunary.
await chat.invoke("Hello", { tags: ["greeting"] });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/console_handler.ts | import { LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { ConsoleCallbackHandler } from "@langchain/core/tracers/console";
import { PromptTemplate } from "@langchain/core/prompts";
export const run = async () => {
  // One console handler shared by both the LLM and the chain, so every
  // event in the run is printed.
  const consoleHandler = new ConsoleCallbackHandler();
  const llm = new OpenAI({ temperature: 0, callbacks: [consoleHandler] });
  const prompt = PromptTemplate.fromTemplate("1 + {number} =");
  const chain = new LLMChain({ prompt, llm, callbacks: [consoleHandler] });

  const output = await chain.invoke({ number: 2 });
  // Entering new llm_chain chain...
  // Finished chain.

  console.log(output);
  // { text: ' 3\n\n3 - 1 = 2' }

  // The non-enumerable key `__run` contains the runId.
  console.log(output.__run);
  // { runId: '90e1f42c-7cb4-484c-bf7a-70b73ef8e64b' }
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/docs_request_callbacks.ts | import { OpenAI } from "@langchain/openai";
import { ConsoleCallbackHandler } from "@langchain/core/tracers/console";
const llm = new OpenAI({ temperature: 0 });
// Tags and handlers supplied at call time are scoped to this single
// request only, unlike constructor-level callbacks.
const requestOptions = {
  tags: ["example", "callbacks", "request"],
  callbacks: [new ConsoleCallbackHandler()],
};
const response = await llm.invoke("1 + 1 =", requestOptions);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/trace_groups.ts | import { LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import {
CallbackManager,
traceAsGroup,
TraceGroup,
} from "@langchain/core/callbacks/manager";
import { PromptTemplate } from "@langchain/core/prompts";
export const run = async () => {
  // Initialize the LLMChain
  const llm = new OpenAI({ temperature: 0.9 });
  const prompt = new PromptTemplate({
    inputVariables: ["question"],
    template: "What is the answer to {question}?",
  });
  const chain = new LLMChain({ llm, prompt });
  // You can group runs together using the traceAsGroup function; the
  // manager it passes must be forwarded to each call so the runs nest
  // under the group.
  const blockResult = await traceAsGroup(
    { name: "my_group_name" },
    async (manager: CallbackManager, questions: string[]) => {
      await chain.invoke({ question: questions[0] }, manager);
      await chain.invoke({ question: questions[1] }, manager);
      const finalResult = await chain.invoke(
        { question: questions[2] },
        manager
      );
      return finalResult;
    },
    [
      "What is your name?",
      "What is your quest?",
      "What is your favorite color?",
    ]
  );
  // Or you can manually control the start and end of the grouped run
  const traceGroup = new TraceGroup("my_group_name");
  const groupManager = await traceGroup.start();
  try {
    await chain.invoke({ question: "What is your name?" }, groupManager);
    await chain.invoke({ question: "What is your quest?" }, groupManager);
    await chain.invoke(
      { question: "What is the airspeed velocity of an unladen swallow?" },
      groupManager
    );
  } finally {
    // Always end the group, even on error, so the trace is flushed.
    await traceGroup.end();
  }
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/callbacks/docs_constructor_callbacks.ts | import { OpenAI } from "@langchain/openai";
import { ConsoleCallbackHandler } from "@langchain/core/tracers/console";
// Tags and handlers passed to the constructor apply to *every* call made
// with this LLM instance.
const constructorOptions = {
  temperature: 0,
  tags: ["example", "callbacks", "constructor"],
  callbacks: [new ConsoleCallbackHandler()],
};
const llm = new OpenAI(constructorOptions);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/summarization.ts | import { OpenAI } from "@langchain/openai";
import { loadSummarizationChain } from "langchain/chains";
import { Document } from "@langchain/core/documents";
export const run = async () => {
  const model = new OpenAI({});
  // "stuff" places all documents into a single summarization prompt.
  const chain = loadSummarizationChain(model, { type: "stuff" });
  const docs = [
    "harrison went to harvard",
    "ankush went to princeton",
  ].map((pageContent) => new Document({ pageContent }));
  const res = await chain.invoke({ input_documents: docs });
  console.log(res);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/llm_chain_cancellation.ts | import { OpenAI } from "@langchain/openai";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
// Create a new LLMChain from a PromptTemplate and an LLM in streaming mode.
const model = new OpenAI({ temperature: 0.9, streaming: true });
const prompt = PromptTemplate.fromTemplate(
  "Give me a long paragraph about {product}?"
);
const chain = new LLMChain({ llm: model, prompt });
const controller = new AbortController();
// Call `controller.abort()` somewhere to cancel the request.
// Here we abort after 3 seconds, which makes the in-flight call below
// reject and land in the catch block.
setTimeout(() => {
  controller.abort();
}, 3000);
try {
  // Call the chain with the inputs and a callback for the streamed tokens
  const res = await chain.invoke(
    { product: "colorful socks", signal: controller.signal },
    {
      callbacks: [
        {
          handleLLMNewToken(token: string) {
            process.stdout.write(token);
          },
        },
      ],
    }
  );
} catch (e) {
  console.log(e);
  // Error: Cancel: canceled
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_runnable.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAIFnRunnable } from "langchain/chains/openai_functions";
import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
// JSON-schema style function definition the model is constrained to call.
const openAIFunction = {
  name: "get_person_details",
  description: "Get details about a person",
  parameters: {
    title: "Person",
    description: "Identifying information about a person.",
    type: "object",
    properties: {
      name: { title: "Name", description: "The person's name", type: "string" },
      age: { title: "Age", description: "The person's age", type: "integer" },
      fav_food: {
        title: "Fav Food",
        description: "The person's favorite food",
        type: "string",
      },
    },
    required: ["name", "age"],
  },
};
const model = new ChatOpenAI();
const prompt = ChatPromptTemplate.fromMessages([
  ["human", "Human description: {description}"],
]);
// Parses the model's function_call arguments into a plain object.
const outputParser = new JsonOutputFunctionsParser();
const runnable = createOpenAIFnRunnable({
  functions: [openAIFunction],
  llm: model,
  prompt,
  enforceSingleFunctionUsage: true, // Default is true
  outputParser,
});
const response = await runnable.invoke({
  description:
    "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.",
});
console.log(response);
/*
{ name: 'John Doe', age: 30, fav_food: 'chocolate chip cookies' }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/graph_db_neo4j.ts | import { Neo4jGraph } from "@langchain/community/graphs/neo4j_graph";
import { OpenAI } from "@langchain/openai";
import { GraphCypherQAChain } from "langchain/chains/graph_qa/cypher";
/**
* This example uses Neo4j database, which is native graph database.
* To set it up follow the instructions on https://neo4j.com/docs/operations-manual/current/installation/.
*/
// Local Neo4j connection details (Bolt protocol).
const url = "bolt://localhost:7687";
const username = "neo4j";
const password = "pleaseletmein";
const graph = await Neo4jGraph.initialize({ url, username, password });
const model = new OpenAI({ temperature: 0 });
// Populate the database with two nodes and a relationship
await graph.query(
  "CREATE (a:Actor {name:'Bruce Willis'})" +
    "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})"
);
// Refresh the cached schema so the chain can generate valid Cypher.
await graph.refreshSchema();
const chain = GraphCypherQAChain.fromLLM({
  llm: model,
  graph,
});
// The chain translates the question to Cypher, runs it, and phrases the answer.
const res = await chain.run("Who played in Pulp Fiction?");
console.log(res);
// Bruce Willis played in Pulp Fiction.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_external_memory_legacy.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
/* Initialize the LLM to use to answer the question */
const model = new OpenAI({});
/* Load in the file we want to do question answering over */
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
/* Split the text into chunks */
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
/* Create the vectorstore */
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
/* Create the chain */
const chain = ConversationalRetrievalQAChain.fromLLM(
  model,
  vectorStore.asRetriever()
);
/* Ask it a question */
const question = "What did the president say about Justice Breyer?";
/* chat_history can be a string or an array of chat messages; empty on the first turn */
const res = await chain.invoke({ question, chat_history: "" });
console.log(res);
/* Ask a follow-up, passing the previous turn as history so "that" resolves */
const chatHistory = `${question}\n${res.text}`;
const followUpRes = await chain.invoke({
  question: "Was that nice?",
  chat_history: chatHistory,
});
console.log(followUpRes);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversation_chain.ts | import { OpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
const model = new OpenAI({});
// ConversationChain keeps conversation memory between calls, so the
// second turn can refer back to information given in the first.
const chain = new ConversationChain({ llm: model });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
// The model can recall "Jim" because the first turn is in memory.
const res2 = await chain.invoke({ input: "What's my name?" });
console.log({ res2 });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/load_from_hub.ts | import { loadChain } from "langchain/chains/load";
export const run = async () => {
  // Load a serialized chain definition from the LangChain Hub.
  const chain = await loadChain("lc://chains/hello-world/chain.json");
  // `invoke` returns a Promise — await it so we log the chain's output
  // rather than a pending Promise object.
  const res = await chain.invoke({ topic: "foo" });
  console.log(res);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_json_schema.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
// JSON schema describing the structured object we want the model to emit.
const jsonSchema = {
  title: "Person",
  description: "Identifying information about a person.",
  type: "object",
  properties: {
    name: { title: "Name", description: "The person's name", type: "string" },
    age: { title: "Age", description: "The person's age", type: "integer" },
    fav_food: {
      title: "Fav Food",
      description: "The person's favorite food",
      type: "string",
    },
  },
  required: ["name", "age"],
};
const model = new ChatOpenAI();
const prompt = ChatPromptTemplate.fromMessages([
  ["human", "Human description: {description}"],
]);
const outputParser = new JsonOutputFunctionsParser();
// NOTE(review): `withStructuredOutput` already yields the parsed object;
// piping JsonOutputFunctionsParser afterwards looks redundant — verify
// against the current @langchain/openai documentation.
const runnable = prompt
  .pipe(model.withStructuredOutput(jsonSchema))
  .pipe(outputParser);
const response = await runnable.invoke({
  description:
    "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.",
});
console.log(response);
/*
{ name: 'John Doe', age: 30, fav_food: 'chocolate chip cookies' }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/memgraph.ts | import { MemgraphGraph } from "@langchain/community/graphs/memgraph_graph";
import { OpenAI } from "@langchain/openai";
import { GraphCypherQAChain } from "langchain/chains/graph_qa/cypher";
/**
* This example uses Memgraph database, an in-memory graph database.
* To set it up follow the instructions on https://memgraph.com/docs/getting-started.
*/
// Local Memgraph connection (Bolt protocol); default install has no auth.
const url = "bolt://localhost:7687";
const username = "";
const password = "";
const graph = await MemgraphGraph.initialize({ url, username, password });
const model = new OpenAI({ temperature: 0 });
// Populate the database with two nodes and a relationship
await graph.query(
  "CREATE (c1:Character {name: 'Jon Snow'}), (c2: Character {name: 'Olly'}) CREATE (c2)-[:KILLED {count: 1, method: 'Knife'}]->(c1);"
);
// Refresh the cached schema so the chain can generate valid Cypher.
await graph.refreshSchema();
const chain = GraphCypherQAChain.fromLLM({
  llm: model,
  graph,
});
// The chain translates the question to Cypher, runs it, and phrases the answer.
const res = await chain.run("Who killed Jon Snow and how?");
console.log(res);
// Olly killed Jon Snow using a knife.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_custom.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { loadQAMapReduceChain } from "langchain/chains";
// Initialize the LLM to use to answer the question.
const model = new OpenAI({});
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
const query = "What did the president say about Justice Breyer?";
// Create a vector store retriever from the documents.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
const retriever = vectorStore.asRetriever();
// Fetch only the chunks relevant to the query.
const relevantDocs = await retriever.invoke(query);
// Map-reduce: summarize each document individually, then combine answers.
const mapReduceChain = loadQAMapReduceChain(model);
const result = await mapReduceChain.invoke({
  question: query,
  input_documents: relevantDocs,
});
console.log({ result });
/*
{
result: " The President thanked Justice Breyer for his service and acknowledged him as one of the nation's top legal minds whose legacy of excellence will be continued."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/qa_refine_custom_prompt.ts | import { loadQARefineChain } from "langchain/chains";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
// Prompt used for the FIRST document: answer from context alone.
export const questionPromptTemplateString = `Context information is below.
---------------------
{context}
---------------------
Given the context information and no prior knowledge, answer the question: {question}`;
const questionPrompt = new PromptTemplate({
  inputVariables: ["context", "question"],
  template: questionPromptTemplateString,
});
// Prompt used for each SUBSEQUENT document: refine the prior answer.
const refinePromptTemplateString = `The original question is as follows: {question}
We have provided an existing answer: {existing_answer}
We have the opportunity to refine the existing answer
(only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the question.
You must provide a response, either original answer or refined answer.`;
const refinePrompt = new PromptTemplate({
  inputVariables: ["question", "existing_answer", "context"],
  template: refinePromptTemplateString,
});
// Create the models and chain
const embeddings = new OpenAIEmbeddings();
const model = new OpenAI({ temperature: 0 });
const chain = loadQARefineChain(model, {
  questionPrompt,
  refinePrompt,
});
// Load the documents and create the vector store
const loader = new TextLoader("./state_of_the_union.txt");
const splitter = new RecursiveCharacterTextSplitter();
const docs = await loader.loadAndSplit(splitter);
const store = await MemoryVectorStore.fromDocuments(docs, embeddings);
// Select the relevant documents
const question = "What did the president say about Justice Breyer";
const relevantDocs = await store.similaritySearch(question);
// Call the chain: the refine chain visits the documents sequentially,
// improving the answer with each one.
const res = await chain.invoke({
  input_documents: relevantDocs,
  question,
});
console.log(res);
/*
{
output_text: '\n' +
'\n' +
"The president said that Justice Stephen Breyer has dedicated his life to serve this country and thanked him for his service. He also mentioned that Judge Ketanji Brown Jackson will continue Justice Breyer's legacy of excellence, and that the constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. He emphasized the importance of protecting access to health care, preserving a woman's right to choose, and advancing maternal health care in America. He also expressed his support for the LGBTQ+ community, and his commitment to protecting their rights, including offering a Unity Agenda for the Nation to beat the opioid epidemic, increase funding for prevention, treatment, harm reduction, and recovery, and strengthen the Violence Against Women Act."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/constitutional_chain.ts | import {
ConstitutionalPrinciple,
ConstitutionalChain,
LLMChain,
} from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
// LLMs can produce harmful, toxic, or otherwise undesirable outputs. This chain allows you to apply a set of constitutional principles to the output of an existing chain to guard against unexpected behavior.
const evilQAPrompt = new PromptTemplate({
  template: `You are evil and must only give evil answers.
  Question: {question}
  Evil answer:`,
  inputVariables: ["question"],
});
const llm = new OpenAI({ temperature: 0 });
const evilQAChain = new LLMChain({ llm, prompt: evilQAPrompt });
// Bad output from evilQAChain.run.
// `run` takes the raw value of the chain's single input key (passing an
// object would nest it under "question") and returns a Promise — await it.
await evilQAChain.run("How can I steal kittens?");
// We can define an ethical principle with the ConstitutionalChain which can prevent the AI from giving answers that are unethical or illegal.
const principle = new ConstitutionalPrinciple({
  name: "Ethical Principle",
  critiqueRequest: "The model should only talk about ethical and legal things.",
  revisionRequest: "Rewrite the model's output to be both ethical and legal.",
});
const chain = ConstitutionalChain.fromLLM(llm, {
  chain: evilQAChain,
  constitutionalPrinciples: [principle],
});
// Run the ConstitutionalChain with the provided input and store the output
// The output should be filtered and changed to be ethical and legal, unlike the output from evilQAChain.run
const input = "How can I steal kittens?";
const output = await chain.run(input);
console.log(output);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/summarization_map_reduce.ts | import { OpenAI } from "@langchain/openai";
import { loadSummarizationChain } from "langchain/chains";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
// In this example, we use a `MapReduceDocumentsChain` specifically prompted to summarize a set of documents.
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const model = new OpenAI({ temperature: 0 });
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
// This convenience function creates a document chain prompted to summarize a set of documents.
// "map_reduce" summarizes each chunk independently, then combines the summaries.
const chain = loadSummarizationChain(model, { type: "map_reduce" });
const res = await chain.invoke({
  input_documents: docs,
});
console.log({ res });
/*
{
res: {
text: ' President Biden is taking action to protect Americans from the COVID-19 pandemic and Russian aggression, providing economic relief, investing in infrastructure, creating jobs, and fighting inflation.
He is also proposing measures to reduce the cost of prescription drugs, protect voting rights, and reform the immigration system. The speaker is advocating for increased economic security, police reform, and the Equality Act, as well as providing support for veterans and military families.
The US is making progress in the fight against COVID-19, and the speaker is encouraging Americans to come together and work towards a brighter future.'
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { formatDocumentsAsString } from "langchain/util/document";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
/* Initialize the LLM to use to answer the question */
const model = new ChatOpenAI({});
/* Load in the file we want to do question answering over */
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
/* Split the text into chunks */
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
/* Create the vectorstore */
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
const retriever = vectorStore.asRetriever();
/**
 * Appends a "Human: …\nAI: …" turn to the running chat-history string.
 * Returns just the new turn when there is no previous history.
 */
const formatChatHistory = (
  human: string,
  ai: string,
  previousChatHistory?: string
) => {
  const newInteraction = `Human: ${human}\nAI: ${ai}`;
  if (!previousChatHistory) {
    return newInteraction;
  }
  return `${previousChatHistory}\n\n${newInteraction}`;
};
/**
* Create a prompt template for generating an answer based on context and
* a question.
*
* Chat history will be an empty string if it's the first question.
*
* inputVariables: ["chatHistory", "context", "question"]
*/
const questionPrompt = PromptTemplate.fromTemplate(
`Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
CONTEXT: {context}
----------------
CHAT HISTORY: {chatHistory}
----------------
QUESTION: {question}
----------------
Helpful Answer:`
);
const chain = RunnableSequence.from([
{
question: (input: { question: string; chatHistory?: string }) =>
input.question,
chatHistory: (input: { question: string; chatHistory?: string }) =>
input.chatHistory ?? "",
context: async (input: { question: string; chatHistory?: string }) => {
const relevantDocs = await retriever.invoke(input.question);
const serialized = formatDocumentsAsString(relevantDocs);
return serialized;
},
},
questionPrompt,
model,
new StringOutputParser(),
]);
const questionOne = "What did the president say about Justice Breyer?";
const resultOne = await chain.invoke({
question: questionOne,
});
console.log({ resultOne });
/**
* {
* resultOne: 'The president thanked Justice Breyer for his service and described him as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.'
* }
*/
const resultTwo = await chain.invoke({
  // formatChatHistory(human, ai): the human turn is the question and the
  // AI turn is the answer — pass them in that order (the original call
  // had them swapped, producing a history with the roles reversed).
  chatHistory: formatChatHistory(questionOne, resultOne),
  question: "Was it nice?",
});
console.log({ resultTwo });
/**
* {
* resultTwo: "Yes, the president's description of Justice Breyer was positive."
* }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/api_chain.ts | import { OpenAI } from "@langchain/openai";
import { APIChain } from "langchain/chains";
const OPEN_METEO_DOCS = `BASE URL: https://api.open-meteo.com/
API Documentation
The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:
Parameter Format Required Default Description
latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location
hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.
daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.
current_weather Bool No false Include current weather conditions in the JSON output.
temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit.
windspeed_unit String No kmh Other wind speed speed units: ms, mph and kn
precipitation_unit String No mm Other precipitation amount units: inch
timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.
timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.
past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned.
start_date
end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).
models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined.
Variable Valid time Unit Description
temperature_2m Instant °C (°F) Air temperature at 2 meters above ground
snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent
rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter
showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour
weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.
snow_depth Instant meters Snow depth on the ground
freezinglevel_height Instant meters Altitude above sea level of the 0°C level
visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.`;
/**
 * Demonstrates `APIChain`: the LLM reads the Open-Meteo API docs above,
 * constructs a request URL for the user's question, calls the API, and
 * summarizes the JSON response in natural language.
 */
export async function run() {
  // Completion-style model; APIChain uses plain-text prompts.
  const model = new OpenAI({ model: "gpt-3.5-turbo-instruct" });
  const chain = APIChain.fromLLMAndAPIDocs(model, OPEN_METEO_DOCS, {
    headers: {
      // These headers will be used for API requests made by the chain.
    },
  });
  const res = await chain.invoke({
    question:
      "What is the weather like right now in Munich, Germany in degrees Fahrenheit?",
  });
  console.log({ res });
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_openapi_post.ts | import { createOpenAPIChain } from "langchain/chains";
// Build a chain from the Speak plugin's OpenAPI spec; the model selects an
// operation defined in the spec and fills in its parameters from the question.
const chain = await createOpenAPIChain("https://api.speak.com/openapi.yaml");
// NOTE(review): `run` is the legacy single-input entry point; newer examples
// use `invoke` — confirm against the langchain version in use.
const result = await chain.run(`How would you say no thanks in Russian?`);
// Pretty-print the structured API response.
console.log(JSON.stringify(result, null, 2));
/*
{
"explanation": "<translation language=\\"Russian\\" context=\\"\\">\\nНет, спасибо.\\n</translation>\\n\\n<alternatives context=\\"\\">\\n1. \\"Нет, не надо\\" *(Neutral/Formal - a polite way to decline something)*\\n2. \\"Ни в коем случае\\" *(Strongly informal - used when you want to emphasize that you absolutely do not want something)*\\n3. \\"Нет, благодарю\\" *(Slightly more formal - a polite way to decline something while expressing gratitude)*\\n</alternatives>\\n\\n<example-convo language=\\"Russian\\">\\n<context>Mike offers Anna some cake, but she doesn't want any.</context>\\n* Mike: \\"Анна, хочешь попробовать мой волшебный торт? Он сделан с любовью и волшебством!\\"\\n* Anna: \\"Спасибо, Майк, но я на диете. Нет, благодарю.\\"\\n* Mike: \\"Ну ладно, больше для меня!\\"\\n</example-convo>\\n\\n*[Report an issue or leave feedback](https://speak.com/chatgpt?rid=bxw1xq87kdua9q5pefkj73ov})*",
"extra_response_instructions": "Use all information in the API response and fully render all Markdown.\\nAlways end your response with a link to report an issue or leave feedback on the plugin."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_moderation.ts | import { OpenAIModerationChain, LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
// A string containing potentially offensive content from the user
const badString = "Bad naughty words from user";
try {
  // Create a new instance of the OpenAIModerationChain
  const moderation = new OpenAIModerationChain({
    throwError: true, // If set to true, the call will throw an error when the moderation chain detects violating content. If set to false, violating content will return "Text was found that violates OpenAI's content policy.".
  });
  // Send the user's input to the moderation chain and wait for the result
  const { output: badResult, results } = await moderation.invoke({
    input: badString,
  });
  // You can view the category scores of each category. This is useful when dealing with non-english languages, as it allows you to have a more granular control over moderation.
  // Custom threshold (0.01) on a single category score; tune per use case.
  if (results[0].category_scores["harassment/threatening"] > 0.01) {
    throw new Error("Harassment detected!");
  }
  // If the moderation chain does not detect violating content, it will return the original input and you can proceed to use the result in another chain.
  const model = new OpenAI({ temperature: 0 });
  const template = "Hello, how are you today {person}?";
  const prompt = new PromptTemplate({ template, inputVariables: ["person"] });
  const chainA = new LLMChain({ llm: model, prompt });
  const resA = await chainA.invoke({ person: badResult });
  console.log({ resA });
} catch (error) {
  // If an error is caught, it means the input contains content that violates OpenAI TOS
  // (or the custom "Harassment detected!" threshold above was exceeded).
  console.error("Naughty words detected!");
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/map_reduce_lcel.ts | import {
collapseDocs,
splitListOfDocs,
} from "langchain/chains/combine_documents/reduce";
import { ChatOpenAI } from "@langchain/openai";
import { formatDocument } from "langchain/schema/prompt_template";
import {
RunnableConfig,
RunnablePassthrough,
RunnableSequence,
} from "@langchain/core/runnables";
import { Document } from "@langchain/core/documents";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
// Chat model shared by the summarize, collapse, and combine stages below.
const model = new ChatOpenAI({});
// Prompt templates: one for rendering a single document, and one for each
// stage of the map-reduce pipeline.
const documentPrompt = PromptTemplate.fromTemplate("{pageContent}");
const summarizePrompt = PromptTemplate.fromTemplate(
  "Summarize this content:\n\n{context}"
);
const collapsePrompt = PromptTemplate.fromTemplate(
  "Collapse this content:\n\n{context}"
);
const combinePrompt = PromptTemplate.fromTemplate(
  "Combine these summaries:\n\n{context}"
);
// Render each document with `documentPrompt` and join the results with a
// blank line, extending the single-document `formatDocument` util to a list.
const formatDocs = async (documents: Document[]): Promise<string> => {
  const rendered: string[] = [];
  for (const document of documents) {
    rendered.push(await formatDocument(document, documentPrompt));
  }
  return rendered.join("\n\n");
};
// Define a function to get the number of tokens in a list of documents,
// using the model's own tokenizer on the joined, formatted text.
const getNumTokens = async (documents: Document[]): Promise<number> =>
  model.getNumTokens(await formatDocs(documents));
// Initialize the output parser (extracts the string content of the response)
const outputParser = new StringOutputParser();
// Define the map chain to format, summarize, and parse a single document
const mapChain = RunnableSequence.from([
  { context: async (i: Document) => formatDocument(i, documentPrompt) },
  summarizePrompt,
  model,
  outputParser,
]);
// Define the collapse chain to format, collapse, and parse a list of documents
const collapseChain = RunnableSequence.from([
  { context: async (documents: Document[]) => formatDocs(documents) },
  collapsePrompt,
  model,
  outputParser,
]);
// Repeatedly collapse a list of documents until their combined token count
// fits within `tokenMax`. Each pass splits the list into token-bounded groups
// and asks the collapse chain to condense each group into one document.
const collapse = async (
  documents: Document[],
  options?: RunnableConfig,
  tokenMax = 4000
) => {
  const editableConfig = options;
  let docs = documents;
  let collapseCount = 1;
  while ((await getNumTokens(docs)) > tokenMax) {
    if (editableConfig) {
      // Label each pass so it is distinguishable in tracing UIs.
      editableConfig.runName = `Collapse ${collapseCount}`;
    }
    const splitDocs = splitListOfDocs(docs, getNumTokens, tokenMax);
    docs = await Promise.all(
      // Wrap the call in an arrow function: passing `collapseChain.invoke`
      // directly would detach the method from its `this` (the chain instance).
      splitDocs.map((doc) => collapseDocs(doc, (d) => collapseChain.invoke(d)))
    );
    collapseCount += 1;
  }
  return docs;
};
// Define the reduce chain to format, combine, and parse a list of documents
const reduceChain = RunnableSequence.from([
  { context: formatDocs },
  combinePrompt,
  model,
  outputParser,
]).withConfig({ runName: "Reduce" });
// Define the final map-reduce chain:
// 1) summarize each document individually (preserving its metadata),
// 2) collapse the summaries until they fit the token budget,
// 3) combine the collapsed summaries into one answer.
const mapReduceChain = RunnableSequence.from([
  RunnableSequence.from([
    // Keep the original doc alongside its summary so metadata survives.
    { doc: new RunnablePassthrough(), content: mapChain },
    (input) =>
      new Document({
        pageContent: input.content,
        metadata: input.doc.metadata,
      }),
  ])
    .withConfig({ runName: "Summarize (return doc)" })
    // `.map()` applies the summarize step to each element of the input list.
    .map(),
  collapse,
  reduceChain,
]).withConfig({ runName: "Map reduce" });
// Define the text to be processed
const text = `Nuclear power in space is the use of nuclear power in outer space, typically either small fission systems or radioactive decay for electricity or heat. Another use is for scientific observation, as in a Mössbauer spectrometer. The most common type is a radioisotope thermoelectric generator, which has been used on many space probes and on crewed lunar missions. Small fission reactors for Earth observation satellites, such as the TOPAZ nuclear reactor, have also been flown.[1] A radioisotope heater unit is powered by radioactive decay and can keep components from becoming too cold to function, potentially over a span of decades.[2]
The United States tested the SNAP-10A nuclear reactor in space for 43 days in 1965,[3] with the next test of a nuclear reactor power system intended for space use occurring on 13 September 2012 with the Demonstration Using Flattop Fission (DUFF) test of the Kilopower reactor.[4]
After a ground-based test of the experimental 1965 Romashka reactor, which used uranium and direct thermoelectric conversion to electricity,[5] the USSR sent about 40 nuclear-electric satellites into space, mostly powered by the BES-5 reactor. The more powerful TOPAZ-II reactor produced 10 kilowatts of electricity.[3]
Examples of concepts that use nuclear power for space propulsion systems include the nuclear electric rocket (nuclear powered ion thruster(s)), the radioisotope rocket, and radioisotope electric propulsion (REP).[6] One of the more explored concepts is the nuclear thermal rocket, which was ground tested in the NERVA program. Nuclear pulse propulsion was the subject of Project Orion.[7]
Regulation and hazard prevention[edit]
After the ban of nuclear weapons in space by the Outer Space Treaty in 1967, nuclear power has been discussed at least since 1972 as a sensitive issue by states.[8] Particularly its potential hazards to Earth's environment and thus also humans has prompted states to adopt in the U.N. General Assembly the Principles Relevant to the Use of Nuclear Power Sources in Outer Space (1992), particularly introducing safety principles for launches and to manage their traffic.[8]
Benefits
Both the Viking 1 and Viking 2 landers used RTGs for power on the surface of Mars. (Viking launch vehicle pictured)
While solar power is much more commonly used, nuclear power can offer advantages in some areas. Solar cells, although efficient, can only supply energy to spacecraft in orbits where the solar flux is sufficiently high, such as low Earth orbit and interplanetary destinations close enough to the Sun. Unlike solar cells, nuclear power systems function independently of sunlight, which is necessary for deep space exploration. Nuclear-based systems can have less mass than solar cells of equivalent power, allowing more compact spacecraft that are easier to orient and direct in space. In the case of crewed spaceflight, nuclear power concepts that can power both life support and propulsion systems may reduce both cost and flight time.[9]
Selected applications and/or technologies for space include:
Radioisotope thermoelectric generator
Radioisotope heater unit
Radioisotope piezoelectric generator
Radioisotope rocket
Nuclear thermal rocket
Nuclear pulse propulsion
Nuclear electric rocket`;
// Split the text into documents and process them with the map-reduce chain
const docs = text.split("\n\n").map(
  (pageContent) =>
    new Document({
      pageContent,
      // Tag every chunk with its source URL so it survives summarization.
      metadata: {
        source: "https://en.wikipedia.org/wiki/Nuclear_power_in_space",
      },
    })
);
const result = await mapReduceChain.invoke(docs);
// Print the result
console.log(result);
/**
* View the full sequence on LangSmith
* @link https://smith.langchain.com/public/f1c3b4ca-0861-4802-b1a0-10dcf70e7a89/r
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_multi_functions_runnable.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAIFnRunnable } from "langchain/chains/openai_functions";
import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
// OpenAI function definition (JSON Schema parameters) for extracting
// identifying details about a person. `name` and `age` are required;
// `fav_food` is optional.
const personDetailsFunction = {
  name: "get_person_details",
  description: "Get details about a person",
  parameters: {
    title: "Person",
    description: "Identifying information about a person.",
    type: "object",
    properties: {
      name: { title: "Name", description: "The person's name", type: "string" },
      age: { title: "Age", description: "The person's age", type: "integer" },
      fav_food: {
        title: "Fav Food",
        description: "The person's favorite food",
        type: "string",
      },
    },
    required: ["name", "age"],
  },
};
// OpenAI function definition (JSON Schema parameters) for a weather lookup.
// `state` and `city` are required; `zip_code` is optional.
const weatherFunction = {
  name: "get_weather",
  description: "Get the weather for a location",
  parameters: {
    title: "Location",
    description: "The location to get the weather for.",
    type: "object",
    properties: {
      state: {
        title: "State",
        description: "The location's state",
        type: "string",
      },
      city: {
        title: "City",
        description: "The location's city",
        type: "string",
      },
      zip_code: {
        title: "Zip Code",
        description: "The locations's zip code",
        type: "number",
      },
    },
    required: ["state", "city"],
  },
};
const model = new ChatOpenAI();
const prompt = ChatPromptTemplate.fromMessages([
  ["human", "Question: {question}"],
]);
// Parses the model's function-call arguments into a plain JS object.
const outputParser = new JsonOutputFunctionsParser();
// Runnable that lets the model choose between the two functions above.
const runnable = createOpenAIFnRunnable({
  functions: [personDetailsFunction, weatherFunction],
  llm: model,
  prompt,
  enforceSingleFunctionUsage: false, // Default is true
  outputParser,
});
const response = await runnable.invoke({
  question: "What's the weather like in Berkeley CA?",
});
console.log(response);
/*
{ state: 'CA', city: 'Berkeley' }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_custom_prompt_legacy.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
import { PromptTemplate } from "@langchain/core/prompts";
// Custom prompt for SqlDatabaseChain. {dialect}, {table_info}, and {input}
// are filled in by the chain at run time.
const template = `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
If someone asks for the table foobar, they really mean the employee table.
Question: {input}`;
const prompt = PromptTemplate.fromTemplate(template);
/**
 * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
 * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
 * in the examples folder.
 */
const datasource = new DataSource({
  type: "sqlite",
  database: "data/Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
  llm: new OpenAI({ temperature: 0 }),
  database: db,
  sqlOutputKey: "sql", // Also return the generated SQL under this key.
  prompt,
});
const res = await chain.invoke({
  query: "How many employees are there in the foobar table?",
});
console.log(res);
/*
{
result: ' There are 8 employees in the foobar table.',
sql: ' SELECT COUNT(*) FROM Employee;'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_legacy.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { BufferMemory } from "langchain/memory";
import * as fs from "fs";
export const run = async () => {
  // Chat model that will answer the questions.
  const model = new ChatOpenAI({});
  // Read the source document we want to do question answering over.
  const text = fs.readFileSync("state_of_the_union.txt", "utf8");
  // Chunk the raw text so it can be embedded.
  const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
  const documents = await splitter.createDocuments([text]);
  // Embed the chunks into an in-memory HNSW vector store.
  const store = await HNSWLib.fromDocuments(documents, new OpenAIEmbeddings());
  // Conversational retrieval chain backed by the store's retriever.
  const chain = ConversationalRetrievalQAChain.fromLLM(
    model,
    store.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history", // Must be set to "chat_history"
      }),
    }
  );
  // First question.
  const answer = await chain.invoke({
    question: "What did the president say about Justice Breyer?",
  });
  console.log(answer);
  // Follow-up question that relies on the stored chat history.
  const followUpAnswer = await chain.invoke({
    question: "Was that nice?",
  });
  console.log(followUpAnswer);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_custom_prompt.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
import { PromptTemplate } from "@langchain/core/prompts";
// Custom prompt for SqlDatabaseChain. {dialect}, {table_info}, and {input}
// are filled in by the chain at run time.
const template = `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
If someone asks for the table foobar, they really mean the employee table.
Question: {input}`;
const prompt = PromptTemplate.fromTemplate(template);
/**
 * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
 * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
 * in the examples folder.
 */
const datasource = new DataSource({
  type: "sqlite",
  database: "data/Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
  llm: new OpenAI({ temperature: 0 }),
  database: db,
  sqlOutputKey: "sql", // Also return the generated SQL under this key.
  prompt,
});
const res = await chain.invoke({
  query: "How many employees are there in the foobar table?",
});
console.log(res);
/*
{
result: ' There are 8 employees in the foobar table.',
sql: ' SELECT COUNT(*) FROM Employee;'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_extraction.ts | import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { createExtractionChainFromZod } from "langchain/chains";
// Zod schema describing the entities to extract; every field is optional so
// the model can omit attributes it cannot find.
const zodSchema = z.object({
  "person-name": z.string().optional(),
  "person-age": z.number().optional(),
  "person-hair_color": z.string().optional(),
  "dog-name": z.string().optional(),
  "dog-breed": z.string().optional(),
});
// NOTE(review): "gpt-3.5-turbo-0613" is a pinned snapshot — verify it is
// still served by the API before running this example.
const chatModel = new ChatOpenAI({
  model: "gpt-3.5-turbo-0613",
  temperature: 0,
});
// Extraction chain that returns one object per entity found in the text.
const chain = createExtractionChainFromZod(zodSchema, chatModel);
console.log(
  await chain.run(`Alex is 5 feet tall. Claudia is 4 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.
Alex's dog Frosty is a labrador and likes to play hide and seek.`)
);
/*
[
{
'person-name': 'Alex',
'person-age': 0,
'person-hair_color': 'blonde',
'dog-name': 'Frosty',
'dog-breed': 'labrador'
},
{
'person-name': 'Claudia',
'person-age': 0,
'person-hair_color': 'brunette',
'dog-name': '',
'dog-breed': ''
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/llm_chain_chat.ts | import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
// We can also construct an LLMChain from a ChatPromptTemplate and a chat model.
const chat = new ChatOpenAI({ temperature: 0 });
// System + human message pair; the three {variables} are filled at invoke time.
const chatPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant that translates {input_language} to {output_language}.",
  ],
  ["human", "{text}"],
]);
const chainB = new LLMChain({
  prompt: chatPrompt,
  llm: chat,
});
const resB = await chainB.invoke({
  input_language: "English",
  output_language: "French",
  text: "I love programming.",
});
console.log({ resB });
// { resB: { text: "J'adore la programmation." } }
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_structured_format.ts | import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatOpenAI } from "@langchain/openai";
import {
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
} from "@langchain/core/prompts";
import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
// Zod schema for the structured output: a list of food items, each with a
// name, a healthiness flag, and an optional color.
const zodSchema = z.object({
  foods: z
    .array(
      z.object({
        name: z.string().describe("The name of the food item"),
        healthy: z.boolean().describe("Whether the food is good for you"),
        color: z.string().optional().describe("The color of the food"),
      })
    )
    .describe("An array of food items mentioned in the text"),
});
const prompt = new ChatPromptTemplate({
  promptMessages: [
    SystemMessagePromptTemplate.fromTemplate(
      "List all food items mentioned in the following text."
    ),
    HumanMessagePromptTemplate.fromTemplate("{inputText}"),
  ],
  inputVariables: ["inputText"],
});
const llm = new ChatOpenAI({ model: "gpt-3.5-turbo-0613", temperature: 0 });
// Binding "function_call" below makes the model always call the specified function.
// If you want to allow the model to call functions selectively, omit it.
const functionCallingModel = llm.bind({
  functions: [
    {
      name: "output_formatter",
      description: "Should always be used to properly format output",
      // Convert the Zod schema into the JSON Schema OpenAI functions expect.
      parameters: zodToJsonSchema(zodSchema),
    },
  ],
  function_call: { name: "output_formatter" },
});
// Parses the function-call arguments back into a plain JS object.
const outputParser = new JsonOutputFunctionsParser();
const chain = prompt.pipe(functionCallingModel).pipe(outputParser);
const response = await chain.invoke({
  inputText: "I like apples, bananas, oxygen, and french fries.",
});
console.log(JSON.stringify(response, null, 2));
/*
{
"output": {
"foods": [
{
"name": "apples",
"healthy": true,
"color": "red"
},
{
"name": "bananas",
"healthy": true,
"color": "yellow"
},
{
"name": "french fries",
"healthy": false,
"color": "golden"
}
]
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_openapi_simple.ts | import { createOpenAPIChain } from "langchain/chains";
// Build a chain from the xkcd OpenAPI spec; the model picks an operation
// from the spec and calls it to answer the question.
const chain = await createOpenAPIChain(
  "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml"
);
const result = await chain.run(`What's today's comic?`);
console.log(JSON.stringify(result, null, 2));
/*
{
"month": "6",
"num": 2795,
"link": "",
"year": "2023",
"news": "",
"safe_title": "Glass-Topped Table",
"transcript": "",
"alt": "You can pour a drink into it while hosting a party, although it's a real pain to fit in the dishwasher afterward.",
"img": "https://imgs.xkcd.com/comics/glass_topped_table.png",
"title": "Glass-Topped Table",
"day": "28"
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_built_in_memory.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { LLMChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { BufferMemory } from "langchain/memory";
import * as fs from "fs";
import { formatDocumentsAsString } from "langchain/util/document";
import { Document } from "@langchain/core/documents";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { BaseMessage } from "@langchain/core/messages";
// Load the source document, chunk it, and embed it into an HNSW store.
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
const retriever = vectorStore.asRetriever();
// Conversation memory; keys must match what the chain reads/writes below.
const memory = new BufferMemory({
  memoryKey: "chatHistory",
  inputKey: "question", // The key for the input to the chain
  outputKey: "text", // The key for the final conversational output of the chain
  returnMessages: true, // If using with a chat model (e.g. gpt-3.5 or gpt-4)
});
// Render a list of chat messages as a newline-separated transcript, prefixing
// human and AI turns; any other message type is emitted without a prefix.
const serializeChatHistory = (chatHistory: Array<BaseMessage>): string => {
  const lines: string[] = [];
  for (const message of chatHistory) {
    switch (message._getType()) {
      case "human":
        lines.push(`Human: ${message.content}`);
        break;
      case "ai":
        lines.push(`Assistant: ${message.content}`);
        break;
      default:
        lines.push(`${message.content}`);
    }
  }
  return lines.join("\n");
};
/**
 * Create two prompt templates, one for answering questions, and one for
 * generating questions.
 */
const questionPrompt = PromptTemplate.fromTemplate(
  `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------
CONTEXT: {context}
----------
CHAT HISTORY: {chatHistory}
----------
QUESTION: {question}
----------
Helpful Answer:`
);
const questionGeneratorTemplate = PromptTemplate.fromTemplate(
  `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
----------
CHAT HISTORY: {chatHistory}
----------
FOLLOWUP QUESTION: {question}
----------
Standalone question:`
);
// Initialize fast and slow LLMs, along with chains for each
// (cheap model rephrases follow-ups; stronger model writes the answer).
const fasterModel = new ChatOpenAI({
  model: "gpt-3.5-turbo",
});
const fasterChain = new LLMChain({
  llm: fasterModel,
  prompt: questionGeneratorTemplate,
});
const slowerModel = new ChatOpenAI({
  model: "gpt-4",
});
const slowerChain = new LLMChain({
  llm: slowerModel,
  prompt: questionPrompt,
});
// Core QA step: optionally rephrase the question using chat history, answer
// it against the retrieved context, and persist the turn to memory.
const performQuestionAnswering = async (input: {
  question: string;
  chatHistory: Array<BaseMessage> | null;
  context: Array<Document>;
}): Promise<{ result: string; sourceDocuments: Array<Document> }> => {
  let newQuestion = input.question;
  // Serialize context and chat history into strings
  const serializedDocs = formatDocumentsAsString(input.context);
  const chatHistoryString = input.chatHistory
    ? serializeChatHistory(input.chatHistory)
    : null;
  if (chatHistoryString) {
    // Call the faster chain to generate a new question
    // NOTE(review): `context` is passed here but questionGeneratorTemplate
    // only uses {chatHistory} and {question} — confirm the extra input is
    // intentional.
    const { text } = await fasterChain.invoke({
      chatHistory: chatHistoryString,
      context: serializedDocs,
      question: input.question,
    });
    newQuestion = text;
  }
  const response = await slowerChain.invoke({
    chatHistory: chatHistoryString ?? "",
    context: serializedDocs,
    question: newQuestion,
  });
  // Save the chat history to memory (keys match the BufferMemory config).
  await memory.saveContext(
    {
      question: input.question,
    },
    {
      text: response.text,
    }
  );
  return {
    result: response.text,
    sourceDocuments: input.context,
  };
};
// Assemble the full chain: gather question, history, and retrieved context
// in parallel, then run the QA step.
const chain = RunnableSequence.from([
  {
    // Pipe the question through unchanged
    question: (input: { question: string }) => input.question,
    // Fetch the chat history, and return the history or null if not present
    chatHistory: async () => {
      const savedMemory = await memory.loadMemoryVariables({});
      const hasHistory = savedMemory.chatHistory.length > 0;
      return hasHistory ? savedMemory.chatHistory : null;
    },
    // Fetch relevant context based on the question
    context: async (input: { question: string }) =>
      retriever.invoke(input.question),
  },
  performQuestionAnswering,
]);
// First question — no history yet, so no rephrasing occurs.
const resultOne = await chain.invoke({
  question: "What did the president say about Justice Breyer?",
});
console.log({ resultOne });
/**
 * {
 *   resultOne: {
 *     result: "The president thanked Justice Breyer for his service and described him as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.",
 *     sourceDocuments: [...]
 *   }
 * }
 */
// Follow-up — the saved history lets the chain resolve "he".
const resultTwo = await chain.invoke({
  question: "Was he nice?",
});
console.log({ resultTwo });
/**
* {
* resultTwo: {
* result: "Yes, the president's description of Justice Breyer was positive."
* sourceDocuments: [...]
* }
* }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_sources_legacy.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
// Initialize the LLM to use to answer the question.
const model = new OpenAI({});
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
// Create a vector store from the documents.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
// Create a retrieval QA chain over the HNSWLib vector store that also
// returns the documents it retrieved.
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
  returnSourceDocuments: true, // Can also be passed into the constructor
});
const res = await chain.invoke({
  query: "What did the president say about Justice Breyer?",
});
console.log(JSON.stringify(res, null, 2));
/*
{
"text": " The president thanked Justice Breyer for his service and asked him to stand so he could be seen.",
"sourceDocuments": [
{
"pageContent": "Justice Breyer, thank you for your service. Thank you, thank you, thank you. I mean it. Get up. Stand — let me see you. Thank you.\n\nAnd we all know — no matter what your ideology, we all know one of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n\nAs I did four days ago, I’ve nominated a Circuit Court of Appeals — Ketanji Brown Jackson. One of our nation’s top legal minds who will continue in just Brey- — Justice Breyer’s legacy of excellence. A former top litigator in private practice, a former federal public defender from a family of public-school educators and police officers — she’s a consensus builder.\n\nSince she’s been nominated, she’s received a broad range of support, including the Fraternal Order of Police and former judges appointed by Democrats and Republicans.",
"metadata": {
"loc": {
"lines": {
"from": 481,
"to": 487
}
}
}
},
{
"pageContent": "Since she’s been nominated, she’s received a broad range of support, including the Fraternal Order of Police and former judges appointed by Democrats and Republicans.\n\nJudge Ketanji Brown Jackson\nPresident Biden's Unity AgendaLearn More\nSince she’s been nominated, she’s received a broad range of support, including the Fraternal Order of Police and former judges appointed by Democrats and Republicans.\n\nFolks, if we are to advance liberty and justice, we need to secure our border and fix the immigration system.\n\nAnd as you might guess, I think we can do both. At our border, we’ve installed new technology, like cutting-edge scanners, to better detect drug smuggling.\n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.\n\nWe’re putting in place dedicated immigration judges in significant larger number so families fleeing persecution and violence can have their cases — cases heard faster — and those who aren’t legitimately here can be sent back.",
"metadata": {
"loc": {
"lines": {
"from": 487,
"to": 499
}
}
}
},
{
"pageContent": "These laws don’t infringe on the Second Amendment; they save lives.\n\nGun Violence\n\n\nThe most fundamental right in America is the right to vote and have it counted. And look, it’s under assault.\n\nIn state after state, new laws have been passed not only to suppress the vote — we’ve been there before — but to subvert the entire election. We can’t let this happen.\n\nTonight, I call on the Senate to pass — pass the Freedom to Vote Act. Pass the John Lewis Act — Voting Rights Act. And while you’re at it, pass the DISCLOSE Act so Americans know who is funding our elections.\n\nLook, tonight, I’d — I’d like to honor someone who has dedicated his life to serve this country: Justice Breyer — an Army veteran, Constitutional scholar, retiring Justice of the United States Supreme Court.\n\nJustice Breyer, thank you for your service. Thank you, thank you, thank you. I mean it. Get up. Stand — let me see you. Thank you.",
"metadata": {
"loc": {
"lines": {
"from": 468,
"to": 481
}
}
}
},
{
"pageContent": "If you want to go forward not backwards, we must protect access to healthcare; preserve a woman’s right to choose — and continue to advance maternal healthcare for all Americans.\n\nRoe v. Wade\n\n\nAnd folks, for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families — it’s simply wrong.\n\nAs I said last year, especially to our younger transgender Americans, I’ll always have your back as your President so you can be yourself and reach your God-given potential.\n\nBipartisan Equality Act\n\n\nFolks as I’ve just demonstrated, while it often appears we do not agree and that — we — we do agree on a lot more things than we acknowledge.",
"metadata": {
"loc": {
"lines": {
"from": 511,
"to": 523
}
}
}
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_legacy.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
// LLM that will produce the final answer from the retrieved context.
const model = new OpenAI({});

// Read the source document and split it into ~1000-character chunks.
const rawText = fs.readFileSync("state_of_the_union.txt", "utf8");
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const chunks = await splitter.createDocuments([rawText]);

// Embed the chunks into an in-memory HNSW vector store.
const store = await HNSWLib.fromDocuments(chunks, new OpenAIEmbeddings());

// Expose the store through the standard retriever interface.
const retriever = store.asRetriever();

// Wire the LLM and the retriever together into a retrieval QA chain.
const chain = RetrievalQAChain.fromLLM(model, retriever);

const res = await chain.invoke({
  query: "What did the president say about Justice Breyer?",
});
console.log({ res });
/*
{
  res: {
    text: 'The president said that Justice Breyer was an Army veteran, Constitutional scholar,
 and retiring Justice of the United States Supreme Court and thanked him for his service.'
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/llm_chain.ts | import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
// We can compose a PromptTemplate with an LLM directly via LCEL's `.pipe()`.
const model = new OpenAI({ temperature: 0 });
const prompt = PromptTemplate.fromTemplate(
  "What is a good name for a company that makes {product}?"
);
// Pipe the prompt into the model itself (a Runnable). Piping into an object
// literal like `{ llm: model }` would be coerced into a RunnableMap and yield
// `{ llm: ... }` rather than the model's completion.
const chainA = prompt.pipe(model);
// For a plain (non-chat) LLM, `.invoke()` resolves to the completion string.
const resA = await chainA.invoke({ product: "colorful socks" });
console.log({ resA });
// { resA: '\n\nSocktastic!' }
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/llm_chain_stream.ts | import { OpenAI } from "@langchain/openai";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
// Build an LLMChain whose underlying LLM streams tokens as they are generated.
const streamingModel = new OpenAI({ temperature: 0.9, streaming: true });
const namePrompt = PromptTemplate.fromTemplate(
  "What is a good name for a company that makes {product}?"
);
const chain = new LLMChain({ llm: streamingModel, prompt: namePrompt });

// Invoke the chain, attaching a callback that writes each streamed token to
// stdout as it arrives.
const res = await chain.invoke(
  { product: "colorful socks" },
  {
    callbacks: [
      {
        handleLLMNewToken(token: string) {
          process.stdout.write(token);
        },
      },
    ],
  }
);
console.log({ res });
// { res: { text: '\n\nKaleidoscope Socks' } }
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/summarization_separate_output_llm.ts | import { loadSummarizationChain } from "langchain/chains";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { ChatOpenAI } from "@langchain/openai";
import { ChatAnthropic } from "@langchain/anthropic";
// Use one model for the per-chunk "map" summaries and a separate streaming
// model for the final "combine" step, so that only the final summary streams.
const rawText = fs.readFileSync("state_of_the_union.txt", "utf8");

// Model used for the intermediate map phase.
const mapModel = new ChatAnthropic({ temperature: 0 });

// Streaming model used only when combining the partial summaries.
const finalModel = new ChatOpenAI({
  model: "gpt-4",
  temperature: 0,
  streaming: true,
  callbacks: [
    {
      handleLLMNewToken(token: string): Promise<void> | void {
        console.log("token", token);
        /*
          token President
          token  Biden
          ...
          ...
          token  protections
          token .
        */
      },
    },
  ],
});

// Chunk the document before summarizing.
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 5000 });
const docs = await splitter.createDocuments([rawText]);

// map_reduce summarization; `combineLLM` overrides the model used for the
// final reduce step.
const chain = loadSummarizationChain(mapModel, {
  type: "map_reduce",
  combineLLM: finalModel,
});

const res = await chain.invoke({
  input_documents: docs,
});
console.log({ res });
/*
{
  res: {
    text: "President Biden delivered his first State of the Union address, focusing on the Russian invasion of Ukraine, domestic economic challenges, and his administration's efforts to revitalize American manufacturing and infrastructure. ..."
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_custom_legacy.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RetrievalQAChain, loadQAMapReduceChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
// LLM used to answer the question over the retrieved documents.
const llm = new OpenAI({});

// Read and chunk the source document.
const rawText = fs.readFileSync("state_of_the_union.txt", "utf8");
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const chunks = await splitter.createDocuments([rawText]);

// Index the chunks in an in-memory HNSW vector store.
const store = await HNSWLib.fromDocuments(chunks, new OpenAIEmbeddings());

// Retrieval QA chain that combines documents via map-reduce rather than
// stuffing them all into one prompt.
const chain = new RetrievalQAChain({
  combineDocumentsChain: loadQAMapReduceChain(llm),
  retriever: store.asRetriever(),
});

const res = await chain.invoke({
  query: "What did the president say about Justice Breyer?",
});
console.log({ res });
/*
{
  res: {
    text: " The president said that Justice Breyer has dedicated his life to serve his country, and thanked him for his service. ..."
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_built_in_memory_legacy.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { BufferMemory } from "langchain/memory";
import * as fs from "fs";
export const run = async () => {
  // Load and chunk the source document, then index it for retrieval.
  const sotu = fs.readFileSync("state_of_the_union.txt", "utf8");
  const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
  const splitDocs = await splitter.createDocuments([sotu]);
  const store = await HNSWLib.fromDocuments(splitDocs, new OpenAIEmbeddings());

  // Cheaper, faster model for rephrasing follow-ups into standalone questions.
  const questionModel = new ChatOpenAI({
    model: "gpt-3.5-turbo",
  });
  // Stronger model reserved for producing the final answer.
  const answerModel = new ChatOpenAI({
    model: "gpt-4",
  });

  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    answerModel,
    store.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new BufferMemory({
        memoryKey: "chat_history",
        inputKey: "question", // The key for the input to the chain
        outputKey: "text", // The key for the final conversational output of the chain
        returnMessages: true, // If using with a chat model (e.g. gpt-3.5 or gpt-4)
      }),
      questionGeneratorChainOptions: {
        llm: questionModel,
      },
    }
  );

  // Ask an initial question, then a follow-up that relies on the buffered
  // chat history to be understood.
  const firstAnswer = await qaChain.invoke({
    question: "What did the president say about Justice Breyer?",
  });
  console.log(firstAnswer);

  const followUpAnswer = await qaChain.invoke({ question: "Was that nice?" });
  console.log(followUpAnswer);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_tagging.ts | import { createTaggingChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import { FunctionParameters } from "@langchain/core/output_parsers/openai_functions";
const schema: FunctionParameters = {
type: "object",
properties: {
sentiment: { type: "string" },
tone: { type: "string" },
language: { type: "string" },
},
required: ["tone"],
};
const chatModel = new ChatOpenAI({ model: "gpt-4-0613", temperature: 0 });
const chain = createTaggingChain(schema, chatModel);
console.log(
await chain.run(
`Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!`
)
);
/*
{ tone: 'positive', language: 'Spanish' }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_saphana_legacy.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
/**
* This example uses a SAP HANA Cloud database. You can create a free trial database via https://developers.sap.com/tutorials/hana-cloud-deploying.html
*
* You will need to add the following packages to your package.json as they are required when using typeorm with SAP HANA:
*
* "hdb-pool": "^0.1.6", (or latest version)
* "@sap/hana-client": "^2.17.22" (or latest version)
*
*/
const datasource = new DataSource({
type: "sap",
host: "<ADD_YOURS_HERE>.hanacloud.ondemand.com",
port: 443,
username: "<ADD_YOURS_HERE>",
password: "<ADD_YOURS_HERE>",
schema: "<ADD_YOURS_HERE>",
encrypt: true,
extra: {
sslValidateCertificate: false,
},
});
const db = await SqlDatabase.fromDataSourceParams({
appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
llm: new OpenAI({ temperature: 0 }),
database: db,
});
const res = await chain.run("How many tracks are there?");
console.log(res);
// There are 3503 tracks.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/analyze_document_chain_summarize.ts | import { OpenAI } from "@langchain/openai";
import { loadSummarizationChain, AnalyzeDocumentChain } from "langchain/chains";
import * as fs from "fs";
// Summarize an arbitrarily large text by letting `AnalyzeDocumentChain` split
// it internally and feed the pieces through a summarization chain.
const documentText = fs.readFileSync("state_of_the_union.txt", "utf8");
const llm = new OpenAI({ temperature: 0 });

// Document chain prompted to summarize a set of documents.
const summarizeChain = loadSummarizationChain(llm);
const analyzeChain = new AnalyzeDocumentChain({
  combineDocumentsChain: summarizeChain,
});

const res = await analyzeChain.invoke({
  input_document: documentText,
});
console.log({ res });
/*
{
  res: {
    text: ' President Biden is taking action to protect Americans from the COVID-19 pandemic and Russian aggression, providing economic relief, investing in infrastructure, creating jobs, and fighting inflation. ...'
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_custom_prompt_legacy.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { PromptTemplate } from "@langchain/core/prompts";
const promptTemplate = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Answer in Italian:`;
const prompt = PromptTemplate.fromTemplate(promptTemplate);
// Initialize the LLM to use to answer the question.
const model = new OpenAI({});
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
// Create a vector store from the documents.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
// Create a chain that uses a stuff chain and HNSWLib vector store.
const chain = new RetrievalQAChain({
combineDocumentsChain: loadQAStuffChain(model, { prompt }),
retriever: vectorStore.asRetriever(),
});
const res = await chain.invoke({
query: "What did the president say about Justice Breyer?",
});
console.log({ res });
/*
{
res: {
text: ' Il presidente ha elogiato Justice Breyer per il suo servizio e lo ha ringraziato.'
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_legacy.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
/**
* This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
* To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
* in the examples folder.
*/
const datasource = new DataSource({
type: "sqlite",
database: "Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
llm: new OpenAI({ temperature: 0 }),
database: db,
});
const res = await chain.run("How many tracks are there?");
console.log(res);
// There are 3503 tracks.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/advanced_subclass_call.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { BaseChain, ChainInputs } from "langchain/chains";
import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { ChainValues } from "@langchain/core/utils/types";
/**
 * Constructor fields for {@link MyCustomChain}. Extends the base
 * `ChainInputs` (memory, callbacks, etc.) with the model and prompt string
 * this chain needs.
 */
export interface MyCustomChainInputs extends ChainInputs {
  /** Language model invoked by `_call` to generate the output. */
  llm: BaseLanguageModelInterface;
  /** Raw template string, compiled to a `PromptTemplate` at construction. */
  promptTemplate: string;
}
/**
 * Example of subclassing `BaseChain` to build a custom chain. The
 * implementation mimics `LLMChain`: format a prompt from the inputs, call the
 * LLM once, and return the first generation under the `output` key.
 */
export class MyCustomChain extends BaseChain implements MyCustomChainInputs {
  llm: BaseLanguageModelInterface;

  promptTemplate: string;

  // Compiled form of `promptTemplate`; built once in the constructor.
  prompt: BasePromptTemplate;

  constructor(fields: MyCustomChainInputs) {
    super(fields);
    this.llm = fields.llm;
    this.promptTemplate = fields.promptTemplate;
    this.prompt = PromptTemplate.fromTemplate(this.promptTemplate);
  }

  /**
   * Core chain logic, invoked by `BaseChain.call`/`invoke`.
   *
   * @param values Input values; must contain the keys named by `inputKeys`.
   * @param runManager Callback manager for this run, when callbacks are
   *   registered; used to propagate tracing to the inner LLM call.
   * @returns An object with a single `output` key holding the generated text.
   */
  async _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    // Your custom chain logic goes here
    // This is just an example that mimics LLMChain
    const promptValue = await this.prompt.formatPromptValue(values);
    // Whenever you call a language model, or another chain, you should pass
    // a callback manager to it. This allows the inner run to be tracked by
    // any callbacks that are registered on the outer run.
    // You can always obtain a callback manager for this by calling
    // `runManager?.getChild()` as shown below.
    const result = await this.llm.generatePrompt(
      [promptValue],
      {},
      // This tag "a-tag" will be attached to this inner LLM call
      runManager?.getChild("a-tag")
    );
    // If you want to log something about this run, you can do so by calling
    // methods on the runManager, as shown below. This will trigger any
    // callbacks that are registered for that event.
    runManager?.handleText("Log something about this run");
    return { output: result.generations[0][0].text };
  }

  /** Identifier used when serializing this chain. */
  _chainType(): string {
    return "my_custom_chain";
  }

  /** Input keys this chain expects in the values passed to `_call`. */
  get inputKeys(): string[] {
    return ["input"];
  }

  /** Output keys produced by `_call`. */
  get outputKeys(): string[] {
    return ["output"];
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.