index
int64 0
0
| repo_id
stringclasses 351
values | file_path
stringlengths 26
186
| content
stringlengths 1
990k
|
|---|---|---|---|
0
|
hf_public_repos/chat-ui/src/lib
|
hf_public_repos/chat-ui/src/lib/server/embeddingModels.ts
|
import { env } from "$env/dynamic/private";
import { z } from "zod";
import { sum } from "$lib/utils/sum";
import {
embeddingEndpoints,
embeddingEndpointSchema,
type EmbeddingEndpoint,
} from "$lib/server/embeddingEndpoints/embeddingEndpoints";
import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints";
import JSON5 from "json5";
// Schema for one entry of TEXT_EMBEDDING_MODELS.
const modelConfig = z.object({
	/** Used as an identifier in DB */
	id: z.string().optional(),
	/** Used to link to the model page, and for inference */
	name: z.string().min(1),
	displayName: z.string().min(1).optional(),
	description: z.string().min(1).optional(),
	websiteUrl: z.string().url().optional(),
	modelUrl: z.string().url().optional(),
	/** At least one endpoint is required; one is picked per call by weighted random selection */
	endpoints: z.array(embeddingEndpointSchema).nonempty(),
	/** Maximum characters per text chunk fed to the embedding model */
	chunkCharLength: z.number().positive(),
	maxBatchSize: z.number().positive().optional(),
	/** Prefixes prepended before embedding — presumably for models that expect query/passage instructions */
	preQuery: z.string().default(""),
	prePassage: z.string().default(""),
});
// Default embedding model for backward compatibility
const rawEmbeddingModelJSON =
	env.TEXT_EMBEDDING_MODELS ||
	`[
	{
		"name": "Xenova/gte-small",
		"chunkCharLength": 512,
		"endpoints": [
			{ "type": "transformersjs" }
		]
	}
]`;

// JSON5 tolerates comments/trailing commas in the env var; zod validates the shape.
const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON));
/** Fills in the DB identifier, falling back to the model name when no id is set. */
const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => {
	const id = m.id || m.name;
	return { ...m, id };
};
/**
 * Attaches a lazy `getEndpoint` to a processed model. The concrete endpoint is
 * chosen at call time by weighted random selection over the configured endpoints.
 */
const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({
	...m,
	getEndpoint: async (): Promise<EmbeddingEndpoint> => {
		// Defensive fallback; the schema requires a non-empty `endpoints` array,
		// so this branch should be unreachable for parsed configs.
		if (!m.endpoints) {
			return embeddingEndpointTransformersJS({
				type: "transformersjs",
				weight: 1,
				model: m,
			});
		}

		// Weighted random selection: walk the list subtracting each weight until
		// the random value falls inside an endpoint's bucket.
		const totalWeight = sum(m.endpoints.map((e) => e.weight));
		let random = Math.random() * totalWeight;
		for (const endpoint of m.endpoints) {
			if (random < endpoint.weight) {
				const args = { ...endpoint, model: m };
				switch (args.type) {
					case "tei":
						return embeddingEndpoints.tei(args);
					case "transformersjs":
						return embeddingEndpoints.transformersjs(args);
					case "openai":
						return embeddingEndpoints.openai(args);
					case "hfapi":
						return embeddingEndpoints.hfapi(args);
					default:
						// Fixed: `${args}` stringified the object as "[object Object]",
						// hiding the offending config from the error message.
						throw new Error(`Unknown endpoint type: ${JSON.stringify(args)}`);
				}
			}
			random -= endpoint.weight;
		}
		throw new Error(`Failed to select embedding endpoint`);
	},
});
// Resolved at module load (top-level await): every configured model gets its
// id filled in and a lazy `getEndpoint` attached.
export const embeddingModels = await Promise.all(
	embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint))
);

// The first configured model is the default.
export const defaultEmbeddingModel = embeddingModels[0];
/** Builds a zod enum accepting the given key ("id" or "name") of any listed model. */
const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => {
	const [head, ...tail] = _models;
	return z.enum([head[key], ...tail.map((m) => m[key])]);
};

export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) =>
	validateEmbeddingModel(_models, "id");

export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) =>
	validateEmbeddingModel(_models, "name");

export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
|
0
|
hf_public_repos/chat-ui/src/lib
|
hf_public_repos/chat-ui/src/lib/server/abortedGenerations.ts
|
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
import { logger } from "$lib/server/logger";
import { collections } from "$lib/server/database";
import { onExit } from "./exitHandler";
/**
 * Singleton that keeps an in-memory snapshot of aborted generations,
 * refreshed from the database once per second.
 */
export class AbortedGenerations {
	private static instance: AbortedGenerations;

	// conversationId -> date the abort was requested
	private abortedGenerations: Map<string, Date> = new Map();

	private constructor() {
		// Fixed: passing `this.updateList` directly handed setInterval an unbound
		// method, so `this` was not the instance and the snapshot was never
		// stored on the singleton. The arrow wrapper keeps `this` bound.
		const interval = setInterval(() => this.updateList(), 1000);
		onExit(() => clearInterval(interval));
	}

	public static getInstance(): AbortedGenerations {
		if (!AbortedGenerations.instance) {
			AbortedGenerations.instance = new AbortedGenerations();
		}
		return AbortedGenerations.instance;
	}

	/** Latest snapshot; may be up to ~1s stale. */
	public getList(): Map<string, Date> {
		return this.abortedGenerations;
	}

	/** Polls the database and swaps in a fresh map. Errors are logged, never thrown. */
	private async updateList() {
		try {
			const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
			this.abortedGenerations = new Map(
				aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
			);
		} catch (err) {
			logger.error(err);
		}
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib
|
hf_public_repos/chat-ui/src/lib/server/exitHandler.ts
|
import { randomUUID } from "$lib/utils/randomUuid";
import { timeout } from "$lib/utils/timeout";
import { logger } from "./logger";
type ExitHandler = () => void | Promise<void>;
type ExitHandlerUnsubscribe = () => void;

// Registry of exit callbacks keyed by a random id so each can be removed individually.
const listeners = new Map<string, ExitHandler>();

/**
 * Registers a callback to run on process shutdown.
 * Returns a function that unregisters the callback.
 */
export function onExit(cb: ExitHandler): ExitHandlerUnsubscribe {
	const key = randomUUID();
	listeners.set(key, cb);
	return () => {
		listeners.delete(key);
	};
}
/** Runs one exit handler with a 30s timeout; failures are logged, never rethrown. */
async function runExitHandler(handler: ExitHandler): Promise<void> {
	const pending = Promise.resolve().then(handler);
	return timeout(pending, 30_000).catch((err) => {
		logger.error(err, "Exit handler failed to run");
	});
}
/**
 * Installs SIGINT/SIGTERM handlers: the first signal runs all registered exit
 * handlers and waits for the server to shut down; a third signal force-exits.
 */
export function initExitHandler() {
	let signalCount = 0;
	const exitHandler = async () => {
		signalCount += 1;
		switch (signalCount) {
			case 1:
				logger.info("Received signal... Exiting");
				await Promise.all([...listeners.values()].map(runExitHandler));
				logger.info("All exit handlers ran... Waiting for svelte server to exit");
				break;
			case 3:
				logger.warn("Received 3 signals... Exiting immediately");
				process.exit(1);
		}
	};
	process.on("SIGINT", exitHandler);
	process.on("SIGTERM", exitHandler);
}
|
0
|
hf_public_repos/chat-ui/src/lib
|
hf_public_repos/chat-ui/src/lib/server/isURLLocal.spec.ts
|
import { isURLLocal } from "./isURLLocal";
import { describe, expect, it } from "vitest";
// Tests for isURLLocal, which should only treat URLs resolving to loopback
// addresses as "local" — presumably used to guard server-side fetches (SSRF).
describe("isURLLocal", async () => {
	it("should return true for localhost", async () => {
		expect(await isURLLocal(new URL("http://localhost"))).toBe(true);
	});
	it("should return true for 127.0.0.1", async () => {
		expect(await isURLLocal(new URL("http://127.0.0.1"))).toBe(true);
	});
	// Any address in the 127.0.0.0/8 loopback range counts as local.
	it("should return true for 127.254.254.254", async () => {
		expect(await isURLLocal(new URL("http://127.254.254.254"))).toBe(true);
	});
	it("should return false for huggingface.co", async () => {
		expect(await isURLLocal(new URL("https://huggingface.co/"))).toBe(false);
	});
	// nip.io is a wildcard DNS service: 127.0.0.1.nip.io resolves to 127.0.0.1,
	// so the check must happen after DNS resolution, not on the hostname string.
	it("should return true for 127.0.0.1.nip.io", async () => {
		expect(await isURLLocal(new URL("http://127.0.0.1.nip.io"))).toBe(true);
	});
	// IPv6 is expected to be rejected (thrown), not classified.
	it("should fail on ipv6", async () => {
		await expect(isURLLocal(new URL("http://[::1]"))).rejects.toThrow();
	});
	it("should fail on ipv6 --1.sslip.io", async () => {
		await expect(isURLLocal(new URL("http://--1.sslip.io"))).rejects.toThrow();
	});
	// Unresolvable hostnames should propagate a DNS error.
	it("should fail on invalid domain names", async () => {
		await expect(
			isURLLocal(new URL("http://34329487239847329874923948732984.com/"))
		).rejects.toThrow();
	});
});
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts
|
import { z } from "zod";
import {
embeddingEndpointTei,
embeddingEndpointTeiParametersSchema,
} from "./tei/embeddingEndpoints";
import {
embeddingEndpointTransformersJS,
embeddingEndpointTransformersJSParametersSchema,
} from "./transformersjs/embeddingEndpoints";
import {
embeddingEndpointOpenAI,
embeddingEndpointOpenAIParametersSchema,
} from "./openai/embeddingEndpoints";
import { embeddingEndpointHfApi, embeddingEndpointHfApiSchema } from "./hfApi/embeddingHfApi";
// parameters passed when generating text
interface EmbeddingEndpointParameters {
	inputs: string[];
}

/** A single embedding vector. */
export type Embedding = number[];

// type signature for the endpoint: maps a batch of strings to one vector each,
// preserving input order
export type EmbeddingEndpoint = (params: EmbeddingEndpointParameters) => Promise<Embedding[]>;

// Union of all endpoint configurations, discriminated on the `type` field.
export const embeddingEndpointSchema = z.discriminatedUnion("type", [
	embeddingEndpointTeiParametersSchema,
	embeddingEndpointTransformersJSParametersSchema,
	embeddingEndpointOpenAIParametersSchema,
	embeddingEndpointHfApiSchema,
]);

// "tei" | "transformersjs" | "openai" | "hfapi"
type EmbeddingEndpointTypeOptions = z.infer<typeof embeddingEndpointSchema>["type"];

// generator function that takes in the type discriminator value for defining the endpoint and returns the endpoint
export type EmbeddingEndpointGenerator<T extends EmbeddingEndpointTypeOptions> = (
	inputs: Extract<z.infer<typeof embeddingEndpointSchema>, { type: T }>
) => EmbeddingEndpoint | Promise<EmbeddingEndpoint>;

// list of all endpoint generators, keyed by discriminator value
export const embeddingEndpoints: {
	[Key in EmbeddingEndpointTypeOptions]: EmbeddingEndpointGenerator<Key>;
} = {
	tei: embeddingEndpointTei,
	transformersjs: embeddingEndpointTransformersJS,
	openai: embeddingEndpointOpenAI,
	hfapi: embeddingEndpointHfApi,
};

export default embeddingEndpoints;
|
0
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts
|
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { env } from "$env/dynamic/private";
export const embeddingEndpointOpenAIParametersSchema = z.object({
	/** Relative weight used for random endpoint selection */
	weight: z.number().int().positive().default(1),
	/** The parent embedding model config (z.any(): not validated here) */
	model: z.any(),
	type: z.literal("openai"),
	url: z.string().url().default("https://api.openai.com/v1/embeddings"),
	// NOTE(review): if OPENAI_API_KEY is unset, the default is undefined and
	// string validation presumably fails at parse time — confirm intended.
	apiKey: z.string().default(env.OPENAI_API_KEY),
	/** Extra headers merged into every request */
	defaultHeaders: z.record(z.string()).default({}),
});
/**
 * Builds an EmbeddingEndpoint backed by the OpenAI embeddings API (or any
 * compatible server at `url`). Inputs are chunked by the model's
 * `maxBatchSize` (default 100) and per-batch results are concatenated in order.
 */
export async function embeddingEndpointOpenAI(
	input: z.input<typeof embeddingEndpointOpenAIParametersSchema>
): Promise<EmbeddingEndpoint> {
	const { url, model, apiKey, defaultHeaders } =
		embeddingEndpointOpenAIParametersSchema.parse(input);

	const maxBatchSize = model.maxBatchSize || 100;

	return async ({ inputs }) => {
		const requestURL = new URL(url);
		const batchesInputs = chunk(inputs, maxBatchSize);

		const batchesResults = await Promise.all(
			batchesInputs.map(async (batchInputs) => {
				const response = await fetch(requestURL, {
					method: "POST",
					headers: {
						Accept: "application/json",
						"Content-Type": "application/json",
						...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
						...defaultHeaders,
					},
					body: JSON.stringify({ input: batchInputs, model: model.name }),
				});

				// Fixed: a non-2xx response previously crashed below with an opaque
				// "cannot read properties of undefined" when reading `.data`.
				if (!response.ok) {
					throw new Error(
						`OpenAI embeddings request failed (${response.status}): ${await response.text()}`
					);
				}

				const embeddings: Embedding[] = [];
				const responseObject = await response.json();
				for (const embeddingObject of responseObject.data) {
					embeddings.push(embeddingObject.embedding);
				}
				return embeddings;
			})
		);

		// Flatten batches back into one list aligned with `inputs`.
		const flatAllEmbeddings = batchesResults.flat();
		return flatAllEmbeddings;
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints.ts
|
import { z } from "zod";
import type { EmbeddingEndpoint } from "../embeddingEndpoints";
import type { Tensor, FeatureExtractionPipeline } from "@huggingface/transformers";
import { pipeline } from "@huggingface/transformers";
export const embeddingEndpointTransformersJSParametersSchema = z.object({
	/** Relative weight used for random endpoint selection */
	weight: z.number().int().positive().default(1),
	/** The parent embedding model config (z.any(): not validated here) */
	model: z.any(),
	type: z.literal("transformersjs"),
});
// Use the Singleton pattern to enable lazy construction of the pipeline.
class TransformersJSModelsSingleton {
	// [modelName, pipelinePromise] pairs; at most one entry per model name.
	static instances: Array<[string, Promise<FeatureExtractionPipeline>]> = [];

	static async getInstance(modelName: string): Promise<FeatureExtractionPipeline> {
		const modelPipelineInstance = this.instances.find(([name]) => name === modelName);
		if (modelPipelineInstance) {
			const [, modelPipeline] = modelPipelineInstance;
			// dispose of the previous pipeline to clear memory
			// NOTE(review): this disposes and rebuilds the pipeline on *every*
			// call, so the cache never serves a hit — confirm this is the
			// intended memory-vs-reuse trade-off.
			await (await modelPipeline).dispose();
			this.instances = this.instances.filter(([name]) => name !== modelName);
		}
		const newModelPipeline = pipeline("feature-extraction", modelName);
		this.instances.push([modelName, newModelPipeline]);
		return newModelPipeline;
	}
}
/** Embeds `inputs` with the named model using mean pooling and L2 normalization. */
export async function calculateEmbedding(modelName: string, inputs: string[]) {
	const extractor = await TransformersJSModelsSingleton.getInstance(modelName);
	const tensor: Tensor = await extractor(inputs, { pooling: "mean", normalize: true });
	return tensor.tolist();
}
/** Builds an in-process EmbeddingEndpoint backed by transformers.js. */
export function embeddingEndpointTransformersJS(
	input: z.input<typeof embeddingEndpointTransformersJSParametersSchema>
): EmbeddingEndpoint {
	const parsed = embeddingEndpointTransformersJSParametersSchema.parse(input);
	return ({ inputs }) => calculateEmbedding(parsed.model.name, inputs);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts
|
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
export const embeddingEndpointTeiParametersSchema = z.object({
	/** Relative weight used for random endpoint selection */
	weight: z.number().int().positive().default(1),
	/** The parent embedding model config (z.any(): not validated here) */
	model: z.any(),
	type: z.literal("tei"),
	/** Base URL of the text-embeddings-inference server */
	url: z.string().url(),
	authorization: z
		.string()
		.optional()
		.transform((v) => (!v && env.HF_TOKEN ? "Bearer " + env.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header
});
/**
 * Fetches batching limits from the TEI server's /info route.
 * Falls back to defaults when the response cannot be parsed as JSON.
 */
const getModelInfoByUrl = async (url: string, authorization?: string) => {
	const { origin } = new URL(url);

	const headers: Record<string, string> = {
		Accept: "application/json",
		"Content-Type": "application/json",
	};
	if (authorization) {
		headers.Authorization = authorization;
	}

	const response = await fetch(`${origin}/info`, { headers });

	const defaults = { max_client_batch_size: 32, max_batch_tokens: 16384 };
	try {
		const json = await response.json();
		return { ...defaults, ...json };
	} catch {
		logger.debug("Could not get info from TEI embedding endpoint. Using defaults.");
		return defaults;
	}
};
/**
 * Builds an EmbeddingEndpoint backed by a text-embeddings-inference server.
 * Queries `/info` once to size batches, then posts chunked inputs to `/embed`.
 */
export async function embeddingEndpointTei(
	input: z.input<typeof embeddingEndpointTeiParametersSchema>
): Promise<EmbeddingEndpoint> {
	const { url, model, authorization } = embeddingEndpointTeiParametersSchema.parse(input);

	// Fixed: forward the authorization header to /info as well — it was omitted,
	// so protected servers always fell back to the default batch limits.
	const { max_client_batch_size, max_batch_tokens } = await getModelInfoByUrl(url, authorization);
	// Batch size is limited by both the server's per-request cap and its token
	// budget (approximated here using chunkCharLength characters per input).
	const maxBatchSize = Math.min(
		max_client_batch_size,
		Math.floor(max_batch_tokens / model.chunkCharLength)
	);

	return async ({ inputs }) => {
		const { origin } = new URL(url);
		const batchesInputs = chunk(inputs, maxBatchSize);

		const batchesResults = await Promise.all(
			batchesInputs.map(async (batchInputs) => {
				const response = await fetch(`${origin}/embed`, {
					method: "POST",
					headers: {
						Accept: "application/json",
						"Content-Type": "application/json",
						...(authorization ? { Authorization: authorization } : {}),
					},
					body: JSON.stringify({ inputs: batchInputs, normalize: true, truncate: true }),
				});
				const embeddings: Embedding[] = await response.json();
				return embeddings;
			})
		);

		// Flatten batches back into one list aligned with `inputs`.
		const flatAllEmbeddings = batchesResults.flat();
		return flatAllEmbeddings;
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints
|
hf_public_repos/chat-ui/src/lib/server/embeddingEndpoints/hfApi/embeddingHfApi.ts
|
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
export const embeddingEndpointHfApiSchema = z.object({
	/** Relative weight used for random endpoint selection */
	weight: z.number().int().positive().default(1),
	/** The parent embedding model config (z.any(): not validated here) */
	model: z.any(),
	type: z.literal("hfapi"),
	authorization: z
		.string()
		.optional()
		.transform((v) => (!v && env.HF_TOKEN ? "Bearer " + env.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header
});
/**
 * Builds an EmbeddingEndpoint backed by the Hugging Face Inference API.
 * Inputs are processed in batches of 128.
 */
export async function embeddingEndpointHfApi(
	input: z.input<typeof embeddingEndpointHfApiSchema>
): Promise<EmbeddingEndpoint> {
	const { model, authorization } = embeddingEndpointHfApiSchema.parse(input);
	// NOTE(review): the URL is built from model.id (which defaults to the model
	// name) — confirm this is always a valid HF model reference.
	const url = "https://api-inference.huggingface.co/models/" + model.id;
	return async ({ inputs }) => {
		const batchesInputs = chunk(inputs, 128);
		const batchesResults = await Promise.all(
			batchesInputs.map(async (batchInputs) => {
				const response = await fetch(`${url}`, {
					method: "POST",
					headers: {
						Accept: "application/json",
						"Content-Type": "application/json",
						...(authorization ? { Authorization: authorization } : {}),
					},
					// NOTE(review): this payload (source_sentence + sentences) is the
					// sentence-similarity task shape — verify the API returns embedding
					// vectors for it rather than similarity scores.
					body: JSON.stringify({
						inputs: {
							source_sentence: batchInputs[0],
							sentences: batchInputs.slice(1),
						},
					}),
				});
				if (!response.ok) {
					logger.error(await response.text());
					logger.error(response, "Failed to get embeddings from Hugging Face API");
					// NOTE(review): returning [] silently drops the whole batch, so the
					// output no longer lines up 1:1 with `inputs` — confirm callers
					// tolerate a shorter result.
					return [];
				}
				const embeddings: Embedding[] = await response.json();
				return embeddings;
			})
		);
		// Flatten batches back into a single list.
		const flatAllEmbeddings = batchesResults.flat();
		return flatAllEmbeddings;
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/websearch/update.ts
|
import type { WebSearchSource } from "$lib/types/WebSearch";
import {
MessageUpdateType,
MessageWebSearchUpdateType,
type MessageWebSearchErrorUpdate,
type MessageWebSearchFinishedUpdate,
type MessageWebSearchGeneralUpdate,
type MessageWebSearchSourcesUpdate,
} from "$lib/types/MessageUpdate";
/** Wraps a web-search progress message in the MessageUpdate envelope. */
export function makeGeneralUpdate(
	update: Pick<MessageWebSearchGeneralUpdate, "message" | "args">
): MessageWebSearchGeneralUpdate {
	const envelope = {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Update,
	} as const;
	return { ...envelope, ...update };
}
/** Wraps a web-search error message in the MessageUpdate envelope. */
export function makeErrorUpdate(
	update: Pick<MessageWebSearchErrorUpdate, "message" | "args">
): MessageWebSearchErrorUpdate {
	const envelope = {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Error,
	} as const;
	return { ...envelope, ...update };
}
/** Emits the list of found sources, keeping only link and title of each. */
export function makeSourcesUpdate(sources: WebSearchSource[]): MessageWebSearchSourcesUpdate {
	const slimSources = sources.map((source) => ({ link: source.link, title: source.title }));
	return {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Sources,
		message: "sources",
		sources: slimSources,
	};
}
/** Signals that the web-search flow has completed. */
export function makeFinalAnswerUpdate(): MessageWebSearchFinishedUpdate {
	const finished: MessageWebSearchFinishedUpdate = {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Finished,
	};
	return finished;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/websearch/runWebSearch.ts
|
import { defaultEmbeddingModel, embeddingModels } from "$lib/server/embeddingModels";
import type { Conversation } from "$lib/types/Conversation";
import type { Message } from "$lib/types/Message";
import type { WebSearch, WebSearchScrapedSource } from "$lib/types/WebSearch";
import type { Assistant } from "$lib/types/Assistant";
import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate";
import { search } from "./search/search";
import { scrape } from "./scrape/scrape";
import { findContextSources } from "./embed/embed";
import { removeParents } from "./markdown/tree";
import {
makeErrorUpdate,
makeFinalAnswerUpdate,
makeGeneralUpdate,
makeSourcesUpdate,
} from "./update";
import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators";
import { MetricsServer } from "../metrics";
import { logger } from "$lib/server/logger";
// Scrape at most this many search results...
const MAX_N_PAGES_TO_SCRAPE = 8 as const;
// ...and embed at most this many of the successfully scraped pages.
const MAX_N_PAGES_TO_EMBED = 5 as const;

/**
 * Runs the full web-search pipeline as an async generator:
 * search -> scrape -> chunk/embed -> pick context sources.
 * Yields progress updates for the UI and returns the final WebSearch record.
 * On failure it yields an error update and returns an empty WebSearch
 * instead of throwing.
 */
export async function* runWebSearch(
	conv: Conversation,
	messages: Message[],
	ragSettings?: Assistant["rag"],
	query?: string
): AsyncGenerator<MessageWebSearchUpdate, WebSearch, undefined> {
	// The last message is the user prompt the search context is built for.
	const prompt = messages[messages.length - 1].content;
	const createdAt = new Date();
	const updatedAt = new Date();

	MetricsServer.getMetrics().webSearch.requestCount.inc();

	try {
		// Use the conversation's embedding model if still configured, else the default.
		const embeddingModel =
			embeddingModels.find((m) => m.id === conv.embeddingModel) ?? defaultEmbeddingModel;
		if (!embeddingModel) {
			throw Error(`Embedding model ${conv.embeddingModel} not available anymore`);
		}

		// Search the web
		const { searchQuery, pages } = yield* search(messages, ragSettings, query);
		if (pages.length === 0) throw Error("No results found for this search query");

		// Scrape pages (concurrently; scraper updates are interleaved via merge)
		yield makeGeneralUpdate({ message: "Browsing search results" });
		const allScrapedPages = yield* mergeAsyncGenerators(
			pages.slice(0, MAX_N_PAGES_TO_SCRAPE).map(scrape(embeddingModel.chunkCharLength))
		);
		// Keep only pages that scraped successfully and produced markdown content.
		const scrapedPages = allScrapedPages
			.filter((p): p is WebSearchScrapedSource => Boolean(p))
			.filter((p) => p.page.markdownTree.children.length > 0)
			.slice(0, MAX_N_PAGES_TO_EMBED);
		if (!scrapedPages.length) {
			throw Error(`No text found in the first ${MAX_N_PAGES_TO_SCRAPE} results`);
		}

		// Chunk the text of each of the elements and find the most similar chunks to the prompt
		yield makeGeneralUpdate({ message: "Extracting relevant information" });
		// removeParents strips the upward links so the trees have no cycles.
		const contextSources = await findContextSources(scrapedPages, prompt, embeddingModel).then(
			(ctxSources) =>
				ctxSources.map((source) => ({
					...source,
					page: { ...source.page, markdownTree: removeParents(source.page.markdownTree) },
				}))
		);
		yield makeSourcesUpdate(contextSources);

		const webSearch: WebSearch = {
			prompt,
			searchQuery,
			results: scrapedPages.map(({ page, ...source }) => ({
				...source,
				page: { ...page, markdownTree: removeParents(page.markdownTree) },
			})),
			contextSources,
			createdAt,
			updatedAt,
		};
		yield makeFinalAnswerUpdate();
		return webSearch;
	} catch (searchError) {
		const message = searchError instanceof Error ? searchError.message : String(searchError);
		logger.error(message);
		yield makeErrorUpdate({ message: "An error occurred", args: [message] });

		// Fall back to an empty result so the caller can continue without search.
		const webSearch: WebSearch = {
			prompt,
			searchQuery: "",
			results: [],
			contextSources: [],
			createdAt,
			updatedAt,
		};
		yield makeFinalAnswerUpdate();
		return webSearch;
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/tree.ts
|
import type { SerializedHTMLElement } from "../scrape/types";
import { htmlElementToMarkdownElements, mergeAdjacentElements } from "./fromHtml";
import type { HeaderElement, MarkdownElement } from "./types";
import { MarkdownElementType } from "./types";
import { chunkElements } from "./utils/chunk";
/**
 * Converts HTML elements to Markdown elements and creates a tree based on header tags
 * For example: h1 [h2 [p p blockquote] h2 [h3 [...] ] ]
 **/
export function htmlToMarkdownTree(
	title: string,
	htmlElements: SerializedHTMLElement[],
	maxCharsPerElem: number
): HeaderElement {
	// Synthetic level-1 root header carrying the page title; `parent` tracks
	// the current insertion point while walking the flat element list.
	let parent: HeaderElement = {
		type: MarkdownElementType.Header,
		level: 1,
		parent: null,
		content: title,
		children: [],
	};

	// Flatten HTML into markdown elements, merge adjacent paragraphs, then
	// split anything longer than maxCharsPerElem.
	const markdownElements = chunkElements(
		mergeAdjacentElements(
			htmlElements.flatMap((elem) => htmlElementToMarkdownElements(parent, elem))
		),
		maxCharsPerElem
	);

	for (const elem of markdownElements) {
		// Non-header elements attach to the current header.
		if (elem.type !== MarkdownElementType.Header) {
			elem.parent = parent;
			parent.children.push(elem);
			continue;
		}

		// add 1 to current level to offset for the title being level 1
		elem.level += 1;

		// Pop up header levels until reaching the same level as the current header
		// or until we reach the root
		inner: while (parent !== null && parent.parent !== null) {
			if (parent.level < elem.level) break inner;
			parent = parent.parent;
		}

		// Attach the header and make it the new insertion point.
		parent.children.push(elem);
		parent = elem;
	}

	// Pop up to the root
	while (parent.parent !== null) {
		parent = parent.parent;
	}
	return parent;
}
/**
 * Returns a copy of the tree with every `parent` pointer nulled out,
 * so the structure is acyclic and safe to serialize. The input is not mutated.
 */
export function removeParents<T extends MarkdownElement>(elem: T): T {
	if (!("children" in elem)) {
		return { ...elem, parent: null };
	}
	const children = elem.children.map((child) => removeParents(child));
	return { ...elem, parent: null, children };
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/fromHtml.ts
|
import { collapseString, sanitizeString } from "./utils/nlp";
import { stringifyHTMLElements, stringifyHTMLElementsUnformatted } from "./utils/stringify";
import { MarkdownElementType, tagNameMap, type HeaderElement, type MarkdownElement } from "./types";
import type { SerializedHTMLElement } from "../scrape/types";
// Context inherited from enclosing list/blockquote tags while recursively
// flattening the DOM.
interface ConversionState {
	// Which element type bare text should become at this point in the tree.
	defaultType:
		| MarkdownElementType.Paragraph
		| MarkdownElementType.BlockQuote
		| MarkdownElementType.UnorderedListItem
		| MarkdownElementType.OrderedListItem;
	listDepth: number;
	blockQuoteDepth: number;
}

/**
 * Recursively flattens a serialized HTML element into markdown elements.
 * Structure tags (lists, blockquotes) only update the conversion state;
 * leaf elements are emitted when text nodes are reached. Headers and code
 * blocks are emitted directly with their stringified content.
 */
export function htmlElementToMarkdownElements(
	parent: HeaderElement,
	elem: SerializedHTMLElement | string,
	prevState: ConversionState = {
		defaultType: MarkdownElementType.Paragraph,
		listDepth: 0,
		blockQuoteDepth: 0,
	}
): MarkdownElement | MarkdownElement[] {
	// Found text so create an element based on the previous state
	if (typeof elem === "string") {
		// Drop whitespace-only text nodes.
		if (elem.trim().length === 0) return [];
		if (
			prevState.defaultType === MarkdownElementType.UnorderedListItem ||
			prevState.defaultType === MarkdownElementType.OrderedListItem
		) {
			return {
				parent,
				type: prevState.defaultType,
				content: elem,
				depth: prevState.listDepth,
			};
		}
		if (prevState.defaultType === MarkdownElementType.BlockQuote) {
			return {
				parent,
				type: prevState.defaultType,
				content: elem,
				depth: prevState.blockQuoteDepth,
			};
		}
		return { parent, type: prevState.defaultType, content: elem };
	}

	// Unlisted tags fall back to Paragraph.
	const type = tagNameMap[elem.tagName] ?? MarkdownElementType.Paragraph;

	// Update the state based on the current element
	const state: ConversionState = { ...prevState };
	if (type === MarkdownElementType.UnorderedList || type === MarkdownElementType.OrderedList) {
		state.listDepth += 1;
		state.defaultType =
			type === MarkdownElementType.UnorderedList
				? MarkdownElementType.UnorderedListItem
				: MarkdownElementType.OrderedListItem;
	}
	if (type === MarkdownElementType.BlockQuote) {
		state.defaultType = MarkdownElementType.BlockQuote;
		state.blockQuoteDepth += 1;
	}

	// Headers
	if (type === MarkdownElementType.Header) {
		return {
			parent,
			type,
			// Header level comes from the tag name digit ("h2" -> 2).
			level: Number(elem.tagName[1]),
			content: collapseString(stringifyHTMLElements(elem.content)),
			children: [],
		};
	}

	// Code blocks
	if (type === MarkdownElementType.CodeBlock) {
		return {
			parent,
			type,
			content: sanitizeString(stringifyHTMLElementsUnformatted(elem.content)),
		};
	}

	// Typical case, we want to flatten the DOM and only create elements when we see text
	return elem.content.flatMap((el) => htmlElementToMarkdownElements(parent, el, state));
}
/**
 * Merges runs of consecutive Paragraph elements into a single paragraph.
 * Fixed to avoid mutating the input: the previous version appended to
 * `last.content` in place, altering element objects owned by the caller.
 */
export function mergeAdjacentElements(elements: MarkdownElement[]): MarkdownElement[] {
	return elements.reduce<MarkdownElement[]>((acc, elem) => {
		const last = acc[acc.length - 1];
		if (last && last.type === MarkdownElementType.Paragraph && last.type === elem.type) {
			// Replace the tail with a fresh merged element instead of mutating it.
			const merged = { ...last, content: last.content + elem.content };
			return [...acc.slice(0, -1), merged];
		}
		return [...acc, elem];
	}, []);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/types.ts
|
/* eslint-disable-next-line no-shadow */
export enum MarkdownElementType {
	Header = "HEADER",
	Paragraph = "PARAGRAPH",
	BlockQuote = "BLOCKQUOTE",
	CodeBlock = "CODE_BLOCK",
	UnorderedList = "UNORDERED_LIST",
	OrderedList = "ORDERED_LIST",
	UnorderedListItem = "UNORDERED_LIST_ITEM",
	OrderedListItem = "ORDERED_LIST_ITEM",
}

// Common fields of every markdown node; `parent` is an upward link that is
// stripped (see removeParents in tree.ts) before serialization.
interface BaseMarkdownElement<T = MarkdownElementType> {
	type: T;
	content: string;
	parent: HeaderElement | null;
}

// Headers are the only nodes with children; they define the tree structure.
export interface HeaderElement extends BaseMarkdownElement<MarkdownElementType.Header> {
	level: number;
	children: MarkdownElement[];
}

type ListItem = MarkdownElementType.UnorderedListItem | MarkdownElementType.OrderedListItem;

// `depth` is the nesting level of the surrounding list.
interface ListItemElement extends BaseMarkdownElement<ListItem> {
	depth: number;
}
// `depth` is the nesting level of the surrounding blockquotes.
interface BlockQuoteElement extends BaseMarkdownElement<MarkdownElementType.BlockQuote> {
	depth: number;
}
interface ParagraphElement extends BaseMarkdownElement<MarkdownElementType.Paragraph> {}
interface CodeBlockElement extends BaseMarkdownElement<MarkdownElementType.CodeBlock> {}
export type MarkdownElement =
	| HeaderElement
	| ParagraphElement
	| BlockQuoteElement
	| CodeBlockElement
	| ListItemElement;

// Maps HTML tag names to markdown element types; unlisted tags are treated as
// paragraphs in fromHtml.ts.
// NOTE(review): `li` always maps to UnorderedListItem even inside <ol>; the
// ordered case appears to be handled via ConversionState.defaultType instead.
export const tagNameMap: Record<string, MarkdownElementType> = {
	h1: MarkdownElementType.Header,
	h2: MarkdownElementType.Header,
	h3: MarkdownElementType.Header,
	h4: MarkdownElementType.Header,
	h5: MarkdownElementType.Header,
	h6: MarkdownElementType.Header,
	div: MarkdownElementType.Paragraph,
	p: MarkdownElementType.Paragraph,
	blockquote: MarkdownElementType.BlockQuote,
	pre: MarkdownElementType.CodeBlock,
	ul: MarkdownElementType.UnorderedList,
	ol: MarkdownElementType.OrderedList,
	li: MarkdownElementType.UnorderedListItem,
};
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/utils/nlp.ts
|
/** Remove excess whitespace and newlines */
export const sanitizeString = (str: string) => {
	const trimmedLines = str.split("\n").map((line) => line.trim());
	const nonEmptyLines = trimmedLines.filter((line) => line.length > 0);
	return nonEmptyLines.join("\n").replaceAll(/ +/g, " ");
};

/** Collapses a string into a single line */
export const collapseString = (str: string) => sanitizeString(str.replaceAll(/\n/g, " "));
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/utils/stringify.ts
|
import type { SerializedHTMLElement } from "../../scrape/types";
import { MarkdownElementType, type MarkdownElement } from "../types";
// --- Markdown Elements ---

/** Converts markdown element to a string with formatting */
export function stringifyMarkdownElement(elem: MarkdownElement): string {
	const content = elem.content.trim();
	if (elem.type === MarkdownElementType.Header) return `${"#".repeat(elem.level)} ${content}\n\n`;
	if (elem.type === MarkdownElementType.BlockQuote) {
		return `${"> ".repeat(elem.depth)}${content}\n\n`;
	}
	if (elem.type === MarkdownElementType.CodeBlock) return `\`\`\`\n${content}\n\`\`\`\n\n`;
	if (elem.type === MarkdownElementType.UnorderedListItem) return `- ${content}\n`;
	if (elem.type === MarkdownElementType.OrderedListItem) {
		// Derive the item number from its position among its siblings.
		const siblings = elem.parent?.children ?? [elem];
		const currentIndex = siblings.indexOf(elem);
		// NOTE(review): findLastIndex over the elements *after* the current one
		// looks suspicious for computing the item number (e.g. the first of three
		// consecutive items yields order 0) — verify numbering on real input.
		const lastAdjacentIndex = siblings
			.slice(currentIndex + 1)
			.findLastIndex((child) => child.type === MarkdownElementType.OrderedListItem);
		const order = currentIndex - lastAdjacentIndex + 1;
		return `${order}. ${content}\n`;
	}
	// Paragraphs (and any other type) are plain text blocks.
	return `${content}\n\n`;
}
/** Converts a tree of markdown elements to a string with formatting */
export function stringifyMarkdownElementTree(elem: MarkdownElement): string {
	const self = stringifyMarkdownElement(elem);
	if ("children" in elem) {
		return self + elem.children.map(stringifyMarkdownElementTree).join("");
	}
	return self;
}
// ----- HTML Elements -----

/** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */
export function stringifyHTMLElements(elems: (SerializedHTMLElement | string)[]): string {
	return elems.map(stringifyHTMLElement).join("").trim();
}

/** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */
export function stringifyHTMLElement(elem: SerializedHTMLElement | string): string {
	if (typeof elem === "string") return elem;
	if (elem.tagName === "br") return "\n";

	// Stringify children once and reuse the result (the <a> branch previously
	// recomputed this recursion twice).
	const content = elem.content.map(stringifyHTMLElement).join("");
	if (content.length === 0) return content;

	if (elem.tagName === "strong" || elem.tagName === "b") return `**${content}**`;
	if (elem.tagName === "em" || elem.tagName === "i") return `*${content}*`;
	if (elem.tagName === "s" || elem.tagName === "strike") return `~~${content}~~`;
	if (elem.tagName === "code" || elem.tagName === "var" || elem.tagName === "tt") {
		return `\`${content}\``;
	}
	if (elem.tagName === "sup") return `<sup>${content}</sup>`;
	if (elem.tagName === "sub") return `<sub>${content}</sub>`;
	if (elem.tagName === "a" && content.trim().length > 0) {
		const href = elem.attributes.href;
		// Links without an href degrade to their inner text.
		if (!href) return content;
		return `[${content}](${href})`;
	}
	// Any other tag contributes only its text.
	return content;
}
/** Grabs all text content directly, ignoring HTML tags */
export function stringifyHTMLElementsUnformatted(
	elems: (SerializedHTMLElement | string)[]
): string {
	let text = "";
	for (const elem of elems) {
		text += stringifyHTMLElementUnformatted(elem);
	}
	return text;
}

/** Grabs all text content directly, ignoring HTML tags */
function stringifyHTMLElementUnformatted(elem: SerializedHTMLElement | string): string {
	if (typeof elem === "string") return elem;
	let text = "";
	for (const child of elem.content) {
		text += stringifyHTMLElementUnformatted(child);
	}
	return text;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown
|
hf_public_repos/chat-ui/src/lib/server/websearch/markdown/utils/chunk.ts
|
import { sentences as splitBySentences } from "sbd";
import { MarkdownElementType, type MarkdownElement } from "../types";
/**
 * Splits element contents into chunks of at most `maxLength` characters.
 * Headers are truncated instead of split, since splitting one would break the tree.
 */
export function chunkElements(elements: MarkdownElement[], maxLength: number): MarkdownElement[] {
	const chunked: MarkdownElement[] = [];
	for (const elem of elements) {
		if (elem.type === MarkdownElementType.Header) {
			// Can't split headers because it would break the tree, and this situation
			// should be rare, so we just cut off the end
			chunked.push({ ...elem, content: elem.content.slice(0, maxLength) });
			continue;
		}
		for (const content of enforceMaxLength(elem.content, maxLength)) {
			chunked.push({ ...elem, content });
		}
	}
	return chunked;
}
// Split-point candidates ordered from most to least natural break.
const delimitersByPriority = ["?", "!", ".", ";", ":", ",", "|", " - ", " ", "-"];
/**
 * Splits `text` into chunks of at most `maxLength` characters: first by sentence
 * (via sbd), then over-long sentences at the best available delimiter, finally
 * greedily re-merging consecutive pieces that still fit. Never returns empty chunks.
 */
function enforceMaxLength(text: string, maxLength: number): string[] {
if (text.length <= maxLength) return [text].filter(Boolean);
return splitBySentences(text)
.flatMap((sentence) => {
if (sentence.length <= maxLength) return sentence;
// Discover all necessary split points to fit the sentence within the max length
const indices: [number, number][] = [];
while ((indices.at(-1)?.[1] ?? 0) < sentence.length) {
const prevIndex = indices.at(-1)?.[1] ?? 0;
// Remaining text fits within maxLength
if (prevIndex + maxLength >= sentence.length) {
indices.push([prevIndex, sentence.length]);
continue;
}
// NOTE(review): lastIndexOf searches from the start of the sentence, so it can
// find a delimiter at or before prevIndex; the Math.max below then degrades to
// 1-character slices — confirm this edge is acceptable.
const bestDelimiter = delimitersByPriority.find(
(delimiter) => sentence.lastIndexOf(delimiter, prevIndex + maxLength) !== -1
);
// Fallback in the unusual case that no delimiter is found
if (!bestDelimiter) {
indices.push([prevIndex, prevIndex + maxLength]);
continue;
}
const closestDelimiter = sentence.lastIndexOf(bestDelimiter, prevIndex + maxLength);
// Math.max guards against a zero-width slice (would loop forever).
indices.push([prevIndex, Math.max(prevIndex + 1, closestDelimiter)]);
}
return indices.map((sliceIndices) => sentence.slice(...sliceIndices));
})
// Greedily merge consecutive pieces back together while they still fit.
.reduce<string[]>(
(chunks, sentence) => {
const lastChunk = chunks[chunks.length - 1];
if (lastChunk.length + sentence.length <= maxLength) {
return [...chunks.slice(0, -1), lastChunk + sentence];
}
return [...chunks, sentence];
},
[""]
)
.filter(Boolean);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/scrape/parser.ts
|
import type { SerializedHTMLElement } from "./types";
/** Configuration for the inlined DBSCAN clustering routine below. */
interface DBSCANOptions<T> {
dataset: T[];
// Neighborhood radius; points within it (per epsilonCompare) count as neighbors.
epsilon?: number;
epsilonCompare?: (distance: number, epsilon: number) => boolean;
// Minimum number of neighbors for a point to seed/extend a cluster.
minimumPoints?: number;
distanceFunction: (a: T, b: T) => number;
}
export function spatialParser() {
/**
* Implementation for dbscan, inlined and migrated to typescript from https://github.com/cdxOo/dbscan (MIT License)
*/
const DBSCAN = <T>({
dataset,
epsilon = 1,
epsilonCompare = (dist, e) => dist < e,
minimumPoints = 2,
distanceFunction,
}: DBSCANOptions<T>) => {
// Points already examined as potential cluster seeds.
const visitedIndices: Record<number, boolean> = {};
const isVisited = (i: number) => visitedIndices[i];
const markVisited = (i: number) => {
visitedIndices[i] = true;
};
// Points already assigned to some cluster.
const clusteredIndices: Record<number, boolean> = {};
const isClustered = (i: number) => clusteredIndices[i];
const markClustered = (i: number) => {
clusteredIndices[i] = true;
};
// Appends items from sourceArray that targetArray doesn't already contain.
const uniqueMerge = <U>(targetArray: U[], sourceArray: U[]) => {
for (let i = 0; i < sourceArray.length; i += 1) {
const item = sourceArray[i];
if (targetArray.indexOf(item) < 0) {
targetArray.push(item);
}
}
};
// All dataset indices within epsilon of `index` (includes the point itself).
const findNeighbors = (index: number) => {
const neighbors = [];
for (let other = 0; other < dataset.length; other += 1) {
const distance = distanceFunction(dataset[index], dataset[other]);
if (epsilonCompare(distance, epsilon)) {
neighbors.push(other);
}
}
return neighbors;
};
const noise: number[] = [];
const addNoise = (i: number) => noise.push(i);
const clusters: number[][] = [];
// push returns the new length, so this yields the new cluster's index.
const createCluster = () => clusters.push([]) - 1;
const addIndexToCluster = (c: number, i: number) => {
clusters[c].push(i);
markClustered(i);
};
// Grows cluster c by walking `neighbors`, which may grow during iteration as
// density-reachable points contribute their own neighborhoods.
const expandCluster = (c: number, neighbors: number[]) => {
for (let i = 0; i < neighbors.length; i += 1) {
const neighborIndex = neighbors[i];
if (!isVisited(neighborIndex)) {
markVisited(neighborIndex);
const secondaryNeighbors = findNeighbors(neighborIndex);
if (secondaryNeighbors.length >= minimumPoints) {
uniqueMerge(neighbors, secondaryNeighbors);
}
}
if (!isClustered(neighborIndex)) {
addIndexToCluster(c, neighborIndex);
}
}
};
// Main loop: each unvisited point either becomes noise or seeds a new cluster.
dataset.forEach((_, index) => {
if (!isVisited(index)) {
markVisited(index);
const neighbors = findNeighbors(index);
if (neighbors.length < minimumPoints) {
addNoise(index);
} else {
const clusterIndex = createCluster();
addIndexToCluster(clusterIndex, index);
expandCluster(clusterIndex, neighbors);
}
}
});
return { clusters, noise };
};
// -----------
// Scraping implementation
// Tags whose subtrees are navigation/boilerplate; text inside them is rejected.
const IgnoredTagsList = [
"footer",
"nav",
"aside",
"script",
"style",
"noscript",
"form",
"button",
];
// Tags treated as inline formatting; the readable-parent search climbs past these.
const InlineTags = [
"a",
"abbrv",
"span",
"address",
"time",
"acronym",
"strong",
"b",
"br",
"sub",
"sup",
"tt",
"var",
"em",
"i",
];
type ReadableNode = HTMLElement;
// A candidate readable element paired with its layout rectangle.
type NodeWithRect = {
node: ReadableNode;
rect: DOMRect;
};
/** True when `node` is its parent's only child (never true directly under <body>). */
const isOnlyChild = (node: Node) => {
	if (!node.parentElement) return true;
	// nodeName is uppercase for elements in HTML documents ("BODY"), so compare
	// case-insensitively — the original lowercase comparison could never match.
	if (node.parentElement.nodeName.toLowerCase() === "body") return false;
	if (node.parentElement.childNodes.length === 1) return true;
	return false;
};
// True when the parent exists and is not a block-level container, so the
// inline-tag climb may continue past it.
const hasValidInlineParent = (node: Node) => {
return node.parentElement && !node.parentElement.matches("div, section, article, main, body ");
};
// True while we haven't climbed all the way up to document.body.
const hasValidParent = (node: Node) => {
return node.parentElement && !node.parentElement.isSameNode(document.body);
};
// Pre-computed candidate ancestors used to lift text nodes to richer containers.
const possibleCodeParents = Array.from(document.querySelectorAll("pre, p"));
const possibleTableParents = Array.from(document.querySelectorAll("table"));
const possibleListParents = Array.from(document.querySelectorAll("ul, ol"));
/**
 * We want to find the highest parent of text node in the cluster.
 * For example in this case: <p><span>Text here</span></p>
 * the P tag is highest parent.
 */
const findHighestDirectParentOfReadableNode = (node: Node): HTMLElement => {
// go up the tree until the parent is no longer an only child
let parent = node.parentElement;
// if the parent is an inline tag, then go up one more level
while (
parent &&
hasValidInlineParent(parent) &&
InlineTags.includes(parent?.tagName.toLowerCase())
) {
parent = parent.parentElement;
}
while (parent && isOnlyChild(parent)) {
if (!hasValidParent(parent)) break;
parent = parent.parentElement;
}
if (!parent) {
throw new Error(
"disconnected node found, this should not really be possible when traversing through the dom"
);
}
// if the parent is a span, code or div tag check if there is a pre tag or p tag above it
// NOTE(review): `as HTMLElement` hides that find() may return undefined; each
// cast is guarded by the `if (hasParent)` check below.
if (["span", "code", "div"].includes(parent.nodeName.toLowerCase())) {
const hasParent = possibleCodeParents.find((tag) => tag.contains(parent)) as HTMLElement;
if (hasParent) {
parent = hasParent;
}
}
// if the parent is a li tag check if there is a ul or ol tag above it
if (parent.nodeName.toLowerCase() === "li") {
const hasParent = possibleListParents.find((tag) => tag.contains(parent)) as HTMLElement;
if (hasParent) {
parent = hasParent;
}
}
// if the parent is a td, th, tr tag check if there is a table tag above it
if (["td", "th", "tr"].includes(parent.nodeName.toLowerCase())) {
const hasParent = possibleTableParents.find((tag) => tag.contains(parent)) as HTMLElement;
if (hasParent) {
parent = hasParent;
}
}
return parent;
};
// All elements whose subtrees are excluded from readability (footer, nav, …).
const barredNodes = Array.from(document.querySelectorAll(IgnoredTagsList.join(",")));
/**
 * Heuristic filter for text nodes: requires at least 10 non-whitespace-trimmed
 * characters, a visible parent with a non-trivial bounding box, and no barred
 * (boilerplate) ancestor.
 */
const doesNodePassHeuristics = (node: Node) => {
	if ((node.textContent ?? "").trim().length < 10) {
		return false;
	}
	const parentNode = findHighestDirectParentOfReadableNode(node);
	// Single guard block — the original duplicated this `instanceof Element` check.
	if (parentNode && parentNode instanceof Element) {
		if (
			!parentNode.checkVisibility({
				checkOpacity: true,
				checkVisibilityCSS: true,
			})
		)
			return false;
		const rect = parentNode.getBoundingClientRect();
		// elements that are readable usually don't have really small height or width
		if (rect.width < 4 || rect.height < 4) {
			return false;
		}
		// reject text living inside ignored boilerplate containers
		if (barredNodes.some((barredNode) => barredNode.contains(parentNode))) {
			return false;
		}
	}
	return true;
};
// Walks every text node under <body>, keeps those that pass the readability
// heuristics, lifts each to its highest direct parent, and dedupes the parents.
const getAllReadableNodes = (): NodeWithRect[] => {
if (!document.body) throw new Error("Page failed to load");
const treeWalker = document.createTreeWalker(document.body, NodeFilter.SHOW_TEXT, {
acceptNode(node) {
if (doesNodePassHeuristics(node)) {
return NodeFilter.FILTER_ACCEPT;
} else {
return NodeFilter.FILTER_SKIP;
}
},
});
const readableNodes = [];
while (treeWalker.nextNode()) {
readableNodes.push(treeWalker.currentNode as ReadableNode);
}
/*
* <table><p>hello</p><p>world</p></table>
* table is already included in the parent of the first p tag
*/
const parentsForReadableNodes = readableNodes.map(findHighestDirectParentOfReadableNode);
const listWithOnlyParents: HTMLElement[] = [];
// find unique nodes in the parent list, a unique node is a node that is not a child of any other node in the list
// NOTE(review): pairwise containment scan is O(n²); presumably fine for page-sized n.
for (let i = 0; i < parentsForReadableNodes.length; i++) {
const node = parentsForReadableNodes[i];
const hasParentInList = parentsForReadableNodes.find((otherNode, idx) => {
if (i === idx) return false;
return otherNode.contains(node);
});
listWithOnlyParents.push(hasParentInList ? hasParentInList : node);
}
const uniqueParents = Array.from(new Set(listWithOnlyParents));
// Capture layout rects once; clustering below works purely on these.
return uniqueParents.map((node) => {
return {
node,
rect: node.getBoundingClientRect(),
};
});
};
/**
 * Euclidean gap between two nodes' rects. Assumes upright (non-rotated) rects,
 * as returned by getBoundingClientRect; overlapping projections contribute 0,
 * so intersecting rects have distance 0.
 */
const distanceFunction = (a: NodeWithRect, b: NodeWithRect) => {
	const rectA = a.rect;
	const rectB = b.rect;
	// Horizontal gap between the x-projections (0 when they overlap)
	let dx = 0;
	if (rectA.x + rectA.width < rectB.x) {
		dx = rectB.x - (rectA.x + rectA.width);
	} else if (rectB.x + rectB.width < rectA.x) {
		dx = rectA.x - (rectB.x + rectB.width);
	}
	// Vertical gap between the y-projections (0 when they overlap)
	let dy = 0;
	if (rectA.y + rectA.height < rectB.y) {
		dy = rectB.y - (rectA.y + rectA.height);
	} else if (rectB.y + rectB.height < rectA.y) {
		dy = rectA.y - (rectB.y + rectB.height);
	}
	// Euclidean distance between the closest edges
	return Math.sqrt(dx * dx + dy * dy);
};
/**
 * Clusters nodes using dbscan.
 * epsilon: 28 — rects closer than 28px are grouped together;
 * minimumPoints: 1 — every node lands in some cluster (no noise).
 */
const clusterReadableNodes = (nodes: NodeWithRect[]) => {
const { clusters } = DBSCAN({
dataset: nodes,
epsilon: 28,
minimumPoints: 1,
distanceFunction,
});
return clusters;
};
// Total visible text length of a cluster, with double spaces and newlines
// stripped. Indexes into `readableNodes`, declared later in this function body
// but initialized before any call site runs.
const totalTextLength = (cluster: number[]) => {
return cluster
.map((t) => readableNodes[t].node.innerText?.replaceAll(/ {2}|\r\n|\n|\r/gm, ""))
.join("").length;
};
/** True when a and b differ by strictly less than epsilon (default 1). */
const approximatelyEqual = (a: number, b: number, epsilon = 1) => Math.abs(a - b) < epsilon;
/** Axis-aligned bounding box covering every node in the cluster. */
const getClusterBounds = (cluster: number[]) => {
const leftMostPoint = Math.min(...cluster.map((c) => readableNodes[c].rect.x));
const topMostPoint = Math.min(...cluster.map((c) => readableNodes[c].rect.y));
const rightMostPoint = Math.max(
...cluster.map((c) => readableNodes[c].rect.x + readableNodes[c].rect.width)
);
const bottomMostPoint = Math.max(
...cluster.map((c) => readableNodes[c].rect.y + readableNodes[c].rect.height)
);
return {
// left most element
x: leftMostPoint,
y: topMostPoint,
width: rightMostPoint - leftMostPoint,
height: bottomMostPoint - topMostPoint,
};
};
/** Rounds `num` to the given number of decimal places (default 2). */
const round = (num: number, decimalPlaces = 2) => {
	const factor = 10 ** decimalPlaces;
	return Math.round(num * factor) / factor;
};
/** Horizontal distance from the cluster to the screen's center (0 if it spans it). */
const clusterCentrality = (cluster: number[]) => {
const bounds = getClusterBounds(cluster);
const centerOfScreen = window.innerWidth / 2;
// the cluster contains the center of the screen
if (bounds.x < centerOfScreen && bounds.x + bounds.width > centerOfScreen) {
return 0;
}
// the cluster lies entirely left of center: gap from its right edge
if (bounds.x + bounds.width < centerOfScreen) {
return centerOfScreen - (bounds.x + bounds.width);
}
// the cluster lies entirely right of center: gap from its left edge
return bounds.x - centerOfScreen;
};
/** Percentage of the page's total text length contributed by this cluster (2 d.p.). */
const percentageTextShare = (cluster: number[], totalLength: number) => {
// NOTE: the centrality penalty is applied later in findCriticalClusters, not here.
return round((totalTextLength(cluster) / totalLength) * 100);
};
/**
 * Two clusters should merge when they are horizontally aligned (x and width
 * within 40px) and vertically adjacent (gap under 100px).
 */
const shouldMergeClusters = (clusterA: number[], clusterB: number[]): boolean => {
	const clusterABounds = getClusterBounds(clusterA);
	const clusterBBounds = getClusterBounds(clusterB);
	// A cluster is horizontally aligned if the x and width are roughly equal
	const isHorizontallyAligned =
		approximatelyEqual(clusterABounds.x, clusterBBounds.x, 40) &&
		approximatelyEqual(clusterABounds.width, clusterBBounds.width, 40);
	if (!isHorizontallyAligned) return false;
	// check the y gap between the clusters
	const higherCluster = clusterABounds.y < clusterBBounds.y ? clusterABounds : clusterBBounds;
	const lowerCluster = clusterABounds.y < clusterBBounds.y ? clusterBBounds : clusterABounds;
	const yGap = lowerCluster.y - (higherCluster.y + higherCluster.height);
	// Explicit boolean return — the original fell through returning undefined
	// (falsy, so behavior is unchanged, but it violated noImplicitReturns).
	return approximatelyEqual(yGap, 0, 100);
};
/**
 * Picks the clusters most likely to hold the page's main content: merges
 * vertically stacked, similarly shaped clusters, then selects by text share
 * (with a centrality penalty) until at least 60% of the text is covered.
 */
const findCriticalClusters = (clusters: number[][]) => {
	// merge the clusters that have similar widths and x position
	let i = 0;
	while (i < clusters.length) {
		const cluster = clusters[i];
		for (let j = i + 1; j < clusters.length; j++) {
			const otherCluster = clusters[j];
			if (shouldMergeClusters(cluster, otherCluster)) {
				cluster.push(...otherCluster);
				clusters.splice(j, 1);
				j -= 1;
			}
		}
		i++;
	}
	const totalText = totalTextLength(clusters.flat());
	const clusterWithMetrics = clusters.map((cluster) => {
		const centrality = clusterCentrality(cluster);
		return {
			cluster,
			centrality,
			percentageTextShare: percentageTextShare(cluster, totalText),
		};
	});
	// if there is a dominant cluster with more than 60% text share, return that.
	// (Checked across all clusters — the original inspected clusterWithMetrics[0]
	// before any sorting, i.e. an arbitrary cluster.)
	const dominantCluster = clusterWithMetrics.find((c) => c.percentageTextShare > 60);
	if (dominantCluster) return [dominantCluster.cluster];
	// sort in descending order of text share after applying a penalty for centrality
	const sortedClusters = clusterWithMetrics.sort((a, b) => {
		const penaltyForA = Math.pow(0.9, a.centrality / 100);
		const penaltyForB = Math.pow(0.9, b.centrality / 100);
		const adjustedTextShareA = a.percentageTextShare * penaltyForA;
		const adjustedTextShareB = b.percentageTextShare * penaltyForB;
		return adjustedTextShareB - adjustedTextShareA;
	});
	// find all clusters that are similar to the largest cluster in terms of text share
	// and see if they are enough to cover at least 60% of the text share
	const largeTextShareClusters = sortedClusters.filter((c) =>
		approximatelyEqual(c.percentageTextShare, sortedClusters[0]?.percentageTextShare, 10)
	);
	const totalTextShareOfLargeClusters = largeTextShareClusters.reduce(
		(acc, cluster) => acc + cluster.percentageTextShare,
		0
	);
	if (totalTextShareOfLargeClusters > 60) {
		return largeTextShareClusters.map((c) => c.cluster);
	}
	// choose clusters till the text share is greater than 60%
	let totalTextShare = 0;
	const criticalClusters = [];
	for (const cluster of sortedClusters) {
		/** Ignore clusters with less than 2%*/
		if (cluster.percentageTextShare < 2) continue;
		if (totalTextShare > 60) break;
		criticalClusters.push(cluster.cluster);
		totalTextShare += cluster.percentageTextShare;
	}
	// if the total text share is less than 60% then return an empty array
	// as this website should not be particularly useful for the web search anyways
	// this should almost never happen on structured website with a lot of text
	if (totalTextShare < 60) {
		return [];
	}
	return criticalClusters;
};
// Attributes preserved during serialization; everything else is dropped.
const allowListedAttributes = ["href", "src", "alt", "title", "class", "id"];
/** Recursively serializes an element into a plain-object tree with allow-listed attributes. */
function serializeHTMLElement(node: Element): SerializedHTMLElement {
return {
tagName: node.tagName.toLowerCase(),
attributes: allowListedAttributes.reduce((acc, attr) => {
const value = node.getAttribute(attr);
if (value) {
acc[attr] = value;
}
return acc;
}, {} as Record<string, string>),
// filter(Boolean) drops the empty strings serializeNode emits for
// comments/other non-element, non-text node types.
content: Array.from(node.childNodes).map(serializeNode).filter(Boolean),
};
}
/** Elements are serialized recursively, text nodes become strings, anything else "". */
function serializeNode(node: Node): SerializedHTMLElement | string {
	switch (node.nodeType) {
		case 1: // ELEMENT_NODE
			return serializeHTMLElement(node as Element);
		case 3: // TEXT_NODE
			return node.textContent ?? "";
		default:
			return "";
	}
}
/** Collects page metadata from the title and common meta/OpenGraph/article tags. */
function getPageMetadata(): {
title: string;
siteName?: string;
author?: string;
description?: string;
createdAt?: string;
updatedAt?: string;
} {
const title = document.title ?? "";
const siteName =
document.querySelector("meta[property='og:site_name']")?.getAttribute("content") ?? undefined;
const author =
document.querySelector("meta[name='author']")?.getAttribute("content") ?? undefined;
// Prefer the plain description tag, fall back to OpenGraph
const description =
document.querySelector("meta[name='description']")?.getAttribute("content") ??
document.querySelector("meta[property='og:description']")?.getAttribute("content") ??
undefined;
// Prefer the article publish time, fall back to a generic date tag
const createdAt =
document.querySelector("meta[property='article:published_time']")?.getAttribute("content") ??
document.querySelector("meta[name='date']")?.getAttribute("content") ??
undefined;
const updatedAt =
document.querySelector("meta[property='article:modified_time']")?.getAttribute("content") ??
undefined;
return { title, siteName, author, description, createdAt, updatedAt };
}
// ---- driver: collect readable nodes, cluster, select, serialize ----
const readableNodes = getAllReadableNodes();
const clusters = clusterReadableNodes(readableNodes);
const criticalClusters = findCriticalClusters(clusters);
// filter readable nodes using the above information as well as heuristics
const filteredNodes = readableNodes.filter((_, idx) => {
return criticalClusters.some((cluster) => {
return cluster.includes(idx);
});
});
// Drop duplicate DOM nodes (keeping the last occurrence), then serialize each subtree.
const elements = filteredNodes
.filter(
(node, idx, nodes) => !nodes.slice(idx + 1).some((otherNode) => node.node === otherNode.node)
)
.map<SerializedHTMLElement>(({ node }) => serializeHTMLElement(node));
const metadata = getPageMetadata();
return { ...metadata, elements };
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/scrape/types.ts
|
/** Plain-object snapshot of a DOM element, produced inside the page context. */
export interface SerializedHTMLElement {
tagName: string;
// Only allow-listed attributes survive serialization (href, src, alt, title, class, id).
attributes: Record<string, string>;
// Child nodes in document order: nested elements or raw text runs.
content: (SerializedHTMLElement | string)[];
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/scrape/playwright.ts
|
import {
chromium,
devices,
type Page,
type BrowserContextOptions,
type Response,
type Browser,
} from "playwright";
import { PlaywrightBlocker } from "@cliqz/adblocker-playwright";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
import { onExit } from "$lib/server/exitHandler";
// Ad/tracking blocker shared by all pages, built once at module load (top-level
// await). On list-download failure we fall back to a no-op blocker rather than
// failing startup.
const blocker =
env.PLAYWRIGHT_ADBLOCKER === "true"
? await PlaywrightBlocker.fromPrebuiltAdsAndTracking(fetch)
.then((blker) => {
// Also block heavy resources; block scripts too when websearch JS is disabled.
const mostBlocked = blker.blockFonts().blockMedias().blockFrames().blockImages();
if (env.WEBSEARCH_JAVASCRIPT === "false") return mostBlocked.blockScripts();
return mostBlocked;
})
.catch((err) => {
logger.error(err, "Failed to initialize PlaywrightBlocker from prebuilt lists");
return PlaywrightBlocker.empty();
})
: PlaywrightBlocker.empty();
// Lazily created, process-wide browser promise; reset when the browser disconnects.
let browserSingleton: Promise<Browser> | undefined;
// Launches headless Chromium, registers cleanup on process exit, and clears the
// singleton on disconnect so the next getPlaywrightCtx() call relaunches it.
async function getBrowser() {
const browser = await chromium.launch({ headless: true });
onExit(() => browser.close());
browser.on("disconnected", () => {
logger.warn("Browser closed");
browserSingleton = undefined;
});
return browser;
}
// Creates a fresh browser context emulating desktop Chrome with an extra-wide
// viewport; reuses (or lazily starts) the shared browser singleton.
async function getPlaywrightCtx() {
if (!browserSingleton) browserSingleton = getBrowser();
const browser = await browserSingleton;
const device = devices["Desktop Chrome"];
const options: BrowserContextOptions = {
...device,
// Increasing width improves spatial clustering accuracy
screen: {
width: 3840,
height: 1080,
},
viewport: {
width: 3840,
height: 1080,
},
reducedMotion: "reduce",
acceptDownloads: false,
timezoneId: "America/New_York",
locale: "en-US",
};
return browser.newContext(options);
}
/**
 * Opens `url` in a fresh browser context, runs `callback` with the page and the
 * navigation response (undefined when navigation failed or timed out), and
 * always closes the context afterwards.
 */
export async function withPage<T>(
	url: string,
	callback: (page: Page, response?: Response) => Promise<T>
): Promise<T> {
	const ctx = await getPlaywrightCtx();
	try {
		const page = await ctx.newPage();
		if (env.PLAYWRIGHT_ADBLOCKER === "true") {
			await blocker.enableBlockingInPage(page);
		}
		// Block every non-HTTPS request the page makes
		await page.route("**", (route, request) => {
			const requestUrl = request.url();
			if (!requestUrl.startsWith("https://")) {
				logger.warn(`Blocked request to: ${requestUrl}`);
				return route.abort();
			}
			return route.continue();
		});
		// Parse the timeout once so the log message always matches the value used
		const timeoutMs = parseInt(env.WEBSEARCH_TIMEOUT);
		const res = await page.goto(url, { waitUntil: "load", timeout: timeoutMs }).catch(() => {
			// use the shared logger (was console.warn, inconsistent with the rest of this file)
			logger.warn(`Failed to load page within ${timeoutMs / 1000}s: ${url}`);
		});
		return await callback(page, res ?? undefined);
	} finally {
		await ctx.close();
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/scrape/scrape.ts
|
import type { WebSearchScrapedSource, WebSearchSource } from "$lib/types/WebSearch";
import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate";
import { withPage } from "./playwright";
import { spatialParser } from "./parser";
import { htmlToMarkdownTree } from "../markdown/tree";
import { timeout } from "$lib/utils/timeout";
import { makeGeneralUpdate } from "../update";
import { MetricsServer } from "$lib/server/metrics";
import { logger } from "$lib/server/logger";
/**
 * Returns a generator that scrapes one search result, yielding a progress
 * update and recording fetch metrics. On failure it logs and returns undefined
 * instead of aborting the overall search.
 */
export const scrape = (maxCharsPerElem: number) =>
async function* (
source: WebSearchSource
): AsyncGenerator<MessageWebSearchUpdate, WebSearchScrapedSource | undefined, undefined> {
try {
const startTime = Date.now();
MetricsServer.getMetrics().webSearch.pageFetchCount.inc();
const page = await scrapeUrl(source.link, maxCharsPerElem);
MetricsServer.getMetrics().webSearch.pageFetchDuration.observe(Date.now() - startTime);
yield makeGeneralUpdate({
message: "Browsing webpage",
args: [source.link],
});
return { ...source, page };
} catch (e) {
MetricsServer.getMetrics().webSearch.pageFetchCountError.inc();
logger.error(e, `Error scraping webpage: ${source.link}`);
}
};
/**
 * Loads `url` in a managed page. Plain-text-like content types are wrapped
 * directly in a single markdown paragraph; HTML pages run the in-page spatial
 * parser (2s budget) before markdown conversion.
 */
export async function scrapeUrl(url: string, maxCharsPerElem: number) {
return withPage(url, async (page, res) => {
if (!res) throw Error("Failed to load page");
// Check if it's a non-html content type that we can handle directly
// TODO: direct mappings to markdown can be added for markdown, csv and others
const contentType = res.headers()["content-type"] ?? "";
if (
contentType.includes("text/plain") ||
contentType.includes("text/markdown") ||
contentType.includes("application/json") ||
contentType.includes("application/xml") ||
contentType.includes("text/csv")
) {
const title = await page.title();
const content = await page.content();
return {
title,
markdownTree: htmlToMarkdownTree(
title,
[{ tagName: "p", attributes: {}, content: [content] }],
maxCharsPerElem
),
};
}
// Run the spatial parser inside the page; bail out after 2 seconds.
const scrapedOutput = await timeout(page.evaluate(spatialParser), 2000)
.then(({ elements, ...parsed }) => ({
...parsed,
markdownTree: htmlToMarkdownTree(parsed.title, elements, maxCharsPerElem),
}))
.catch((cause) => {
throw Error("Parsing failed", { cause });
});
return scrapedOutput;
});
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints.ts
|
import { WebSearchProvider, type WebSearchSource } from "$lib/types/WebSearch";
import { env } from "$env/dynamic/private";
import searchSerper from "./endpoints/serper";
import searchSerpApi from "./endpoints/serpApi";
import searchSerpStack from "./endpoints/serpStack";
import searchYouApi from "./endpoints/youApi";
import searchWebLocal from "./endpoints/webLocal";
import searchSearxng from "./endpoints/searxng";
import searchSearchApi from "./endpoints/searchApi";
import searchBing from "./endpoints/bing";
/** Resolves the provider label reported to the UI, based on which env keys are set. */
export function getWebSearchProvider() {
	return env.YDC_API_KEY
		? WebSearchProvider.YOU
		: env.SEARXNG_QUERY_URL
			? WebSearchProvider.SEARXNG
			: env.BING_SUBSCRIPTION_KEY
				? WebSearchProvider.BING
				: WebSearchProvider.GOOGLE;
}
/** Searches the web using the first available provider, based on the env */
export async function searchWeb(query: string): Promise<WebSearchSource[]> {
	if (env.USE_LOCAL_WEBSEARCH) return searchWebLocal(query);
	if (env.SEARXNG_QUERY_URL) return searchSearxng(query);
	if (env.SERPER_API_KEY) return searchSerper(query);
	if (env.YDC_API_KEY) return searchYouApi(query);
	if (env.SERPAPI_KEY) return searchSerpApi(query);
	if (env.SERPSTACK_API_KEY) return searchSerpStack(query);
	if (env.SEARCHAPI_KEY) return searchSearchApi(query);
	if (env.BING_SUBSCRIPTION_KEY) return searchBing(query);
	// Error message lists every variable actually checked above
	// (the original omitted SERPAPI_KEY and BING_SUBSCRIPTION_KEY).
	throw new Error(
		"No configuration found for web search. Please set USE_LOCAL_WEBSEARCH, SEARXNG_QUERY_URL, SERPER_API_KEY, YDC_API_KEY, SERPAPI_KEY, SERPSTACK_API_KEY, SEARCHAPI_KEY, or BING_SUBSCRIPTION_KEY in your environment variables."
	);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/search.ts
|
import type { WebSearchSource } from "$lib/types/WebSearch";
import type { Message } from "$lib/types/Message";
import type { Assistant } from "$lib/types/Assistant";
import { getWebSearchProvider, searchWeb } from "./endpoints";
import { generateQuery } from "./generateQuery";
import { isURLStringLocal } from "$lib/server/isURLLocal";
import { isURL } from "$lib/utils/isUrl";
import z from "zod";
import JSON5 from "json5";
import { env } from "$env/dynamic/private";
import { makeGeneralUpdate } from "../update";
import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate";
// Domain allow/block lists, parsed once from env as JSON5 arrays of substrings.
const listSchema = z.array(z.string()).default([]);
const allowList = listSchema.parse(JSON5.parse(env.WEBSEARCH_ALLOWLIST));
const blockList = listSchema.parse(JSON5.parse(env.WEBSEARCH_BLOCKLIST));
/**
 * Runs the web search flow: uses assistant-specified links directly when
 * present, otherwise generates a query, applies domain allow/block filters,
 * and queries the configured provider. Yields progress updates along the way.
 */
export async function* search(
messages: Message[],
ragSettings?: Assistant["rag"],
query?: string
): AsyncGenerator<
MessageWebSearchUpdate,
{ searchQuery: string; pages: WebSearchSource[] },
undefined
> {
if (ragSettings && ragSettings?.allowedLinks.length > 0) {
yield makeGeneralUpdate({ message: "Using links specified in Assistant" });
return {
searchQuery: "",
pages: await directLinksToSource(ragSettings.allowedLinks).then(filterByBlockList),
};
}
// Use the caller's query if given, otherwise ask the model for one.
const searchQuery = query ?? (await generateQuery(messages));
yield makeGeneralUpdate({ message: `Searching ${getWebSearchProvider()}`, args: [searchQuery] });
// handle the global and (optional) rag lists
if (ragSettings && ragSettings?.allowedDomains.length > 0) {
yield makeGeneralUpdate({ message: "Filtering on specified domains" });
}
const filters = buildQueryFromSiteFilters(
[...(ragSettings?.allowedDomains ?? []), ...allowList],
blockList
);
const searchQueryWithFilters = `${filters} ${searchQuery}`;
// The block list is applied to the provider's results as well.
const searchResults = await searchWeb(searchQueryWithFilters).then(filterByBlockList);
return {
searchQuery: searchQueryWithFilters,
pages: searchResults,
};
}
// ----------
// Utils
/** Drops results whose link contains any block-listed substring. */
function filterByBlockList(results: WebSearchSource[]): WebSearchSource[] {
	return results.filter(({ link }) => blockList.every((blocked) => !link.includes(blocked)));
}
/** Builds the "site:…" allow / "-site:…" block prefix for a search query. */
function buildQueryFromSiteFilters(allow: string[], block: string[]) {
	const allowQuery = allow.map((domain) => `site:${domain}`).join(" OR ");
	const blockQuery = block.map((domain) => `-site:${domain}`).join(" ");
	return `${allowQuery} ${blockQuery}`;
}
// Converts assistant-specified links into bare sources, dropping local/private
// URLs unless ENABLE_LOCAL_FETCH is explicitly enabled.
async function directLinksToSource(links: string[]): Promise<WebSearchSource[]> {
if (env.ENABLE_LOCAL_FETCH !== "true") {
const localLinks = await Promise.all(links.map(isURLStringLocal));
links = links.filter((_, index) => !localLinks[index]);
}
return links.filter(isURL).map((link) => ({
link,
title: "",
text: [""],
}));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/generateQuery.ts
|
import type { Message } from "$lib/types/Message";
import { format } from "date-fns";
import type { EndpointMessage } from "../../endpoints/endpoints";
import { generateFromDefaultEndpoint } from "../../generateFromDefaultEndpoint";
import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator";
/**
 * Builds a few-shot prompt from prior user questions and asks the default
 * endpoint for a single web-search query string.
 * NOTE(review): assumes at least one user message exists; `lastMessage` would
 * be undefined otherwise — confirm callers guarantee this.
 */
export async function generateQuery(messages: Message[]) {
const currentDate = format(new Date(), "MMMM d, yyyy");
const userMessages = messages.filter(({ from }) => from === "user");
const previousUserMessages = userMessages.slice(0, -1);
const lastMessage = userMessages.slice(-1)[0];
// Few-shot examples teaching the query format, followed by the real question.
const convQuery: Array<EndpointMessage> = [
{
from: "user",
content: `Previous Questions:
- Who is the president of France?
Current Question: What about Mexico?
`,
},
{
from: "assistant",
content: "President of Mexico",
},
{
from: "user",
content: `Previous questions:
- When is the next formula 1 grand prix?
Current Question: Where is it being hosted?`,
},
{
from: "assistant",
content: "location of next formula 1 grand prix",
},
{
from: "user",
content: "Current Question: What type of printhead does the Epson F2270 DTG printer use?",
},
{
from: "assistant",
content: "Epson F2270 DTG printer printhead",
},
{ from: "user", content: "What were the news yesterday?" },
{
from: "assistant",
content: `news ${format(new Date(Date.now() - 864e5), "MMMM d, yyyy")}`,
},
{ from: "user", content: "What is the current weather in Paris?" },
{ from: "assistant", content: `weather in Paris ${currentDate}` },
{
from: "user",
content:
(previousUserMessages.length > 0
? `Previous questions: \n${previousUserMessages
.map(({ content }) => `- ${content}`)
.join("\n")}`
: "") +
"\n\nCurrent Question: " +
lastMessage.content,
},
];
const webQuery = await getReturnFromGenerator(
generateFromDefaultEndpoint({
messages: convQuery,
preprompt: `You are tasked with generating web search queries. Give me an appropriate query to answer my question for google search. Answer with only the query. Today is ${currentDate}`,
generateSettings: {
max_new_tokens: 30,
},
})
);
return webQuery.trim();
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/searchApi.ts
|
import { env } from "$env/dynamic/private";
import type { WebSearchSource } from "$lib/types/WebSearch";
/**
 * Searches Google via SearchApi.io and returns the organic results.
 * Throws with the API's message on a non-2xx response.
 */
export default async function search(query: string): Promise<WebSearchSource[]> {
	// encodeURIComponent: a query containing "&", "#", "+" or spaces would
	// otherwise corrupt the query string (the original interpolated it raw).
	const response = await fetch(
		`https://www.searchapi.io/api/v1/search?engine=google&hl=en&gl=us&q=${encodeURIComponent(
			query
		)}`,
		{
			method: "GET",
			headers: {
				Authorization: `Bearer ${env.SEARCHAPI_KEY}`,
				"Content-type": "application/json",
			},
		}
	);
	/* eslint-disable @typescript-eslint/no-explicit-any */
	const data = (await response.json()) as Record<string, any>;
	if (!response.ok) {
		throw new Error(
			data["message"] ?? `SearchApi returned error code ${response.status} - ${response.statusText}`
		);
	}
	return data["organic_results"] ?? [];
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/searxng.ts
|
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
import type { WebSearchSource } from "$lib/types/WebSearch";
import { isURL } from "$lib/utils/isUrl";
/**
 * Queries a SearXNG instance (JSON format) and returns up to 5 result URLs.
 * The request is aborted after 10 seconds.
 */
export default async function searchSearxng(query: string): Promise<WebSearchSource[]> {
	const abortController = new AbortController();
	// Keep the handle so the timer can be cleared once the request settles;
	// the original left it running, keeping the event loop alive for 10s.
	const timeoutId = setTimeout(() => abortController.abort(), 10000);
	// Insert the query into the URL template
	// NOTE(review): the query is substituted unencoded — confirm templates expect raw text.
	let url = env.SEARXNG_QUERY_URL.replace("<query>", query);
	// Check if "&format=json" already exists in the URL
	if (!url.includes("&format=json")) {
		url += "&format=json";
	}
	// Call the URL to return JSON data
	const jsonResponse = await fetch(url, {
		signal: abortController.signal,
	})
		.then((response) => response.json() as Promise<{ results: { url: string }[] }>)
		.catch((error) => {
			logger.error(error, "Failed to fetch or parse JSON");
			throw new Error("Failed to fetch or parse JSON", { cause: error });
		})
		.finally(() => clearTimeout(timeoutId));
	// Extract 'url' elements from the JSON response and trim to the top 5 URLs
	const urls = jsonResponse.results.slice(0, 5).map((item) => item.url);
	if (!urls.length) {
		throw new Error(`Response doesn't contain any "url" elements`);
	}
	// Map URLs to the correct object shape
	return urls.filter(isURL).map((link) => ({ link }));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/bing.ts
|
import type { WebSearchSource } from "$lib/types/WebSearch";
import { env } from "$env/dynamic/private";
/**
 * Queries the Bing Web Search v7 API and adapts the results to the
 * WebSearchSource shape used by the websearch pipeline.
 *
 * Fix: removed a leftover debug `console.log` of the raw result payload and
 * the dead commented-out params block.
 */
export default async function search(query: string): Promise<WebSearchSource[]> {
	const response = await fetch(
		"https://api.bing.microsoft.com/v7.0/search" + "?q=" + encodeURIComponent(query),
		{
			method: "GET",
			headers: {
				"Ocp-Apim-Subscription-Key": env.BING_SUBSCRIPTION_KEY,
				"Content-type": "application/json",
			},
		}
	);

	/* eslint-disable @typescript-eslint/no-explicit-any */
	const data = (await response.json()) as Record<string, any>;

	if (!response.ok) {
		throw new Error(
			data["message"] ?? `Bing API returned error code ${response.status} - ${response.statusText}`
		);
	}

	// Adapt the data structure from the Bing response to match the WebSearchSource type
	const webPages = data["webPages"]?.["value"] ?? [];
	return webPages.map((page: any) => ({
		title: page.name,
		link: page.url,
		text: page.snippet,
		displayLink: page.displayUrl,
	}));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/serpStack.ts
|
import { env } from "$env/dynamic/private";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";
type SerpStackResponse = {
	organic_results: {
		title: string;
		url: string;
		snippet?: string;
	}[];
	error?: string;
};

/**
 * Queries the SerpStack API (Google results) and maps the organic results to
 * WebSearchSource, dropping entries without a valid URL.
 *
 * Fix: the query is now URL-encoded; previously raw spaces/reserved characters
 * were interpolated directly into the request URL.
 */
export default async function searchSerpStack(query: string): Promise<WebSearchSource[]> {
	const response = await fetch(
		`http://api.serpstack.com/search?access_key=${env.SERPSTACK_API_KEY}&query=${encodeURIComponent(
			query
		)}&hl=en&gl=us`,
		{ headers: { "Content-type": "application/json; charset=UTF-8" } }
	);

	const data = (await response.json()) as SerpStackResponse;

	if (!response.ok) {
		throw new Error(
			data.error ?? `SerpStack API returned error code ${response.status} - ${response.statusText}`
		);
	}

	return data.organic_results
		.filter(({ url }) => isURL(url))
		.map(({ title, url, snippet }) => ({
			title,
			link: url,
			text: snippet ?? "",
		}));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/webLocal.ts
|
import { JSDOM, VirtualConsole } from "jsdom";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";
/**
 * "Local" web search: scrapes the Google results page directly and extracts
 * the result links, without relying on any search API.
 *
 * Fix: `.catch()` was called with no handler, which catches nothing — a fetch
 * failure would reject instead of falling back to an empty document as the
 * `htmlString ?? ""` below intends. Now falls back to `undefined` explicitly.
 */
export default async function searchWebLocal(query: string): Promise<WebSearchSource[]> {
	const abortController = new AbortController();
	setTimeout(() => abortController.abort(), 10000);

	const htmlString = await fetch(
		"https://www.google.com/search?hl=en&q=" + encodeURIComponent(query),
		{ signal: abortController.signal }
	)
		.then((response) => response.text())
		.catch(() => undefined);

	const virtualConsole = new VirtualConsole();
	virtualConsole.on("error", () => {}); // No-op to skip console errors.
	const document = new JSDOM(htmlString ?? "", { virtualConsole }).window.document;

	// get all links
	const links = document.querySelectorAll("a");
	if (!links.length) throw new Error(`webpage doesn't have any "a" element`);

	// take urls that start with /url?q=
	// and do not contain google.com links
	// and strip them up to '&sa='
	const linksHref = Array.from(links)
		.map((el) => el.href)
		.filter((link) => link.startsWith("/url?q=") && !link.includes("google.com/"))
		.map((link) => link.slice("/url?q=".length, link.indexOf("&sa=")))
		.filter(isURL);

	// remove duplicate links and map links to the correct object shape
	return [...new Set(linksHref)].map((link) => ({ link }));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/serper.ts
|
import { env } from "$env/dynamic/private";
import type { WebSearchSource } from "$lib/types/WebSearch";
/**
 * Searches Google through the Serper.dev API (US/English locale) and returns
 * the raw organic results.
 */
export default async function search(query: string): Promise<WebSearchSource[]> {
	const response = await fetch("https://google.serper.dev/search", {
		method: "POST",
		headers: {
			"x-api-key": env.SERPER_API_KEY,
			"Content-type": "application/json",
		},
		body: JSON.stringify({ q: query, hl: "en", gl: "us" }),
	});

	/* eslint-disable @typescript-eslint/no-explicit-any */
	const data = (await response.json()) as Record<string, any>;

	if (!response.ok) {
		throw new Error(
			data["message"] ??
				`Serper API returned error code ${response.status} - ${response.statusText}`
		);
	}

	return data["organic"] ?? [];
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/youApi.ts
|
import { env } from "$env/dynamic/private";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";
interface YouWebSearch {
	hits: YouSearchHit[];
	latency: number;
}

interface YouSearchHit {
	url: string;
	title: string;
	description: string;
	snippets: string[];
}

/**
 * Queries the You.com index API and returns results with their snippets
 * joined, sorted so the results with the most snippet text come first.
 *
 * Fix: the query is now URL-encoded; previously it was interpolated raw into
 * the query string.
 */
export default async function searchWebYouApi(query: string): Promise<WebSearchSource[]> {
	const response = await fetch(
		`https://api.ydc-index.io/search?query=${encodeURIComponent(query)}`,
		{
			method: "GET",
			headers: {
				"X-API-Key": env.YDC_API_KEY,
				"Content-type": "application/json; charset=UTF-8",
			},
		}
	);

	if (!response.ok) {
		throw new Error(`You.com API returned error code ${response.status} - ${response.statusText}`);
	}

	const data = (await response.json()) as YouWebSearch;
	const formattedResultsWithSnippets = data.hits
		.filter(({ url }) => isURL(url))
		.map(({ title, url, snippets }) => ({
			title,
			link: url,
			text: snippets?.join("\n") || "",
		}))
		.sort((a, b) => b.text.length - a.text.length); // desc order by text length

	return formattedResultsWithSnippets;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch/search
|
hf_public_repos/chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts
|
import { env } from "$env/dynamic/private";
import { getJson, type GoogleParameters } from "serpapi";
import type { WebSearchSource } from "$lib/types/WebSearch";
import { isURL } from "$lib/utils/isUrl";
type SerpApiResponse = {
	organic_results: {
		link: string;
	}[];
};

/**
 * Searches Google through the SerpApi client library (US/English locale) and
 * returns the organic results whose links are valid URLs.
 */
export default async function searchWebSerpApi(query: string): Promise<WebSearchSource[]> {
	const response = (await getJson("google", {
		q: query,
		hl: "en",
		gl: "us",
		google_domain: "google.com",
		api_key: env.SERPAPI_KEY,
	} satisfies GoogleParameters)) as unknown as SerpApiResponse;

	// Drop results whose link is not a well-formed URL.
	return response.organic_results.filter(({ link }) => isURL(link));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/embed/embed.ts
|
import { MetricsServer } from "$lib/server/metrics";
import type { WebSearchScrapedSource, WebSearchUsedSource } from "$lib/types/WebSearch";
import type { EmbeddingBackendModel } from "../../embeddingModels";
import { getSentenceSimilarity, innerProduct } from "../../sentenceSimilarity";
import { MarkdownElementType, type MarkdownElement } from "../markdown/types";
import { stringifyMarkdownElement } from "../markdown/utils/stringify";
import { getCombinedSentenceSimilarity } from "./combine";
import { flattenTree } from "./tree";
// Context budget: keep selecting elements until at least MIN_CHARS of context
// is gathered; stop once SOFT_MAX_CHARS is exceeded (checked after adding).
const MIN_CHARS = 3_000;
const SOFT_MAX_CHARS = 8_000;
/**
 * Selects the markdown elements most relevant to the prompt (via sentence
 * embeddings) and attaches the stringified selection to each source as
 * `context`. Sources that contributed no selected element are dropped.
 */
export async function findContextSources(
sources: WebSearchScrapedSource[],
prompt: string,
embeddingModel: EmbeddingBackendModel
) {
const startTime = Date.now();
// Flatten each source's markdown tree separately so selected elements can be
// mapped back to their source at the end.
const sourcesMarkdownElems = sources.map((source) => flattenTree(source.page.markdownTree));
const markdownElems = sourcesMarkdownElems.flat();
// When using CPU embedding (transformersjs), join sentences together to the max character limit
// to reduce inference time
const embeddingFunc =
embeddingModel.endpoints[0].type === "transformersjs"
? getCombinedSentenceSimilarity
: getSentenceSimilarity;
const embeddings = await embeddingFunc(
embeddingModel,
prompt,
markdownElems
.map(stringifyMarkdownElement)
// Safety in case the stringified markdown elements are too long
// but chunking should have happened earlier
.map((elem) => elem.slice(0, embeddingModel.chunkCharLength))
);
// Sort ascending by distance (closest match first). Headers are excluded here;
// they are only pulled in below as parents of selected elements.
const topEmbeddings = embeddings
.sort((a, b) => a.distance - b.distance)
.filter((embedding) => markdownElems[embedding.idx].type !== MarkdownElementType.Header);
let totalChars = 0;
const selectedMarkdownElems = new Set<MarkdownElement>();
const selectedEmbeddings: number[][] = [];
for (const embedding of topEmbeddings) {
const elem = markdownElems[embedding.idx];
// Ignore elements that are too similar to already selected elements
// NOTE(review): threshold compares innerProduct(...) < 0.01 — confirm the
// intended direction against the innerProduct implementation.
const tooSimilar = selectedEmbeddings.some(
(selectedEmbedding) => innerProduct(selectedEmbedding, embedding.embedding) < 0.01
);
if (tooSimilar) continue;
// Add element
if (!selectedMarkdownElems.has(elem)) {
selectedMarkdownElems.add(elem);
selectedEmbeddings.push(embedding.embedding);
totalChars += elem.content.length;
}
// Add element's parent (header)
if (elem.parent && !selectedMarkdownElems.has(elem.parent)) {
selectedMarkdownElems.add(elem.parent);
totalChars += elem.parent.content.length;
}
// Stop on hard budget, or once enough context is gathered and the remaining
// candidates are no longer close matches (distance > 0.25).
if (totalChars > SOFT_MAX_CHARS) break;
if (totalChars > MIN_CHARS && embedding.distance > 0.25) break;
}
// Rebuild each source's context in original document order from the selection.
const contextSources = sourcesMarkdownElems
.map<WebSearchUsedSource>((elems, idx) => {
const sourceSelectedElems = elems.filter((elem) => selectedMarkdownElems.has(elem));
const context = sourceSelectedElems.map(stringifyMarkdownElement).join("\n");
const source = sources[idx];
return { ...source, context };
})
.filter((contextSource) => contextSource.context.length > 0);
MetricsServer.getMetrics().webSearch.embeddingDuration.observe(Date.now() - startTime);
return contextSources;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/embed/tree.ts
|
import type { MarkdownElement } from "../markdown/types";
/**
 * Depth-first flattening of a markdown element tree into a list.
 * The element itself comes first, followed by all of its descendants in order.
 */
export function flattenTree(elem: MarkdownElement): MarkdownElement[] {
	const flat: MarkdownElement[] = [elem];
	if ("children" in elem) {
		for (const child of elem.children) {
			flat.push(...flattenTree(child));
		}
	}
	return flat;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/websearch
|
hf_public_repos/chat-ui/src/lib/server/websearch/embed/combine.ts
|
import type { EmbeddingBackendModel } from "$lib/server/embeddingModels";
import { getSentenceSimilarity } from "$lib/server/sentenceSimilarity";
/**
 * Combines consecutive sentences into chunks below the embedding model's
 * character limit before embedding, which considerably reduces inference time
 * on CPU. Each chunk's similarity result is fanned back out to every sentence
 * index it covered, so the return shape matches getSentenceSimilarity.
 */
export async function getCombinedSentenceSimilarity(
	embeddingModel: EmbeddingBackendModel,
	query: string,
	sentences: string[]
): ReturnType<typeof getSentenceSimilarity> {
	// Greedily pack consecutive sentences while they fit under the limit,
	// remembering which original indices went into each chunk.
	const chunks: { text: string; indices: number[] }[] = [];
	for (const [idx, sentence] of sentences.entries()) {
		const current = chunks[chunks.length - 1];
		if (current && current.text.length + sentence.length < embeddingModel.chunkCharLength) {
			current.text += ` ${sentence}`;
			current.indices.push(idx);
		} else {
			chunks.push({ text: sentence, indices: [idx] });
		}
	}

	// One embedding call per chunk instead of per sentence.
	const embeddings = await getSentenceSimilarity(
		embeddingModel,
		query,
		chunks.map(({ text }) => text)
	);

	// Expand each chunk result back to its member sentence indices.
	return embeddings.flatMap((embedding, chunkIdx) =>
		chunks[chunkIdx].indices.map((i) => ({ ...embedding, idx: i }))
	);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/files/uploadFile.ts
|
import type { Conversation } from "$lib/types/Conversation";
import type { MessageFile } from "$lib/types/Message";
import { sha256 } from "$lib/utils/sha256";
import { fileTypeFromBuffer } from "file-type";
import { collections } from "$lib/server/database";
/**
 * Uploads a file to the GridFS bucket under a content-addressed name scoped to
 * the conversation, and resolves with the MessageFile descriptor once the
 * upload finishes (or rejects after a 20s timeout).
 *
 * Fixes: the file contents were read twice (`arrayBuffer()` called once for
 * mime sniffing and again for the write) — the buffer is now reused; and the
 * timeout timer is now cleared on finish/error so it no longer keeps the event
 * loop alive for 20 seconds after a successful upload.
 */
export async function uploadFile(file: File, conv: Conversation): Promise<MessageFile> {
	const sha = await sha256(await file.text());
	// Read the contents once; reused for mime detection and the upload write.
	const buffer = await file.arrayBuffer();

	// Attempt to detect the mime type of the file, fallback to the uploaded mime
	const mime = await fileTypeFromBuffer(buffer).then((fileType) => fileType?.mime ?? file.type);

	const upload = collections.bucket.openUploadStream(`${conv._id}-${sha}`, {
		metadata: { conversation: conv._id.toString(), mime },
	});

	upload.write(buffer as unknown as Buffer);
	upload.end();

	// only return the filename when upload throws a finish event or a 20s time out occurs
	return new Promise((resolve, reject) => {
		const timeout = setTimeout(() => reject(new Error("Upload timed out")), 20_000);
		upload.once("finish", () => {
			clearTimeout(timeout);
			resolve({ type: "hash", value: sha, mime: file.type, name: file.name });
		});
		upload.once("error", (err) => {
			clearTimeout(timeout);
			reject(err);
		});
	});
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/files/downloadFile.ts
|
import { error } from "@sveltejs/kit";
import { collections } from "$lib/server/database";
import type { Conversation } from "$lib/types/Conversation";
import type { SharedConversation } from "$lib/types/SharedConversation";
import type { MessageFile } from "$lib/types/Message";
/**
 * Downloads a conversation-scoped file from the GridFS bucket and returns it
 * base64-encoded. Errors with 404 when the file does not exist and 403 when it
 * belongs to a different conversation.
 */
export async function downloadFile(
	sha256: string,
	convId: Conversation["_id"] | SharedConversation["_id"]
): Promise<MessageFile & { type: "base64" }> {
	// Files are stored under a content-addressed name scoped to the conversation.
	const file = await collections.bucket
		.find({ filename: `${convId.toString()}-${sha256}` })
		.next();

	if (!file) {
		error(404, "File not found");
	}
	if (file.metadata?.conversation !== convId.toString()) {
		error(403, "You don't have access to this file.");
	}

	// Drain the download stream into memory before encoding.
	const fileStream = collections.bucket.openDownloadStream(file._id);
	const chunks: Uint8Array[] = [];
	const buffer = await new Promise<Buffer>((resolve, reject) => {
		fileStream.on("data", (chunk) => chunks.push(chunk));
		fileStream.on("error", reject);
		fileStream.on("end", () => resolve(Buffer.concat(chunks)));
	});

	return {
		type: "base64",
		name: file.filename,
		value: buffer.toString("base64"),
		mime: file.metadata?.mime,
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/tools/directlyAnswer.ts
|
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
// Built-in pseudo-tool selected by the model when no real tool is needed.
// It is hidden from the UI and produces no output; the tool-calling flow
// special-cases it (see callTool) and simply skips execution.
const directlyAnswer: ConfigTool = {
// Fixed, well-known id so the tool can be referenced in preferences.
_id: new ObjectId("00000000000000000000000D"),
type: "config",
description: "Answer the user's query directly",
color: "blue",
icon: "chat",
displayName: "Directly Answer",
// Always on, cannot be disabled, never shown in the tool list.
isOnByDefault: true,
isLocked: true,
isHidden: true,
name: "directlyAnswer",
endpoint: null,
inputs: [],
outputComponent: null,
outputComponentIdx: null,
showOutput: false,
// No-op generator: returns empty outputs and nothing to display.
async *call() {
return {
outputs: [],
display: false,
};
},
};
export default directlyAnswer;
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/tools/index.ts
|
import { MessageUpdateType } from "$lib/types/MessageUpdate";
import {
ToolColor,
ToolIcon,
ToolOutputComponents,
type BackendCall,
type BaseTool,
type ConfigTool,
type ToolInput,
} from "$lib/types/Tool";
import type { TextGenerationContext } from "../textGeneration/types";
import { z } from "zod";
import JSON5 from "json5";
import { env } from "$env/dynamic/private";
import jp from "jsonpath";
import calculator from "./calculator";
import directlyAnswer from "./directlyAnswer";
import fetchUrl from "./web/url";
import websearch from "./web/search";
import { callSpace, getIpToken } from "./utils";
import { uploadFile } from "../files/uploadFile";
import type { MessageFile } from "$lib/types/Message";
import { sha256 } from "$lib/utils/sha256";
import { ObjectId } from "mongodb";
import { isValidOutputComponent, ToolOutputPaths } from "./outputs";
import { downloadFile } from "../files/downloadFile";
import { fileTypeFromBlob } from "file-type";
/**
 * Context passed to backend tool calls: a subset of the text-generation
 * context plus the resolved system prompt.
 */
export type BackendToolContext = Pick<
TextGenerationContext,
"conv" | "messages" | "assistant" | "ip" | "username"
> & { preprompt?: string };
// Scalar input/output types supported by tool endpoints.
const IOType = z.union([z.literal("str"), z.literal("int"), z.literal("float"), z.literal("bool")]);
// A tool input is one of three kinds:
// - "required": the model must supply a value
// - "optional": the model may supply a value, otherwise `default` applies
// - "fixed": the configured `value` is always used
const toolInputBaseSchema = z.union([
z.object({
name: z.string().min(1).max(80),
description: z.string().max(200).optional(),
paramType: z.literal("required"),
}),
z.object({
name: z.string().min(1).max(80),
description: z.string().max(200).optional(),
paramType: z.literal("optional"),
// undefined defaults are normalized to the empty string
default: z
.union([z.string().max(300), z.number(), z.boolean(), z.undefined()])
.transform((val) => (val === undefined ? "" : val)),
}),
z.object({
name: z.string().min(1).max(80),
paramType: z.literal("fixed"),
// undefined fixed values are normalized to the empty string
value: z
.union([z.string().max(300), z.number(), z.boolean(), z.undefined()])
.transform((val) => (val === undefined ? "" : val)),
}),
]);
// Every input additionally carries either a scalar IO type or a file type
// with a list of accepted mime types.
const toolInputSchema = toolInputBaseSchema.and(
z.object({ type: IOType }).or(
z.object({
type: z.literal("file"),
mimeTypes: z.string().min(1),
})
)
);
/** Schema for community tools created or edited through the UI. */
export const editableToolSchema = z
.object({
name: z
.string()
.regex(/^[a-zA-Z_][a-zA-Z0-9_]*$/) // only allow letters, numbers, and underscores, and start with a letter or underscore
.min(1)
.max(40),
// only allow huggingface spaces either through namespace or direct URLs
baseUrl: z.union([
z.string().regex(/^[^/]+\/[^/]+$/),
z
.string()
.regex(/^https:\/\/huggingface\.co\/spaces\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+$/)
.transform((url) => url.split("/").slice(-2).join("/")),
]),
endpoint: z.string().min(1).max(100),
inputs: z.array(toolInputSchema),
outputComponent: z.string().min(1).max(100),
showOutput: z.boolean(),
displayName: z.string().min(1).max(40),
color: ToolColor,
icon: ToolIcon,
description: z.string().min(1).max(100),
})
// The UI encodes the output component as "<idx>;<component>"; split it here.
.transform((tool) => ({
...tool,
outputComponentIdx: parseInt(tool.outputComponent.split(";")[0]),
outputComponent: ToolOutputComponents.parse(tool.outputComponent.split(";")[1]),
}));
/** Schema for tools declared in the TOOLS environment variable. */
export const configTools = z
.array(
z
.object({
name: z.string(),
description: z.string(),
endpoint: z.union([z.string(), z.null()]),
inputs: z.array(toolInputSchema),
outputComponent: ToolOutputComponents.or(z.null()),
outputComponentIdx: z.number().int().default(0),
showOutput: z.boolean(),
// 24-char hex string converted to a MongoDB ObjectId
_id: z
.string()
.length(24)
.regex(/^[0-9a-fA-F]{24}$/)
.transform((val) => new ObjectId(val)),
baseUrl: z.string().optional(),
displayName: z.string(),
color: ToolColor,
icon: ToolIcon,
isOnByDefault: z.optional(z.literal(true)),
isLocked: z.optional(z.literal(true)),
isHidden: z.optional(z.literal(true)),
})
// Attach the runtime call method to each parsed config entry.
.transform((val) => ({
type: "config" as const,
...val,
call: getCallMethod(val),
}))
)
// add the extra hardcoded tools
.transform((val) => [...val, calculator, directlyAnswer, fetchUrl, websearch]);
/**
 * Builds the `call` method for a space-backed tool: validates and coerces the
 * model-provided parameters, invokes the Gradio space endpoint, then converts
 * the space outputs into tool outputs and uploaded files.
 */
export function getCallMethod(tool: Omit<BaseTool, "call">): BackendCall {
return async function* (params, ctx, uuid) {
// A callable tool needs an endpoint, a base URL and a resolved output component.
if (
tool.endpoint === null ||
!tool.baseUrl ||
!tool.outputComponent ||
tool.outputComponentIdx === null
) {
throw new Error(`Tool function ${tool.name} has no endpoint`);
}
const ipToken = await getIpToken(ctx.ip, ctx.username);
// Convert a raw parameter value into the declared scalar IO type.
function coerceInput(value: unknown, type: ToolInput["type"]) {
const valueStr = String(value);
switch (type) {
case "str":
return valueStr;
case "int":
return parseInt(valueStr);
case "float":
return parseFloat(valueStr);
case "bool":
return valueStr === "true";
default:
throw new Error(`Unsupported type ${type}`);
}
}
// Resolve each declared input to the value actually sent to the space.
const inputs = tool.inputs.map(async (input) => {
if (input.type === "file" && input.paramType !== "required") {
throw new Error("File inputs are always required and cannot be optional or fixed");
}
if (input.paramType === "fixed") {
return coerceInput(input.value, input.type);
} else if (input.paramType === "optional") {
return coerceInput(params[input.name] ?? input.default, input.type);
} else if (input.paramType === "required") {
if (params[input.name] === undefined) {
throw new Error(`Missing required input ${input.name}`);
}
if (input.type === "file") {
// todo: parse file here !
// structure is {input|output}-{msgIdx}-{fileIdx}-(unknown)
const filename = params[input.name];
if (!filename || typeof filename !== "string") {
throw new Error(`Filename is not a string`);
}
const messages = ctx.messages;
// File references are underscore-separated; parts 1 and 2 are indices.
const msgIdx = parseInt(filename.split("_")[1]);
const fileIdx = parseInt(filename.split("_")[2]);
if (Number.isNaN(msgIdx) || Number.isNaN(fileIdx)) {
throw Error(`Message index or file index is missing`);
}
if (msgIdx >= messages.length) {
throw Error(`Message index ${msgIdx} is out of bounds`);
}
const file = messages[msgIdx].files?.[fileIdx];
if (!file) {
throw Error(`File index ${fileIdx} is out of bounds`);
}
// Fetch the stored file and rehydrate it as a Blob for the space call.
const blob = await downloadFile(file.value, ctx.conv._id)
.then((file) => fetch(`data:${file.mime};base64,${file.value}`))
.then((res) => res.blob())
.catch((err) => {
throw Error("Failed to download file", { cause: err });
});
return blob;
} else {
return coerceInput(params[input.name], input.type);
}
}
});
// Call the space; intermediate ETA updates are yielded through the generator.
const outputs = yield* callSpace(
tool.baseUrl,
tool.endpoint,
await Promise.all(inputs),
ipToken,
uuid
);
if (!isValidOutputComponent(tool.outputComponent)) {
throw new Error(`Tool output component is not defined`);
}
// Look up the JSONPath and IO type used to extract values for this component.
const { type, path } = ToolOutputPaths[tool.outputComponent];
if (!path || !type) {
throw new Error(`Tool output type ${tool.outputComponent} is not supported`);
}
const files: MessageFile[] = [];
const toolOutputs: Array<Record<string, string>> = [];
if (outputs.length <= tool.outputComponentIdx) {
throw new Error(`Tool output component index is out of bounds`);
}
// if its not an object, return directly
if (
outputs[tool.outputComponentIdx] !== undefined &&
typeof outputs[tool.outputComponentIdx] !== "object"
) {
return {
outputs: [{ [tool.name + "-0"]: outputs[tool.outputComponentIdx] }],
display: tool.showOutput,
};
}
// Extract values via JSONPath; file outputs are downloaded and re-uploaded
// into the conversation's file store, text outputs are collected as-is.
await Promise.all(
jp
.query(outputs[tool.outputComponentIdx], path)
.map(async (output: string | string[], idx) => {
const arrayedOutput = Array.isArray(output) ? output : [output];
if (type === "file") {
// output files are actually URLs
await Promise.all(
arrayedOutput.map(async (output, idx) => {
await fetch(output)
.then((res) => res.blob())
.then(async (blob) => {
// Detect the real extension/mime; fall back to octet-stream.
const { ext, mime } = (await fileTypeFromBlob(blob)) ?? { ext: "octet-stream" };
return new File(
[blob],
`${idx}-${await sha256(JSON.stringify(params))}.${ext}`,
{
type: mime,
}
);
})
.then((file) => uploadFile(file, ctx.conv))
.then((file) => files.push(file));
})
);
// For file outputs, instruct the model to acknowledge rather than describe.
toolOutputs.push({
[tool.name +
"-" +
idx.toString()]: `Only and always answer: 'I used the tool ${tool.displayName}, here is the result.' Don't add anything else.`,
});
} else {
for (const output of arrayedOutput) {
toolOutputs.push({
[tool.name + "-" + idx.toString()]: output,
});
}
}
})
);
// Surface each uploaded file to the client as a message update.
for (const file of files) {
yield {
type: MessageUpdateType.File,
name: file.name,
sha: file.value,
mime: file.mime,
};
}
return { outputs: toolOutputs, display: tool.showOutput };
};
}
// Tools declared via the TOOLS environment variable, parsed at startup.
export const toolFromConfigs = configTools.parse(JSON5.parse(env.TOOLS)) satisfies ConfigTool[];
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/tools/calculator.ts
|
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
import vm from "node:vm";
// Built-in tool: evaluates a basic arithmetic expression in a sandboxed VM
// context after stripping every character that is not part of such an
// expression.
const calculator: ConfigTool = {
	_id: new ObjectId("00000000000000000000000C"),
	type: "config",
	description: "Calculate the result of a mathematical expression",
	color: "blue",
	icon: "code",
	displayName: "Calculator",
	name: "calculator",
	endpoint: null,
	inputs: [
		{
			name: "equation",
			type: "str",
			description:
				"A mathematical expression to be evaluated. The result of the expression will be returned.",
			paramType: "required",
		},
	],
	outputComponent: null,
	outputComponentIdx: null,
	showOutput: false,
	async *call({ equation }) {
		try {
			// Only the last line of the input is evaluated; everything that is not
			// a digit, operator, parenthesis or decimal point is removed first.
			const lastLine = String(equation).split("\n").pop() ?? "";
			const query = lastLine.replace(/[^-()\d/*+.]/g, "");
			return {
				outputs: [{ calculator: `${query} = ${vm.runInNewContext(query)}` }],
			};
		} catch (cause) {
			throw new Error("Invalid expression", { cause });
		}
	},
};
export default calculator;
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/tools/utils.ts
|
import { env } from "$env/dynamic/private";
import { Client } from "@gradio/client";
import { SignJWT } from "jose";
import JSON5 from "json5";
import {
MessageToolUpdateType,
MessageUpdateType,
type MessageToolUpdate,
} from "$lib/types/MessageUpdate";
import { logger } from "$lib/server/logger";
/**
 * Calls an endpoint on a Hugging Face space through the Gradio client,
 * yielding ETA updates while the job is queued/running and returning the last
 * data payload the job produced.
 *
 * @param name space id (e.g. "user/space")
 * @param func endpoint name on the space
 * @param parameters positional arguments for the endpoint
 * @param ipToken optional signed per-user token (see getIpToken)
 * @param uuid id tying the emitted updates to one tool invocation
 */
export async function* callSpace<TInput extends unknown[], TOutput extends unknown[]>(
name: string,
func: string,
parameters: TInput,
ipToken: string | undefined,
uuid: string
): AsyncGenerator<MessageToolUpdate, TOutput, undefined> {
// Client subclass that attaches the per-user IP token to every request.
class CustomClient extends Client {
fetch(input: RequestInfo | URL, init?: RequestInit): Promise<Response> {
init = init || {};
init.headers = {
...(init.headers || {}),
...(ipToken ? { "X-IP-Token": ipToken } : {}),
};
return super.fetch(input, init);
}
}
const client = await CustomClient.connect(name, {
hf_token: ipToken // dont pass the hf token if we have an ip token
? undefined
: ((env.HF_TOKEN ?? env.HF_ACCESS_TOKEN) as unknown as `hf_${string}`),
events: ["status", "data"],
});
const job = client.submit(func, parameters);
let data;
// Keep the most recent data payload; surface errors and queue ETAs as they arrive.
for await (const output of job) {
if (output.type === "data") {
data = output.data as TOutput;
}
if (output.type === "status") {
if (output.stage === "error") {
logger.error(output.message);
throw new Error(output.message);
}
if (output.eta) {
yield {
type: MessageUpdateType.Tool,
subtype: MessageToolUpdateType.ETA,
eta: output.eta,
uuid,
};
}
}
}
if (!data) {
throw new Error("No data found in tool call");
}
return data;
}
/**
 * Signs a short-lived (1 minute) HS256 JWT carrying the caller's IP and, when
 * known, username. Returns undefined when no IP_TOKEN_SECRET is configured.
 */
export async function getIpToken(ip: string, username?: string) {
	const secret = env.IP_TOKEN_SECRET;
	if (!secret) {
		return;
	}
	const key = new TextEncoder().encode(secret);
	return new SignJWT({ ip, user: username })
		.setProtectedHeader({ alg: "HS256" })
		.setIssuedAt()
		.setExpirationTime("1m")
		.sign(key);
}
export { toolHasName } from "$lib/utils/tools";
/**
 * Extracts JSON values from model output: prefers ```json fenced blocks; when
 * none exist, falls back to the span between the first opening bracket and the
 * last closing bracket in the text. Each parsed value is flattened into the
 * returned array.
 *
 * Fix: the fallback previously used Math.max for the start index and Math.min
 * for the end index, so text containing both `{` and `[` (e.g. `{"a": [1]}`)
 * extracted only the inner span instead of the outermost JSON value. Now the
 * earliest opening and latest closing brackets are used.
 */
export async function extractJson(text: string): Promise<unknown[]> {
	const calls: string[] = [];
	let codeBlocks = Array.from(text.matchAll(/```json\n(.*?)```/gs))
		.map(([, block]) => block)
		// remove trailing comma
		.map((block) => block.trim().replace(/,$/, ""));

	// if there is no code block, try to find the first json object
	// by trimming the string and trying to parse with JSON5
	if (codeBlocks.length === 0) {
		// earliest opening bracket of either kind
		const start = [text.indexOf("["), text.indexOf("{")]
			.filter((i) => i !== -1)
			.reduce((a, b) => Math.min(a, b), Infinity);
		// latest closing bracket of either kind
		const end = [text.lastIndexOf("]"), text.lastIndexOf("}")]
			.filter((i) => i !== -1)
			.reduce((a, b) => Math.max(a, b), -Infinity);
		if (start === Infinity || end === -Infinity) {
			return [""];
		}
		const json = text.substring(start, end + 1);
		codeBlocks = [json];
	}

	// grab only the capture group from the regex match
	for (const block of codeBlocks) {
		// make it an array if it's not already
		let call = JSON5.parse(block);
		if (!Array.isArray(call)) {
			call = [call];
		}
		calls.push(call);
	}
	return calls.flat();
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/tools/outputs.ts
|
import type { ToolIOType, ToolOutputComponents } from "$lib/types/Tool";
/**
 * Maps each supported Gradio output component to its IO type and the JSONPath
 * used to extract values from the component's output payload.
 */
export const ToolOutputPaths: Record<
	ToolOutputComponents,
	{
		type: ToolIOType;
		path: string;
	}
> = {
	textbox: { type: "str", path: "$" },
	markdown: { type: "str", path: "$" },
	number: { type: "float", path: "$" },
	image: { type: "file", path: "$.url" },
	gallery: { type: "file", path: "$[*].image.url" },
	audio: { type: "file", path: "$.url" },
	video: { type: "file", path: "$.video.url" },
	file: { type: "file", path: "$.url" },
	json: { type: "str", path: "$" },
};

/** Type guard: true when the component name has a configured output path. */
export const isValidOutputComponent = (
	outputComponent: string
): outputComponent is keyof typeof ToolOutputPaths =>
	Object.prototype.hasOwnProperty.call(ToolOutputPaths, outputComponent);
|
0
|
hf_public_repos/chat-ui/src/lib/server/tools
|
hf_public_repos/chat-ui/src/lib/server/tools/web/url.ts
|
import { stringifyMarkdownElementTree } from "$lib/server/websearch/markdown/utils/stringify";
import { scrapeUrl } from "$lib/server/websearch/scrape/scrape";
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
// Built-in tool: scrapes a URL and returns the page title plus its content
// rendered as markdown text.
const fetchUrl: ConfigTool = {
// Fixed, well-known id so the tool can be referenced in preferences.
_id: new ObjectId("00000000000000000000000B"),
type: "config",
description: "Fetch the contents of a URL",
color: "blue",
icon: "cloud",
displayName: "Fetch URL",
name: "fetchUrl",
endpoint: null,
inputs: [
{
name: "url",
type: "str",
description: "The URL of the webpage to fetch",
paramType: "required",
},
],
outputComponent: null,
outputComponentIdx: null,
showOutput: false,
async *call({ url }) {
// Use only the last line of the input as the URL; earlier lines are ignored.
const blocks = String(url).split("\n");
const urlStr = blocks[blocks.length - 1];
// Infinity: no character limit on the scraped content.
const { title, markdownTree } = await scrapeUrl(urlStr, Infinity);
return {
outputs: [{ title, text: stringifyMarkdownElementTree(markdownTree) }],
display: false,
};
},
};
export default fetchUrl;
|
0
|
hf_public_repos/chat-ui/src/lib/server/tools
|
hf_public_repos/chat-ui/src/lib/server/tools/web/search.ts
|
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
import { runWebSearch } from "../../websearch/runWebSearch";
// Built-in tool: runs the websearch pipeline for a query and returns the
// gathered context, with an instruction telling the model to cite sources
// inline using bracketed indices.
const websearch: ConfigTool = {
// Fixed, well-known id so the tool can be referenced in preferences.
_id: new ObjectId("00000000000000000000000A"),
type: "config",
description: "Search the web for answers to the user's query",
color: "blue",
icon: "wikis",
displayName: "Web Search",
name: "websearch",
endpoint: null,
inputs: [
{
name: "query",
type: "str",
description:
"A search query which will be used to fetch the most relevant snippets regarding the user's query",
paramType: "required",
},
],
outputComponent: null,
outputComponentIdx: null,
showOutput: false,
async *call({ query }, { conv, assistant, messages }) {
// Delegates to the websearch pipeline; intermediate updates are yielded through.
const webSearchToolResults = yield* runWebSearch(conv, messages, assistant?.rag, String(query));
// Number each source so the model can reference them as [1], [2], ...
const webSearchContext = webSearchToolResults?.contextSources
.map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`)
.join("\n\n----------\n\n");
return {
outputs: [
{
websearch:
webSearchContext +
"\n\nWhen answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3].",
},
],
display: false,
};
},
};
export default websearch;
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/tools.ts
|
import { ToolResultStatus, type ToolCall, type Tool, type ToolResult } from "$lib/types/Tool";
import { v4 as uuidV4 } from "uuid";
import { getCallMethod, toolFromConfigs, type BackendToolContext } from "../tools";
import {
MessageToolUpdateType,
MessageUpdateStatus,
MessageUpdateType,
type MessageUpdate,
} from "$lib/types/MessageUpdate";
import type { TextGenerationContext } from "./types";
import directlyAnswer from "../tools/directlyAnswer";
import websearch from "../tools/web/search";
import { z } from "zod";
import { logger } from "../logger";
import { extractJson, toolHasName } from "../tools/utils";
import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators";
import { MetricsServer } from "../metrics";
import { stringifyError } from "$lib/utils/stringifyError";
import { collections } from "../database";
import { ObjectId } from "mongodb";
import type { Message } from "$lib/types/Message";
import type { Assistant } from "$lib/types/Assistant";
import { assistantHasWebSearch } from "./assistant";
/**
 * Resolves the list of active tools for a request from the user's preferences
 * and, when present, the assistant's own tool list. Assistants without tools
 * fall back to just directlyAnswer (plus websearch when RAG is enabled).
 *
 * Fix: `preferences` previously aliased `assistant.tools` and was then pushed
 * to, mutating the assistant's tool list in place on every call with RAG
 * enabled. The array is now copied before modification.
 */
export async function getTools(
	toolsPreference: Array<string>,
	assistant: Pick<Assistant, "rag" | "tools"> | undefined
): Promise<Tool[]> {
	let preferences = toolsPreference;

	if (assistant) {
		if (assistant?.tools?.length) {
			// Copy so pushing the websearch id does not mutate the assistant itself.
			preferences = [...assistant.tools];
			if (assistantHasWebSearch(assistant)) {
				preferences.push(websearch._id.toString());
			}
		} else {
			if (assistantHasWebSearch(assistant)) {
				return [websearch, directlyAnswer];
			}
			return [directlyAnswer];
		}
	}

	// filter based on tool preferences, add the tools that are on by default
	const activeConfigTools = toolFromConfigs.filter((el) => {
		if (el.isLocked && el.isOnByDefault && !assistant) return true;
		return preferences?.includes(el._id.toString()) ?? (el.isOnByDefault && !assistant);
	});

	// find tool where the id is in preferences
	const activeCommunityTools = await collections.tools
		.find({
			_id: { $in: preferences.map((el) => new ObjectId(el)) },
		})
		.toArray()
		.then((el) => el.map((el) => ({ ...el, call: getCallMethod(el) })));

	return [...activeConfigTools, ...activeCommunityTools];
}
/**
 * Runs a single tool call, streaming MessageUpdates describing its progress.
 *
 * Yields a `Call` update before execution, then either a `Result` or `Error`
 * update after. Returns the ToolResult, or undefined for the directly_answer
 * pseudo-tool (which is intentionally skipped).
 */
async function* callTool(
	ctx: BackendToolContext,
	tools: Tool[],
	call: ToolCall
): AsyncGenerator<MessageUpdate, ToolResult | undefined, undefined> {
	// uuid ties together the Call/Result/Error updates for this one invocation
	const uuid = uuidV4();

	const tool = tools.find((el) => toolHasName(call.name, el));
	if (!tool) {
		return { call, status: ToolResultStatus.Error, message: `Could not find tool "${call.name}"` };
	}

	// Special case for directly_answer tool where we ignore
	if (toolHasName(directlyAnswer.name, tool)) return;

	const startTime = Date.now();
	MetricsServer.getMetrics().tool.toolUseCount.inc({ tool: call.name });

	// Announce the call to the client before running the tool.
	yield {
		type: MessageUpdateType.Tool,
		subtype: MessageToolUpdateType.Call,
		uuid,
		call,
	};

	try {
		// Delegate to the tool; it may yield its own progress updates before returning.
		const toolResult = yield* tool.call(call.parameters, ctx, uuid);

		yield {
			type: MessageUpdateType.Tool,
			subtype: MessageToolUpdateType.Result,
			uuid,
			result: { ...toolResult, call, status: ToolResultStatus.Success },
		};

		MetricsServer.getMetrics().tool.toolUseDuration.observe(
			{ tool: call.name },
			Date.now() - startTime
		);

		// Increment the stored useCount (matches nothing for built-in config tools
		// that are not persisted in the collection — presumably a deliberate no-op).
		await collections.tools.findOneAndUpdate({ _id: tool._id }, { $inc: { useCount: 1 } });

		return { ...toolResult, call, status: ToolResultStatus.Success };
	} catch (error) {
		MetricsServer.getMetrics().tool.toolUseCountError.inc({ tool: call.name });
		logger.error(error, `Failed while running tool ${call.name}. ${stringifyError(error)}`);

		yield {
			type: MessageUpdateType.Tool,
			subtype: MessageToolUpdateType.Error,
			uuid,
			message:
				"An error occurred while calling the tool " + call.name + ": " + stringifyError(error),
		};

		return {
			call,
			status: ToolResultStatus.Error,
			message:
				"An error occurred while calling the tool " + call.name + ": " + stringifyError(error),
		};
	}
}
/**
 * Asks the model which tools to call for the current conversation, then runs
 * every requested call concurrently via callTool.
 *
 * Streams MessageUpdates from both the tool-picking step and the tool runs,
 * and returns the collected ToolResults (directly_answer results filtered out).
 */
export async function* runTools(
	ctx: TextGenerationContext,
	tools: Tool[],
	preprompt?: string
): AsyncGenerator<MessageUpdate, ToolResult[], undefined> {
	const { endpoint, conv, messages, assistant, ip, username } = ctx;
	const calls: ToolCall[] = [];

	const pickToolStartTime = Date.now();

	// Build the list of all available filenames, shaped as
	// {input|output}_{message index}_{file index}.{extension}
	// so the model can reference them as tool inputs.
	const files = messages.reduce((acc, curr, idx) => {
		if (curr.files) {
			// BUGFIX: was "ouput", which didn't match the "output" prefix used in
			// formattedMessages below, so the model saw two different names for the
			// same file.
			const prefix = (curr.from === "user" ? "input" : "output") + "_" + idx;
			acc.push(
				...curr.files.map(
					(file, fileIdx) => `${prefix}_${fileIdx}.${file?.name?.split(".")?.pop()?.toLowerCase()}`
				)
			);
		}
		return acc;
	}, [] as string[]);

	// Append each message's generated filenames to its content so the model can
	// see which files belong to which message.
	let formattedMessages = messages.map((message, msgIdx) => {
		let content = message.content;

		if (message.files && message.files.length > 0) {
			content +=
				"\n\nAdded files: \n - " +
				message.files
					.map((file, fileIdx) => {
						const prefix = message.from === "user" ? "input" : "output";
						const fileName = file.name.split(".").pop()?.toLowerCase();

						return `${prefix}_${msgIdx}_${fileIdx}.${fileName}`;
					})
					.join("\n - ");
		}

		return {
			...message,
			content,
		} satisfies Message;
	});

	const fileMsg = {
		id: crypto.randomUUID(),
		from: "system",
		content:
			"Here is the list of available filenames that can be used as input for tools. Use the filenames that are in this list. \n The filename structure is as follows : {input for user|output for tool}_{message index in the conversation}_{file index in the list of files}.{file extension} \n - " +
			files.join("\n - ") +
			"\n\n\n",
	} satisfies Message;

	// put fileMsg before last if files.length > 0
	formattedMessages = files.length
		? [...formattedMessages.slice(0, -1), fileMsg, ...formattedMessages.slice(-1)]
		: messages;

	// Ask the model which tools to use. File-typed inputs are surfaced as "str"
	// (filenames) since endpoints have no native file parameter type.
	for await (const output of await endpoint({
		messages: formattedMessages,
		preprompt,
		generateSettings: assistant?.generateSettings,
		tools: tools.map((tool) => ({
			...tool,
			inputs: tool.inputs.map((input) => ({
				...input,
				type: input.type === "file" ? "str" : input.type,
			})),
		})),
		conversationId: conv._id,
	})) {
		// model natively supports tool calls
		if (output.token.toolCalls) {
			calls.push(...output.token.toolCalls);
			continue;
		}

		// look for code blocks of ```json and parse them;
		// if they're valid json, add them to the calls array
		if (output.generated_text) {
			try {
				const rawCalls = await extractJson(output.generated_text);
				const newCalls = rawCalls
					.filter(isExternalToolCall)
					.map((call) => externalToToolCall(call, tools))
					.filter((call) => call !== undefined) as ToolCall[];

				calls.push(...newCalls);
			} catch (e) {
				logger.error(e, "Error while parsing tool calls, please retry");
				// error parsing the calls
				yield {
					type: MessageUpdateType.Status,
					status: MessageUpdateStatus.Error,
					message: "Error while parsing tool calls, please retry",
				};
			}
		}
	}

	MetricsServer.getMetrics().tool.timeToChooseTools.observe(
		{ model: conv.model },
		Date.now() - pickToolStartTime
	);

	const toolContext: BackendToolContext = { conv, messages, preprompt, assistant, ip, username };
	// Run all requested tools concurrently, merging their update streams.
	const toolResults: (ToolResult | undefined)[] = yield* mergeAsyncGenerators(
		calls.map((call) => callTool(toolContext, tools, call))
	);
	return toolResults.filter((result): result is ToolResult => result !== undefined);
}
// Shape of a tool call emitted by models without native tool-call support,
// parsed out of a ```json code block in the generated text.
const externalToolCall = z.object({
	tool_name: z.string(),
	parameters: z.record(z.any()),
});
type ExternalToolCall = z.infer<typeof externalToolCall>;

// Type guard backed by the zod schema above.
function isExternalToolCall(call: unknown): call is ExternalToolCall {
	return externalToolCall.safeParse(call).success;
}
/**
 * Converts a model-emitted (JSON-parsed) tool call into an internal ToolCall.
 *
 * Returns undefined (skipping the call) when the tool doesn't exist or a
 * required parameter is missing. Optional parameters fall back to their
 * declared defaults.
 */
function externalToToolCall(call: ExternalToolCall, tools: Tool[]): ToolCall | undefined {
	// toolHasName also maps - to _ since some models insist on using _ instead of -
	const tool = tools.find((candidate) => toolHasName(call.tool_name, candidate));
	if (!tool) {
		logger.debug(`Model requested tool that does not exist: "${call.tool_name}". Skipping tool...`);
		return;
	}

	const parameters: Record<string, string> = {};
	for (const input of tool.inputs) {
		const provided = call.parameters[input.name];

		if (input.paramType === "required") {
			// A missing required parameter aborts the whole call.
			if (provided === undefined) {
				logger.debug(
					`Model requested tool "${call.tool_name}" but was missing required parameter "${input.name}". Skipping tool...`
				);
				return;
			}
			parameters[input.name] = provided;
			continue;
		}

		parameters[input.name] = provided;
		if (input.paramType === "optional") {
			// Substitute the declared default when the model omitted the value.
			parameters[input.name] ??= input.default.toString();
		}
	}

	return {
		name: call.tool_name,
		parameters,
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/assistant.ts
|
import { isURLLocal } from "../isURLLocal";
import { env } from "$env/dynamic/private";
import { collections } from "$lib/server/database";
import type { Assistant } from "$lib/types/Assistant";
import type { ObjectId } from "mongodb";
/**
 * Expands every {{url=...}} template in a dynamic preprompt with the fetched
 * page body. On failure (bad URL, non-OK status, or a local address while
 * ENABLE_LOCAL_FETCH is off) the template is replaced with the error message.
 */
export async function processPreprompt(preprompt: string) {
	const urlRegex = /{{\s?url=(.*?)\s?}}/g;

	for (const match of preprompt.matchAll(urlRegex)) {
		const [placeholder, rawUrl] = match;
		try {
			const url = new URL(rawUrl);

			// Block URLs resolving to local addresses unless explicitly allowed.
			const resolvesLocally = await isURLLocal(url);
			if (resolvesLocally && env.ENABLE_LOCAL_FETCH !== "true") {
				throw new Error("URL couldn't be fetched, it resolved to a local address.");
			}

			const res = await fetch(url.href);
			if (!res.ok) {
				throw new Error("URL couldn't be fetched, error " + res.status);
			}
			preprompt = preprompt.replaceAll(placeholder, await res.text());
		} catch (e) {
			preprompt = preprompt.replaceAll(placeholder, (e as Error).message);
		}
	}

	return preprompt;
}
/** Fetches only the assistant fields needed by text generation; undefined when not found. */
export async function getAssistantById(id?: ObjectId) {
	const assistant = await collections.assistants.findOne<
		Pick<Assistant, "rag" | "dynamicPrompt" | "generateSettings" | "tools">
	>({ _id: id }, { projection: { rag: 1, dynamicPrompt: 1, generateSettings: 1, tools: 1 } });
	return assistant ?? undefined;
}
/** True when assistant RAG is globally enabled and the assistant allows at least one source. */
export function assistantHasWebSearch(assistant?: Pick<Assistant, "rag"> | null) {
	if (env.ENABLE_ASSISTANTS_RAG !== "true") return false;

	const rag = assistant?.rag;
	if (!rag) return false;

	return rag.allowedLinks.length > 0 || rag.allowedDomains.length > 0 || rag.allowAllDomains;
}
/** Dynamic {{url=...}} prompts are gated behind the same flag as assistant RAG. */
export function assistantHasDynamicPrompt(assistant?: Pick<Assistant, "dynamicPrompt">) {
	if (env.ENABLE_ASSISTANTS_RAG !== "true") return false;
	return Boolean(assistant?.dynamicPrompt);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/generate.ts
|
import type { ToolResult } from "$lib/types/Tool";
import {
MessageReasoningUpdateType,
MessageUpdateType,
type MessageUpdate,
} from "$lib/types/MessageUpdate";
import { AbortedGenerations } from "../abortedGenerations";
import type { TextGenerationContext } from "./types";
import type { EndpointMessage } from "../endpoints/endpoints";
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint";
import { generateSummaryOfReasoning } from "./reasoning";
/** TextGenerationContext whose messages have already been preprocessed for the endpoint. */
type GenerateContext = Omit<TextGenerationContext, "messages"> & { messages: EndpointMessage[] };

/**
 * Streams the model's answer as MessageUpdates.
 *
 * Supports three reasoning modes (model.reasoning.type):
 *  - "regex": the final answer is extracted from the reasoning buffer via a regex
 *  - "summarize": the reasoning buffer is summarized by the default endpoint
 *  - "tokens": reasoning is delimited by begin/end tokens in the token stream
 * Periodic reasoning status summaries are generated while reasoning streams.
 */
export async function* generate(
	{ model, endpoint, conv, messages, assistant, isContinue, promptedAt }: GenerateContext,
	toolResults: ToolResult[],
	preprompt?: string
): AsyncIterable<MessageUpdate> {
	// reasoning mode is false by default
	let reasoning = false;
	let reasoningBuffer = "";
	let lastReasoningUpdate = new Date();
	let status = "";
	const startTime = new Date();
	if (
		model.reasoning &&
		(model.reasoning.type === "regex" || model.reasoning.type === "summarize")
	) {
		// if the model has reasoning in regex or summarize mode, it starts in reasoning mode
		// and we extract the answer from the reasoning
		reasoning = true;
		yield {
			type: MessageUpdateType.Reasoning,
			subtype: MessageReasoningUpdateType.Status,
			status: "Started reasoning...",
		};
	}

	for await (const output of await endpoint({
		messages,
		preprompt,
		continueMessage: isContinue,
		generateSettings: assistant?.generateSettings,
		toolResults,
		isMultimodal: model.multimodal,
		conversationId: conv._id,
	})) {
		// text generation completed
		if (output.generated_text) {
			// Interrupted unless the stream ended on a special token or a configured stop token.
			let interrupted =
				!output.token.special && !model.parameters.stop?.includes(output.token.text);

			// Strip any trailing stop token from the final text.
			let text = output.generated_text.trimEnd();
			for (const stopToken of model.parameters.stop ?? []) {
				if (!text.endsWith(stopToken)) continue;

				interrupted = false;
				text = text.slice(0, text.length - stopToken.length);
			}

			let finalAnswer = text;
			if (model.reasoning && model.reasoning.type === "regex") {
				// Pull the answer (first capture group) out of the reasoning buffer; fall back to raw text.
				const regex = new RegExp(model.reasoning.regex);
				finalAnswer = regex.exec(reasoningBuffer)?.[1] ?? text;
			} else if (model.reasoning && model.reasoning.type === "summarize") {
				yield {
					type: MessageUpdateType.Reasoning,
					subtype: MessageReasoningUpdateType.Status,
					status: "Summarizing reasoning...",
				};
				// Ask the default endpoint to condense the reasoning into the final answer.
				const summary = yield* generateFromDefaultEndpoint({
					messages: [
						{
							from: "user",
							content: `Question: ${
								messages[messages.length - 1].content
							}\n\nReasoning: ${reasoningBuffer}`,
						},
					],
					preprompt: `Your task is to summarize concisely all your reasoning steps and then give the final answer. Keep it short, one short paragraph at most. If the reasoning steps explicitly include a code solution, make sure to include it in your answer.
If the user is just having a casual conversation that doesn't require explanations, answer directly without explaining your steps, otherwise make sure to summarize step by step, make sure to skip dead-ends in your reasoning and removing excess detail.
Do not use prefixes such as Response: or Answer: when answering to the user.`,
					generateSettings: {
						max_new_tokens: 1024,
					},
				});
				finalAnswer = summary;
				yield {
					type: MessageUpdateType.Reasoning,
					subtype: MessageReasoningUpdateType.Status,
					status: `Done in ${Math.round((new Date().getTime() - startTime.getTime()) / 1000)}s.`,
				};
			}

			yield {
				type: MessageUpdateType.FinalAnswer,
				text: finalAnswer,
				interrupted,
				webSources: output.webSources,
			};
			continue;
		}

		// Token-delimited reasoning: toggle reasoning mode on the begin/end tokens.
		if (model.reasoning && model.reasoning.type === "tokens") {
			if (output.token.text === model.reasoning.beginToken) {
				reasoning = true;
				reasoningBuffer += output.token.text;
				yield {
					type: MessageUpdateType.Reasoning,
					subtype: MessageReasoningUpdateType.Status,
					status: "Started thinking...",
				};
			} else if (output.token.text === model.reasoning.endToken) {
				reasoning = false;
				reasoningBuffer += output.token.text;
				yield {
					type: MessageUpdateType.Reasoning,
					subtype: MessageReasoningUpdateType.Status,
					status: `Done in ${Math.round((new Date().getTime() - startTime.getTime()) / 1000)}s.`,
				};
			}
		}

		// ignore special tokens
		if (output.token.special) continue;

		// pass down normal token
		if (reasoning) {
			reasoningBuffer += output.token.text;

			// yield status update if it has changed (filled in asynchronously below)
			if (status !== "") {
				yield {
					type: MessageUpdateType.Reasoning,
					subtype: MessageReasoningUpdateType.Status,
					status,
				};
				status = "";
			}

			// create a new status every 5 seconds
			// (fire-and-forget: the summary lands in `status` and is yielded on a later token)
			if (new Date().getTime() - lastReasoningUpdate.getTime() > 4000) {
				lastReasoningUpdate = new Date();
				generateSummaryOfReasoning(reasoningBuffer).then((summary) => {
					status = summary;
				});
			}
			yield {
				type: MessageUpdateType.Reasoning,
				subtype: MessageReasoningUpdateType.Stream,
				token: output.token.text,
			};
		} else {
			yield { type: MessageUpdateType.Stream, token: output.token.text };
		}

		// abort check: stop streaming if the user aborted this conversation after the prompt
		const date = AbortedGenerations.getInstance().getList().get(conv._id.toString());
		if (date && date > promptedAt) break;

		// no output check
		if (!output) break;
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/types.ts
|
import type { ProcessedModel } from "../models";
import type { Endpoint } from "../endpoints/endpoints";
import type { Conversation } from "$lib/types/Conversation";
import type { Message } from "$lib/types/Message";
import type { Assistant } from "$lib/types/Assistant";
/** Shared context threaded through the whole text-generation pipeline. */
export interface TextGenerationContext {
	model: ProcessedModel;
	endpoint: Endpoint;
	conv: Conversation;
	// Conversation history; preprocessed (files, websearch context) before hitting the endpoint.
	messages: Message[];
	assistant?: Pick<Assistant, "rag" | "dynamicPrompt" | "generateSettings" | "tools">;
	// True when extending the previous assistant message instead of starting a new one.
	isContinue: boolean;
	// Whether the user toggled web search for this generation.
	webSearch: boolean;
	// Ids of the tools the user enabled (stringified ObjectIds).
	toolsPreference: Array<string>;
	// When the prompt was sent; compared against abort timestamps to stop streaming.
	promptedAt: Date;
	ip: string;
	username?: string;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/reasoning.ts
|
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint";
import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator";
/**
 * Summarizes the tail of an in-progress reasoning stream into one short
 * gerund-style status sentence ("Thinking about X...").
 *
 * Only the last 200 characters of the buffer are sent to keep the call cheap.
 * BUGFIX: removed a leftover artificial delay (the comment said "debug 5s
 * delay" with a hard-coded 3s setTimeout) that slowed every status update.
 */
export async function generateSummaryOfReasoning(buffer: string): Promise<string> {
	const summary = await getReturnFromGenerator(
		generateFromDefaultEndpoint({
			messages: [
				{
					from: "user",
					content: buffer.slice(-200),
				},
			],
			preprompt: `You are tasked with summarizing the latest reasoning steps. Never describe results of the reasoning, only the process. Remain vague in your summary.
The text might be incomplete, try your best to summarize it in one very short sentence, starting with a gerund and ending with three points.
Example: "Thinking about life...", "Summarizing the results...", "Processing the input..."`,
			generateSettings: {
				max_new_tokens: 50,
			},
		})
	).then((summary) => {
		// Keep only the first sentence fragment and normalize the trailing ellipsis.
		const parts = summary.split("...");
		return parts[0] + "...";
	});
	return summary;
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/title.ts
|
import { env } from "$env/dynamic/private";
import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint";
import type { EndpointMessage } from "../endpoints/endpoints";
import { logger } from "$lib/server/logger";
import { MessageUpdateType, type MessageUpdate } from "$lib/types/MessageUpdate";
import type { Conversation } from "$lib/types/Conversation";
import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator";
/**
 * Yields a Title update for new conversations ("New Chat" with a user message).
 * Best-effort: failures are logged and no update is yielded.
 */
export async function* generateTitleForConversation(
	conv: Conversation
): AsyncGenerator<MessageUpdate, undefined, undefined> {
	try {
		const userMessage = conv.messages.find((m) => m.from === "user");
		// HACK: detect if the conversation is new
		if (conv.title !== "New Chat" || !userMessage) return;

		const prompt = userMessage.content;
		const title = (await generateTitle(prompt)) ?? "New Chat";

		yield {
			type: MessageUpdateType.Title,
			title,
		};
	} catch (cause) {
		// BUGFIX: message typo ("whilte" -> "while"); also construct Error with `new`.
		logger.error(new Error("Failed while generating title for conversation", { cause }));
	}
}
/**
 * Generates a short emoji-prefixed title summarizing a prompt.
 *
 * When LLM_SUMMARIZATION is disabled, falls back to the first five words of
 * the prompt. Returns null when the default-endpoint call fails.
 */
export async function generateTitle(prompt: string) {
	if (env.LLM_SUMMARIZATION !== "true") {
		return prompt.split(/\s+/g).slice(0, 5).join(" ");
	}

	// Few-shot examples steering the model towards "<emoji> <short summary>" titles.
	const messages: Array<EndpointMessage> = [
		{
			from: "system",
			content:
				"You are a summarization AI. You'll never answer a user's question directly, but instead summarize the user's request into a single short sentence of four words or less. Always start your answer with an emoji relevant to the summary",
		},
		{ from: "user", content: "Who is the president of Gabon?" },
		{ from: "assistant", content: "🇬🇦 President of Gabon" },
		{ from: "user", content: "Who is Julien Chaumond?" },
		{ from: "assistant", content: "🧑 Julien Chaumond" },
		{ from: "user", content: "what is 1 + 1?" },
		{ from: "assistant", content: "🔢 Simple math operation" },
		{ from: "user", content: "What are the latest news?" },
		{ from: "assistant", content: "📰 Latest news" },
		{ from: "user", content: "How to make a great cheesecake?" },
		{ from: "assistant", content: "🍰 Cheesecake recipe" },
		{ from: "user", content: "what is your favorite movie? do a short answer." },
		{ from: "assistant", content: "🎥 Favorite movie" },
		{ from: "user", content: "Explain the concept of artificial intelligence in one sentence" },
		{ from: "assistant", content: "🤖 AI definition" },
		{ from: "user", content: "Draw a cute cat" },
		{ from: "assistant", content: "🐱 Cute cat drawing" },
		{ from: "user", content: prompt },
	];

	return await getReturnFromGenerator(
		generateFromDefaultEndpoint({
			messages,
			preprompt:
				"You are a summarization AI. Summarize the user's request into a single short sentence of four words or less. Do not try to answer it, only summarize the user's query. Always start your answer with an emoji relevant to the summary",
			generateSettings: {
				max_new_tokens: 15,
			},
		})
	)
		.then((summary) => {
			// add an emoji if none is found in the first three characters
			if (!/\p{Emoji}/u.test(summary.slice(0, 3))) {
				return "💬 " + summary;
			}
			return summary;
		})
		.catch((e) => {
			logger.error(e);
			return null;
		});
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/textGeneration/index.ts
|
import { runWebSearch } from "$lib/server/websearch/runWebSearch";
import { preprocessMessages } from "../endpoints/preprocessMessages";
import { generateTitleForConversation } from "./title";
import {
assistantHasDynamicPrompt,
assistantHasWebSearch,
getAssistantById,
processPreprompt,
} from "./assistant";
import { getTools, runTools } from "./tools";
import type { WebSearch } from "$lib/types/WebSearch";
import {
type MessageUpdate,
MessageUpdateType,
MessageUpdateStatus,
} from "$lib/types/MessageUpdate";
import { generate } from "./generate";
import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators";
import type { TextGenerationContext } from "./types";
import type { ToolResult } from "$lib/types/Tool";
import { toolHasName } from "../tools/utils";
/** Emits a KeepAlive status every 100ms until the abort signal fires. */
async function* keepAlive(done: AbortSignal): AsyncGenerator<MessageUpdate, undefined, undefined> {
	for (;;) {
		if (done.aborted) return;
		yield {
			type: MessageUpdateType.Status,
			status: MessageUpdateStatus.KeepAlive,
		};
		await new Promise((resolve) => setTimeout(resolve, 100));
	}
}
export async function* textGeneration(ctx: TextGenerationContext) {
const done = new AbortController();
const titleGen = generateTitleForConversation(ctx.conv);
const textGen = textGenerationWithoutTitle(ctx, done);
const keepAliveGen = keepAlive(done.signal);
// keep alive until textGen is done
yield* mergeAsyncGenerators([titleGen, textGen, keepAliveGen]);
}
/**
 * The generation pipeline minus title generation: optional web search,
 * dynamic-prompt expansion, tool calling, then the model generation itself.
 * Aborts `done` when complete so the keep-alive generator can stop.
 */
async function* textGenerationWithoutTitle(
	ctx: TextGenerationContext,
	done: AbortController
): AsyncGenerator<MessageUpdate, undefined, undefined> {
	yield {
		type: MessageUpdateType.Status,
		status: MessageUpdateStatus.Started,
	};

	// Lazily resolve the assistant if the caller didn't provide it.
	ctx.assistant ??= await getAssistantById(ctx.conv.assistantId);
	const { model, conv, messages, assistant, isContinue, webSearch, toolsPreference } = ctx;
	const convId = conv._id;

	let webSearchResult: WebSearch | undefined;

	// run websearch if:
	// - it's not continuing a previous message
	// - AND the model doesn't support tools and websearch is selected
	// - OR the assistant has websearch enabled (no tools for assistants for now)
	if (!isContinue && ((webSearch && !conv.assistantId) || assistantHasWebSearch(assistant))) {
		webSearchResult = yield* runWebSearch(conv, messages, assistant?.rag);
	}

	let preprompt = conv.preprompt;
	if (assistantHasDynamicPrompt(assistant) && preprompt) {
		// Expand {{url=...}} templates, and reflect the result into the system message.
		preprompt = await processPreprompt(preprompt);
		if (messages[0].from === "system") messages[0].content = preprompt;
	}

	let toolResults: ToolResult[] = [];

	if (model.tools) {
		const tools = await getTools(toolsPreference, ctx.assistant);
		// Only run the tool loop when something besides directly_answer is available.
		const toolCallsRequired = tools.some((tool) => !toolHasName("directly_answer", tool));
		if (toolCallsRequired) toolResults = yield* runTools(ctx, tools, preprompt);
	}

	const processedMessages = await preprocessMessages(messages, webSearchResult, convId);
	yield* generate({ ...ctx, messages: processedMessages }, toolResults, preprompt);
	done.abort();
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/endpoints/preprocessMessages.ts
|
import type { Message } from "$lib/types/Message";
import { format } from "date-fns";
import type { EndpointMessage } from "./endpoints";
import { downloadFile } from "../files/downloadFile";
import type { ObjectId } from "mongodb";
/**
 * Prepares conversation messages for an endpoint: injects web-search context
 * into the last message, resolves file references, and inlines clipboard text.
 */
export async function preprocessMessages(
	messages: Message[],
	webSearch: Message["webSearch"],
	convId: ObjectId
): Promise<EndpointMessage[]> {
	const withSearchContext = addWebSearchContext(messages, webSearch);
	const withFiles = await downloadFiles(withSearchContext, convId);
	return injectClipboardFiles(withFiles);
}
/**
 * Rewrites the last message to embed the web-search context sources, the
 * current date, citation instructions and previous user questions.
 * Returns the messages unchanged when there is no usable context.
 */
function addWebSearchContext(messages: Message[], webSearch: Message["webSearch"]) {
	const webSearchContext = webSearch?.contextSources
		.map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`)
		.join("\n\n----------\n\n");

	// No web search context available, skip
	if (!webSearch || !webSearchContext?.trim()) return messages;
	// No messages available, skip
	if (messages.length === 0) return messages;

	const lastQuestion = messages.findLast((el) => el.from === "user")?.content ?? "";
	// All earlier user questions, to give the model conversational context.
	const previousQuestions = messages
		.filter((el) => el.from === "user")
		.slice(0, -1)
		.map((el) => el.content);
	const currentDate = format(new Date(), "MMMM d, yyyy");

	const finalMessage = {
		...messages[messages.length - 1],
		content: `I searched the web using the query: ${webSearch.searchQuery}.
Today is ${currentDate} and here are the results.
When answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3].
=====================
${webSearchContext}
=====================
${previousQuestions.length > 0 ? `Previous questions: \n- ${previousQuestions.join("\n- ")}` : ""}
Answer the question: ${lastQuestion}`,
	};

	return [...messages.slice(0, -1), finalMessage];
}
/** Resolves every file reference of every message in parallel. */
async function downloadFiles(messages: Message[], convId: ObjectId): Promise<EndpointMessage[]> {
	return Promise.all(
		messages.map(async (message): Promise<EndpointMessage> => {
			const files = await Promise.all(
				(message.files ?? []).map((file) => downloadFile(file.value, convId))
			);
			return { ...message, files };
		})
	);
}
/**
 * Inlines clipboard pseudo-files (mime application/vnd.chatui.clipboard) into
 * the message text and removes them from the file list.
 */
async function injectClipboardFiles(messages: EndpointMessage[]) {
	const clipboardMime = "application/vnd.chatui.clipboard";

	return Promise.all(
		messages.map((message) => {
			const plaintextFiles = message.files
				?.filter((file) => file.mime === clipboardMime)
				.map((file) => Buffer.from(file.value, "base64").toString("utf-8"));

			if (!plaintextFiles || plaintextFiles.length === 0) return message;

			return {
				...message,
				content: `${plaintextFiles.join("\n\n")}\n\n${message.content}`,
				files: message.files?.filter((file) => file.mime !== clipboardMime),
			};
		})
	);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/endpoints/document.ts
|
import type { MessageFile } from "$lib/types/Message";
import { z } from "zod";
/** Limits applied when validating a document file. */
export interface FileProcessorOptions<TMimeType extends string = string> {
	supportedMimeTypes: TMimeType[];
	maxSizeInMB: number;
}
// NOTE(review): named ImageProcessor but this is the document module — presumably
// copy-pasted from images.ts; confirm whether it should be renamed/removed.
export type ImageProcessor<TMimeType extends string = string> = (file: MessageFile) => Promise<{
	file: Buffer;
	mime: TMimeType;
}>;

/**
 * Builds a zod validator for FileProcessorOptions with the given defaults.
 * The supported mime list is encoded as a zod enum so configured values are
 * checked against the defaults' literal types.
 */
export const createDocumentProcessorOptionsValidator = <TMimeType extends string = string>(
	defaults: FileProcessorOptions<TMimeType>
) => {
	return z
		.object({
			supportedMimeTypes: z
				.array(
					z.enum<string, [TMimeType, ...TMimeType[]]>([
						defaults.supportedMimeTypes[0],
						...defaults.supportedMimeTypes.slice(1),
					])
				)
				.default(defaults.supportedMimeTypes),
			maxSizeInMB: z.number().positive().default(defaults.maxSizeInMB),
		})
		.default(defaults);
};
// Synchronous variant. NOTE(review): appears unused in this file — verify callers.
export type DocumentProcessor<TMimeType extends string = string> = (file: MessageFile) => {
	file: Buffer;
	mime: TMimeType;
};

/** Async processor returned by makeDocumentProcessor: decodes, validates, returns the file. */
export type AsyncDocumentProcessor<TMimeType extends string = string> = (
	file: MessageFile
) => Promise<{
	file: Buffer;
	mime: TMimeType;
}>;
/**
 * Creates a document processor that base64-decodes a file, enforces the size
 * limit (MB interpreted as 10^6 bytes) and validates its mime type.
 * @throws when the document exceeds the limit or has an unsupported mime type
 */
export function makeDocumentProcessor<TMimeType extends string = string>(
	options: FileProcessorOptions<TMimeType>
): AsyncDocumentProcessor<TMimeType> {
	return async (file) => {
		const { supportedMimeTypes, maxSizeInMB } = options;

		// Files arrive base64-encoded; decode before validating.
		const buffer = Buffer.from(file.value, "base64");
		if (buffer.byteLength > maxSizeInMB * 1000 * 1000) {
			throw Error("Document is too large");
		}

		return { file: buffer, mime: validateMimeType(supportedMimeTypes, file.mime) };
	};
}
/** Returns `mime` when it is in the supported list, otherwise throws. */
const validateMimeType = <T extends readonly string[]>(
	supportedMimes: T,
	mime: string
): T[number] => {
	if (supportedMimes.includes(mime)) {
		return mime;
	}
	const supportedMimesStr = supportedMimes.join(", ");
	throw Error(`Mimetype "${mime}" not found in supported mimes: ${supportedMimesStr}`);
};
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/endpoints/endpoints.ts
|
import type { Conversation } from "$lib/types/Conversation";
import type { Message } from "$lib/types/Message";
import type { TextGenerationStreamOutput, TextGenerationStreamToken } from "@huggingface/inference";
import { endpointTgi, endpointTgiParametersSchema } from "./tgi/endpointTgi";
import { z } from "zod";
import endpointAws, { endpointAwsParametersSchema } from "./aws/endpointAws";
import { endpointOAIParametersSchema, endpointOai } from "./openai/endpointOai";
import endpointLlamacpp, { endpointLlamacppParametersSchema } from "./llamacpp/endpointLlamacpp";
import endpointOllama, { endpointOllamaParametersSchema } from "./ollama/endpointOllama";
import endpointVertex, { endpointVertexParametersSchema } from "./google/endpointVertex";
import endpointGenAI, { endpointGenAIParametersSchema } from "./google/endpointGenAI";
import { endpointBedrock, endpointBedrockParametersSchema } from "./aws/endpointBedrock";
import {
endpointAnthropic,
endpointAnthropicParametersSchema,
} from "./anthropic/endpointAnthropic";
import {
endpointAnthropicVertex,
endpointAnthropicVertexParametersSchema,
} from "./anthropic/endpointAnthropicVertex";
import type { Model } from "$lib/types/Model";
import endpointCloudflare, {
endpointCloudflareParametersSchema,
} from "./cloudflare/endpointCloudflare";
import { endpointCohere, endpointCohereParametersSchema } from "./cohere/endpointCohere";
import endpointLangserve, {
endpointLangserveParametersSchema,
} from "./langserve/endpointLangserve";
import type { Tool, ToolCall, ToolResult } from "$lib/types/Tool";
import type { ObjectId } from "mongodb";
/** Message as sent to an endpoint: a conversation Message without its DB id. */
export type EndpointMessage = Omit<Message, "id">;

// parameters passed when generating text
export interface EndpointParameters {
	messages: EndpointMessage[];
	preprompt?: Conversation["preprompt"];
	continueMessage?: boolean; // used to signal that the last message will be extended
	generateSettings?: Partial<Model["parameters"]>;
	tools?: Tool[];
	toolResults?: ToolResult[];
	isMultimodal?: boolean;
	conversationId?: ObjectId;
}

interface CommonEndpoint {
	// NOTE(review): presumably the weight for randomly picking among a model's
	// endpoints (as done for embedding endpoints) — confirm in models.ts
	weight: number;
}

/** Stream output extended with native tool calls and web sources, when the backend provides them. */
export type TextGenerationStreamOutputWithToolsAndWebSources = TextGenerationStreamOutput & {
	token: TextGenerationStreamToken & { toolCalls?: ToolCall[] };
	webSources?: { uri: string; title: string }[];
};

// type signature for the endpoint
export type Endpoint = (
	params: EndpointParameters
) => Promise<AsyncGenerator<TextGenerationStreamOutputWithToolsAndWebSources, void, void>>;

// generator function that takes in parameters for defining the endpoint and return the endpoint
export type EndpointGenerator<T extends CommonEndpoint> = (parameters: T) => Endpoint;

// list of all endpoint generators, keyed by the config "type" field
export const endpoints = {
	tgi: endpointTgi,
	anthropic: endpointAnthropic,
	anthropicvertex: endpointAnthropicVertex,
	bedrock: endpointBedrock,
	aws: endpointAws,
	openai: endpointOai,
	llamacpp: endpointLlamacpp,
	ollama: endpointOllama,
	vertex: endpointVertex,
	genai: endpointGenAI,
	cloudflare: endpointCloudflare,
	cohere: endpointCohere,
	langserve: endpointLangserve,
};

// discriminated union of every endpoint's config schema, discriminated on "type"
export const endpointSchema = z.discriminatedUnion("type", [
	endpointAnthropicParametersSchema,
	endpointAnthropicVertexParametersSchema,
	endpointAwsParametersSchema,
	endpointBedrockParametersSchema,
	endpointOAIParametersSchema,
	endpointTgiParametersSchema,
	endpointLlamacppParametersSchema,
	endpointOllamaParametersSchema,
	endpointVertexParametersSchema,
	endpointGenAIParametersSchema,
	endpointCloudflareParametersSchema,
	endpointCohereParametersSchema,
	endpointLangserveParametersSchema,
]);
export default endpoints;
|
0
|
hf_public_repos/chat-ui/src/lib/server
|
hf_public_repos/chat-ui/src/lib/server/endpoints/images.ts
|
import type { Sharp } from "sharp";
import sharp from "sharp";
import type { MessageFile } from "$lib/types/Message";
import { z, type util } from "zod";
/** Limits and format preferences applied when processing an uploaded image. */
export interface ImageProcessorOptions<TMimeType extends string = string> {
	supportedMimeTypes: TMimeType[];
	preferredMimeType: TMimeType;
	maxSizeInMB: number;
	maxWidth: number;
	maxHeight: number;
}
/** Async processor returned by makeImageProcessor: resizes/converts and returns the image. */
export type ImageProcessor<TMimeType extends string = string> = (file: MessageFile) => Promise<{
	image: Buffer;
	mime: TMimeType;
}>;

/**
 * Builds a zod validator for ImageProcessorOptions with the given defaults.
 * Mime lists are encoded as zod enums so configured values are checked
 * against the defaults' literal types.
 */
export function createImageProcessorOptionsValidator<TMimeType extends string = string>(
	defaults: ImageProcessorOptions<TMimeType>
) {
	return z
		.object({
			supportedMimeTypes: z
				.array(
					z.enum<string, [TMimeType, ...TMimeType[]]>([
						defaults.supportedMimeTypes[0],
						...defaults.supportedMimeTypes.slice(1),
					])
				)
				.default(defaults.supportedMimeTypes),
			preferredMimeType: z
				.enum([defaults.supportedMimeTypes[0], ...defaults.supportedMimeTypes.slice(1)])
				.default(defaults.preferredMimeType as util.noUndefined<TMimeType>),
			maxSizeInMB: z.number().positive().default(defaults.maxSizeInMB),
			maxWidth: z.number().int().positive().default(defaults.maxWidth),
			maxHeight: z.number().int().positive().default(defaults.maxHeight),
		})
		.default(defaults);
}
/**
 * Creates an image processor that base64-decodes a file, then resizes and/or
 * converts it with sharp so it fits within the configured dimension and byte
 * limits (MB interpreted as 10^6 bytes).
 */
export function makeImageProcessor<TMimeType extends string = string>(
	options: ImageProcessorOptions<TMimeType>
): ImageProcessor<TMimeType> {
	return async (file) => {
		const { supportedMimeTypes, preferredMimeType, maxSizeInMB, maxWidth, maxHeight } = options;
		const { mime, value } = file;

		// Images arrive base64-encoded.
		const buffer = Buffer.from(value, "base64");
		let sharpInst = sharp(buffer);

		const metadata = await sharpInst.metadata();
		if (!metadata) throw Error("Failed to read image metadata");
		const { width, height } = metadata;
		if (width === undefined || height === undefined) throw Error("Failed to read image size");

		const tooLargeInSize = width > maxWidth || height > maxHeight;
		const tooLargeInBytes = buffer.byteLength > maxSizeInMB * 1000 * 1000;

		// Pick the output format first, since the target size depends on it.
		const outputMime = chooseMimeType(supportedMimeTypes, preferredMimeType, mime, {
			preferSizeReduction: tooLargeInBytes,
		});

		// Resize if necessary
		if (tooLargeInSize || tooLargeInBytes) {
			const size = chooseImageSize({
				mime: outputMime,
				width,
				height,
				maxWidth,
				maxHeight,
				maxSizeInMB,
			});
			if (size.width !== width || size.height !== height) {
				sharpInst = resizeImage(sharpInst, size.width, size.height);
			}
		}

		// Convert format if necessary
		// We always want to convert the image when the file was too large in bytes
		// so we can guarantee that ideal options are used, which are expected when
		// choosing the image size
		if (outputMime !== mime || tooLargeInBytes) {
			sharpInst = convertImage(sharpInst, outputMime);
		}

		const processedImage = await sharpInst.toBuffer();
		return { image: processedImage, mime: outputMime };
	};
}
/** Formats sharp can encode to (each is also a method name on Sharp). */
const outputFormats = ["png", "jpeg", "webp", "avif", "tiff", "gif"] as const;
type OutputImgFormat = (typeof outputFormats)[number];
/** Type guard: true when `format` names one of the supported sharp output formats. */
const isOutputFormat = (format: string): format is OutputImgFormat =>
	(outputFormats as readonly string[]).includes(format);
/**
 * Re-encodes the sharp pipeline into the format named by `outputMime`
 * (e.g. "image/webp" → sharpInst.webp()).
 *
 * @throws when the mime is not an image mime, or names a format sharp
 *         cannot output
 */
export function convertImage(sharpInst: Sharp, outputMime: string): Sharp {
	const [type, format] = outputMime.split("/");
	if (type !== "image") throw Error(`Requested non-image mime type: ${outputMime}`);
	if (!isOutputFormat(format)) {
		throw Error(`Requested to convert to an unsupported format: ${format}`);
	}
	// The format subtype doubles as the sharp encoder method name
	return sharpInst[format]();
}
// heic/heif requires proprietary license
// TODO: blocking heif may be incorrect considering it also supports av1, so we should instead
// detect the compression method used via sharp().metadata().compression
// TODO: consider what to do about animated formats: apng, gif, animated webp, ...
const blocklistedMimes = ["image/heic", "image/heif"];

/** Sorted from largest to smallest */
const mimesBySizeDesc = [
	"image/png",
	"image/tiff",
	"image/gif",
	"image/jpeg",
	"image/webp",
	"image/avif",
];

/**
 * Picks the output mime type for an image: keeps the original mime when it is
 * supported, otherwise falls back to the preferred mime. When size reduction
 * is requested, picks the smallest supported format instead.
 *
 * @throws when the preferred mime is not in the supported list, the input is
 *         not an image mime, or the input mime is blocklisted
 **/
function chooseMimeType<T extends readonly string[]>(
	supportedMimes: T,
	preferredMime: string,
	mime: string,
	{ preferSizeReduction }: { preferSizeReduction: boolean }
): T[number] {
	if (!supportedMimes.includes(preferredMime)) {
		throw Error(
			`Preferred format "${preferredMime}" not found in supported mimes: ${supportedMimes.join(", ")}`
		);
	}
	const [type] = mime.split("/");
	if (type !== "image") throw Error(`Received non-image mime type: ${mime}`);
	if (!preferSizeReduction && supportedMimes.includes(mime)) return mime;
	if (blocklistedMimes.includes(mime)) throw Error(`Received blocklisted mime type: ${mime}`);
	// Walk from the smallest format upwards and take the first supported one
	for (let idx = mimesBySizeDesc.length - 1; idx >= 0; idx--) {
		const candidate = mimesBySizeDesc[idx];
		if (supportedMimes.includes(candidate)) return candidate;
	}
	return preferredMime;
}
/** Inputs for chooseImageSize. */
interface ImageSizeOptions {
	mime: string; // target output mime — drives the compressed-size estimate
	width: number; // current image width in pixels
	height: number; // current image height in pixels
	maxWidth: number; // maximum allowed width in pixels
	maxHeight: number; // maximum allowed height in pixels
	maxSizeInMB: number; // byte budget for the encoded output
}
/**
 * Chooses output dimensions that respect maxWidth/maxHeight and whose
 * estimated encoded size fits within maxSizeInMB, shrinking by ~10% per step.
 *
 * @throws when no dimensions above 1px fit the byte budget
 */
export function chooseImageSize({
	mime,
	width,
	height,
	maxWidth,
	maxHeight,
	maxSizeInMB,
}: ImageSizeOptions): { width: number; height: number } {
	// Proportionally shrink so neither dimension exceeds its maximum
	const downscale = Math.max(1, width / maxWidth, height / maxHeight);
	let candidateWidth = Math.ceil(width / downscale);
	let candidateHeight = Math.ceil(height / downscale);
	// NOTE(review): the byte budget here uses binary megabytes (1024²) while the
	// pre-check in makeImageProcessor uses decimal (1000²) — kept as-is, confirm
	// whether the discrepancy is intentional
	for (;;) {
		const estimatedBytes = estimateImageSizeInBytes(mime, candidateWidth, candidateHeight);
		if (estimatedBytes < maxSizeInMB * 1024 * 1024) {
			return { width: candidateWidth, height: candidateHeight };
		}
		candidateWidth = Math.floor(candidateWidth / 1.1);
		candidateHeight = Math.floor(candidateHeight / 1.1);
		if (candidateWidth <= 1 || candidateHeight <= 1) {
			throw Error(`Failed to resize image to fit within ${maxSizeInMB}MB`);
		}
	}
}
/** Worst-case compressed-size ratios relative to uncompressed RGBA, per format. */
const mimeToCompressionRatio: Record<string, number> = {
	"image/png": 1 / 2,
	"image/jpeg": 1 / 10,
	"image/webp": 1 / 4,
	"image/avif": 1 / 5,
	"image/tiff": 1,
	"image/gif": 1 / 5,
};

/**
 * Guesses the encoded size in bytes of an image from its format and
 * dimensions. Intentionally errs on the large side (worst case).
 *
 * @throws on formats with no known compression ratio
 **/
function estimateImageSizeInBytes(mime: string, width: number, height: number): number {
	const ratio = mimeToCompressionRatio[mime];
	if (!ratio) throw Error(`Unsupported image format: ${mime}`);
	// 32-bit color depth (8-bit RGBA) → 4 bytes per uncompressed pixel
	const bytesPerPixel = 4;
	return width * height * bytesPerPixel * ratio;
}
/** Downscales to fit within maxWidth×maxHeight while preserving aspect ratio ("inside" fit). */
export function resizeImage(sharpInst: Sharp, maxWidth: number, maxHeight: number): Sharp {
	return sharpInst.resize({ width: maxWidth, height: maxHeight, fit: "inside" });
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropicVertex.ts
|
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { createImageProcessorOptionsValidator } from "../images";
import { endpointMessagesToAnthropicMessages } from "./utils";
import type { MessageParam } from "@anthropic-ai/sdk/resources/messages.mjs";
/** Configuration schema for the Anthropic-on-Vertex-AI endpoint. */
export const endpointAnthropicVertexParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("anthropic-vertex"),
	region: z.string().default("us-central1"),
	projectId: z.string(),
	defaultHeaders: z.record(z.string()).optional(),
	defaultQuery: z.record(z.string()).optional(),
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"],
				preferredMimeType: "image/webp",
				// The 4 / 3 compensates for the 33% increase in size when converting to base64:
				// budget 5 MB × 3/4 = 3.75 MB of raw bytes
				maxSizeInMB: (5 / 4) * 3,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint that streams completions from Anthropic models hosted
 * on Google Vertex AI.
 *
 * @throws when the optional `@anthropic-ai/vertex-sdk` dependency is missing
 */
export async function endpointAnthropicVertex(
	input: z.input<typeof endpointAnthropicVertexParametersSchema>
): Promise<Endpoint> {
	const { region, projectId, model, defaultHeaders, defaultQuery, multimodal } =
		endpointAnthropicVertexParametersSchema.parse(input);
	let AnthropicVertex;
	try {
		AnthropicVertex = (await import("@anthropic-ai/vertex-sdk")).AnthropicVertex;
	} catch (e) {
		throw new Error("Failed to import @anthropic-ai/vertex-sdk", { cause: e });
	}
	const anthropic = new AnthropicVertex({
		baseURL: `https://${region}-aiplatform.googleapis.com/v1`,
		region,
		projectId,
		defaultHeaders,
		defaultQuery,
	});
	return async ({ messages, preprompt, generateSettings }) => {
		let system = preprompt;
		// A leading system message takes precedence over the preprompt
		if (messages?.[0]?.from === "system") {
			system = messages[0].content;
		}

		let tokenId = 0;
		// Merge per-request settings over the model's static parameters, matching
		// endpointAnthropic (previously generateSettings was silently ignored here)
		const parameters = { ...model.parameters, ...generateSettings };

		return (async function* () {
			const stream = anthropic.messages.stream({
				model: model.id ?? model.name,
				messages: (await endpointMessagesToAnthropicMessages(
					messages,
					multimodal
				)) as MessageParam[],
				max_tokens: parameters?.max_new_tokens,
				temperature: parameters?.temperature,
				top_p: parameters?.top_p,
				top_k: parameters?.top_k,
				stop_sequences: parameters?.stop,
				system,
			});
			while (true) {
				// Wait for either the next text delta or the end of the stream
				const result = await Promise.race([stream.emitted("text"), stream.emitted("end")]);

				// Stream end
				if (result === undefined) {
					yield {
						token: {
							id: tokenId++,
							text: "",
							logprob: 0,
							special: true,
						},
						generated_text: await stream.finalText(),
						details: null,
					} satisfies TextGenerationStreamOutput;
					return;
				}

				// Text delta
				yield {
					token: {
						id: tokenId++,
						text: result as unknown as string,
						special: false,
						logprob: 0,
					},
					generated_text: null,
					details: null,
				} satisfies TextGenerationStreamOutput;
			}
		})();
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/anthropic/utils.ts
|
import { makeImageProcessor, type ImageProcessorOptions } from "../images";
import { makeDocumentProcessor, type FileProcessorOptions } from "../document";
import type { EndpointMessage } from "../endpoints";
import type { MessageFile } from "$lib/types/Message";
import type {
BetaImageBlockParam,
BetaMessageParam,
BetaBase64PDFBlock,
} from "@anthropic-ai/sdk/resources/beta/messages/messages.mjs";
/**
 * Runs a message file through the image processor and wraps the result in an
 * Anthropic base64 image content block.
 */
export async function fileToImageBlock(
	file: MessageFile,
	opts: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp">
): Promise<BetaImageBlockParam> {
	const { image, mime } = await makeImageProcessor(opts)(file);
	return {
		type: "image",
		source: {
			type: "base64",
			media_type: mime,
			data: image.toString("base64"),
		},
	};
}
/**
 * Runs a message file through the document processor and wraps the result in
 * an Anthropic base64 PDF content block.
 */
export async function fileToDocumentBlock(
	file: MessageFile,
	opts: FileProcessorOptions<"application/pdf">
): Promise<BetaBase64PDFBlock> {
	const { file: document, mime } = await makeDocumentProcessor(opts)(file);
	return {
		type: "document",
		source: {
			type: "base64",
			media_type: mime,
			data: document.toString("base64"),
		},
	};
}
type NonSystemMessage = EndpointMessage & { from: "user" | "assistant" };

/**
 * Converts chat-ui messages into Anthropic message params, dropping system
 * messages and translating attached files into image/document blocks.
 *
 * @throws on attachments that are neither images nor (when enabled) PDFs
 */
export async function endpointMessagesToAnthropicMessages(
	messages: EndpointMessage[],
	multimodal: {
		image: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp">;
		document?: FileProcessorOptions<"application/pdf">;
	}
): Promise<BetaMessageParam[]> {
	const conversation = messages.filter(
		(message): message is NonSystemMessage => message.from !== "system"
	);
	return Promise.all(
		conversation.map(async (message): Promise<BetaMessageParam> => {
			const fileBlocks = await Promise.all(
				(message.files ?? []).map((file) => {
					if (file.mime.startsWith("image/")) {
						return fileToImageBlock(file, multimodal.image);
					}
					if (file.mime === "application/pdf" && multimodal.document) {
						return fileToDocumentBlock(file, multimodal.document);
					}
					throw new Error(`Unsupported file type: ${file.mime}`);
				})
			);
			return {
				role: message.from,
				content: [...fileBlocks, { type: "text", text: message.content }],
			};
		})
	);
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropic.ts
|
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import { env } from "$env/dynamic/private";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { createImageProcessorOptionsValidator } from "../images";
import { endpointMessagesToAnthropicMessages } from "./utils";
import { createDocumentProcessorOptionsValidator } from "../document";
import type { MessageParam } from "@anthropic-ai/sdk/resources/messages.mjs";
/** Configuration schema for the direct Anthropic API endpoint. */
export const endpointAnthropicParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("anthropic"),
	baseURL: z.string().url().default("https://api.anthropic.com"),
	apiKey: z.string().default(env.ANTHROPIC_API_KEY ?? "sk-"),
	defaultHeaders: z.record(z.string()).optional(),
	defaultQuery: z.record(z.string()).optional(),
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"],
				preferredMimeType: "image/webp",
				// The 4 / 3 compensates for the 33% increase in size when converting to base64:
				// budget 5 MB × 3/4 = 3.75 MB of raw bytes
				maxSizeInMB: (5 / 4) * 3,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
			// PDF attachments are forwarded as base64 document blocks
			document: createDocumentProcessorOptionsValidator({
				supportedMimeTypes: ["application/pdf"],
				maxSizeInMB: 32,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint that streams completions from the Anthropic Messages
 * API, translating the SDK's event stream into TextGenerationStreamOutput.
 *
 * @throws when the optional `@anthropic-ai/sdk` dependency is missing
 */
export async function endpointAnthropic(
	input: z.input<typeof endpointAnthropicParametersSchema>
): Promise<Endpoint> {
	const { baseURL, apiKey, model, defaultHeaders, defaultQuery, multimodal } =
		endpointAnthropicParametersSchema.parse(input);
	let Anthropic;
	try {
		Anthropic = (await import("@anthropic-ai/sdk")).default;
	} catch (e) {
		throw new Error("Failed to import @anthropic-ai/sdk", { cause: e });
	}
	const anthropic = new Anthropic({
		apiKey,
		baseURL,
		defaultHeaders,
		defaultQuery,
	});
	return async ({ messages, preprompt, generateSettings }) => {
		let system = preprompt;
		// A leading system message takes precedence over the preprompt
		if (messages?.[0]?.from === "system") {
			system = messages[0].content;
		}
		let tokenId = 0;
		// Per-request generation settings override the model's static parameters
		const parameters = { ...model.parameters, ...generateSettings };
		return (async function* () {
			const stream = anthropic.messages.stream({
				model: model.id ?? model.name,
				messages: (await endpointMessagesToAnthropicMessages(
					messages,
					multimodal
				)) as MessageParam[],
				max_tokens: parameters?.max_new_tokens,
				temperature: parameters?.temperature,
				top_p: parameters?.top_p,
				top_k: parameters?.top_k,
				stop_sequences: parameters?.stop,
				system,
			});
			while (true) {
				// Race the next text delta against stream end; "end" resolves undefined
				const result = await Promise.race([stream.emitted("text"), stream.emitted("end")]);
				// Stream end
				if (result === undefined) {
					yield {
						token: {
							id: tokenId++,
							text: "",
							logprob: 0,
							special: true,
						},
						generated_text: await stream.finalText(),
						details: null,
					} satisfies TextGenerationStreamOutput;
					return;
				}
				// Text delta
				yield {
					token: {
						id: tokenId++,
						text: result as unknown as string,
						special: false,
						logprob: 0,
					},
					generated_text: null,
					details: null,
				} satisfies TextGenerationStreamOutput;
			}
		})();
	};
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/aws/endpointBedrock.ts
|
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images";
import type { EndpointMessage } from "../endpoints";
import type { MessageFile } from "$lib/types/Message";
/** Configuration schema for the AWS Bedrock (Anthropic models) endpoint. */
export const endpointBedrockParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	type: z.literal("bedrock"),
	region: z.string().default("us-east-1"),
	model: z.any(),
	// Bedrock requires an explicit anthropic_version field in the request body
	anthropicVersion: z.string().default("bedrock-2023-05-31"),
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: [
					"image/png",
					"image/jpeg",
					"image/webp",
					"image/avif",
					"image/tiff",
					"image/gif",
				],
				preferredMimeType: "image/webp",
				maxSizeInMB: Infinity,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
		})
		.default({}),
});
export async function endpointBedrock(
input: z.input<typeof endpointBedrockParametersSchema>
): Promise<Endpoint> {
const { region, model, anthropicVersion, multimodal } =
endpointBedrockParametersSchema.parse(input);
let BedrockRuntimeClient, InvokeModelWithResponseStreamCommand;
try {
({ BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } = await import(
"@aws-sdk/client-bedrock-runtime"
));
} catch (error) {
throw new Error("Failed to import @aws-sdk/client-bedrock-runtime. Make sure it's installed.");
}
const client = new BedrockRuntimeClient({
region,
});
const imageProcessor = makeImageProcessor(multimodal.image);
return async ({ messages, preprompt, generateSettings }) => {
let system = preprompt;
// Use the first message as the system prompt if it's of type "system"
if (messages?.[0]?.from === "system") {
system = messages[0].content;
messages = messages.slice(1); // Remove the first system message from the array
}
const formattedMessages = await prepareMessages(messages, imageProcessor);
let tokenId = 0;
const parameters = { ...model.parameters, ...generateSettings };
return (async function* () {
const command = new InvokeModelWithResponseStreamCommand({
body: Buffer.from(
JSON.stringify({
anthropic_version: anthropicVersion,
max_tokens: parameters.max_new_tokens ? parameters.max_new_tokens : 4096,
messages: formattedMessages,
system,
}),
"utf-8"
),
contentType: "application/json",
accept: "application/json",
modelId: model.id,
trace: "DISABLED",
});
const response = await client.send(command);
let text = "";
for await (const item of response.body ?? []) {
const chunk = JSON.parse(new TextDecoder().decode(item.chunk?.bytes));
const chunk_type = chunk.type;
if (chunk_type === "content_block_delta") {
text += chunk.delta.text;
yield {
token: {
id: tokenId++,
text: chunk.delta.text,
logprob: 0,
special: false,
},
generated_text: null,
details: null,
} satisfies TextGenerationStreamOutput;
} else if (chunk_type === "message_stop") {
yield {
token: {
id: tokenId++,
text: "",
logprob: 0,
special: true,
},
generated_text: text,
details: null,
} satisfies TextGenerationStreamOutput;
}
}
})();
};
}
/**
 * Builds Anthropic-format message turns from chat-ui messages, converting
 * attachments and merging consecutive messages that share a role.
 */
async function prepareMessages(
	messages: EndpointMessage[],
	imageProcessor: ReturnType<typeof makeImageProcessor>
) {
	const result = [];
	for (const message of messages) {
		const blocks = [];
		if (message.files?.length) {
			blocks.push(...(await prepareFiles(imageProcessor, message.files)));
		}
		blocks.push({ type: "text", text: message.content });

		const previous = result[result.length - 1];
		if (previous?.role === message.from) {
			// Consecutive messages from the same role are merged into a single turn
			previous.content.push(...blocks);
		} else {
			result.push({ role: message.from, content: blocks });
		}
	}
	return result;
}
// Process files and convert them to base64 encoded strings
async function prepareFiles(
imageProcessor: ReturnType<typeof makeImageProcessor>,
files: MessageFile[]
) {
const processedFiles = await Promise.all(files.map(imageProcessor));
return processedFiles.map((file) => ({
type: "image",
source: { type: "base64", media_type: "image/jpeg", data: file.image.toString("base64") },
}));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/aws/endpointAws.ts
|
import { buildPrompt } from "$lib/buildPrompt";
import { textGenerationStream } from "@huggingface/inference";
import { z } from "zod";
import type { Endpoint } from "../endpoints";
/** Configuration schema for SigV4-signed SageMaker/Lambda TGI endpoints. */
export const endpointAwsParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("aws"),
	url: z.string().url(),
	accessKey: z
		.string({
			description:
				"An AWS Access Key ID. If not provided, the default AWS identity resolution will be used",
		})
		.min(1)
		.optional(),
	secretKey: z
		.string({
			description:
				"An AWS Access Key Secret. If not provided, the default AWS identity resolution will be used",
		})
		.min(1)
		.optional(),
	sessionToken: z.string().optional(),
	// Determines the SigV4 signing service name
	service: z.union([z.literal("sagemaker"), z.literal("lambda")]).default("sagemaker"),
	region: z.string().optional(),
});
export async function endpointAws(
input: z.input<typeof endpointAwsParametersSchema>
): Promise<Endpoint> {
let createSignedFetcher;
try {
createSignedFetcher = (await import("aws-sigv4-fetch")).createSignedFetcher;
} catch (e) {
throw new Error("Failed to import aws-sigv4-fetch");
}
const { url, accessKey, secretKey, sessionToken, model, region, service } =
endpointAwsParametersSchema.parse(input);
const signedFetch = createSignedFetcher({
service,
region,
credentials:
accessKey && secretKey
? { accessKeyId: accessKey, secretAccessKey: secretKey, sessionToken }
: undefined,
});
return async ({ messages, preprompt, continueMessage, generateSettings }) => {
const prompt = await buildPrompt({
messages,
continueMessage,
preprompt,
model,
});
return textGenerationStream(
{
parameters: { ...model.parameters, ...generateSettings, return_full_text: false },
model: url,
inputs: prompt,
},
{
use_cache: false,
fetch: signedFetch,
}
);
};
}
export default endpointAws;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/openai/endpointOai.ts
|
import { z } from "zod";
import { openAICompletionToTextGenerationStream } from "./openAICompletionToTextGenerationStream";
import { openAIChatToTextGenerationStream } from "./openAIChatToTextGenerationStream";
import type { CompletionCreateParamsStreaming } from "openai/resources/completions";
import type {
ChatCompletionCreateParamsStreaming,
ChatCompletionTool,
} from "openai/resources/chat/completions";
import type { FunctionDefinition, FunctionParameters } from "openai/resources/shared";
import { buildPrompt } from "$lib/buildPrompt";
import { env } from "$env/dynamic/private";
import type { Endpoint } from "../endpoints";
import type OpenAI from "openai";
import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images";
import type { MessageFile } from "$lib/types/Message";
import { type Tool } from "$lib/types/Tool";
import type { EndpointMessage } from "../endpoints";
import { v4 as uuidv4 } from "uuid";
/**
 * Converts chat-ui Tool definitions into the OpenAI chat-completions `tools`
 * payload (JSON-schema function definitions).
 *
 * @throws on "file" inputs (unsupported) and on unknown input types
 */
function createChatCompletionToolsArray(tools: Tool[] | undefined): ChatCompletionTool[] {
	const toolChoices = [] as ChatCompletionTool[];
	if (tools === undefined) {
		return toolChoices;
	}
	for (const t of tools) {
		const requiredProperties = [] as string[];
		const properties = {} as Record<string, unknown>;
		for (const idx in t.inputs) {
			const parameterDefinition = t.inputs[idx];
			const parameter = {} as Record<string, unknown>;
			switch (parameterDefinition.type) {
				case "str":
					parameter.type = "string";
					break;
				case "float":
				case "int":
					parameter.type = "number";
					break;
				case "bool":
					parameter.type = "boolean";
					break;
				case "file":
					throw new Error("File type's currently not supported");
				default:
					// Name the offending input type instead of interpolating the whole
					// tool object (which rendered as "[object Object]")
					throw new Error(
						`Unknown tool IO type "${parameterDefinition.type}" for input "${parameterDefinition.name}" of tool "${t.name}"`
					);
			}
			if ("description" in parameterDefinition) {
				parameter.description = parameterDefinition.description;
			}
			if (parameterDefinition.paramType === "required") {
				requiredProperties.push(t.inputs[idx].name);
			}
			properties[t.inputs[idx].name] = parameter;
		}
		const functionParameters: FunctionParameters = {
			type: "object",
			...(requiredProperties.length > 0 ? { required: requiredProperties } : {}),
			properties,
		};
		const functionDefinition: FunctionDefinition = {
			name: t.name,
			description: t.description,
			parameters: functionParameters,
		};
		const toolDefinition: ChatCompletionTool = {
			type: "function",
			function: functionDefinition,
		};
		toolChoices.push(toolDefinition);
	}
	return toolChoices;
}
/** Configuration schema for OpenAI-compatible endpoints (completions or chat). */
export const endpointOAIParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("openai"),
	baseURL: z.string().url().default("https://api.openai.com/v1"),
	apiKey: z.string().default(env.OPENAI_API_KEY || env.HF_TOKEN || "sk-"),
	// Legacy text completions vs modern chat completions API
	completion: z
		.union([z.literal("completions"), z.literal("chat_completions")])
		.default("chat_completions"),
	defaultHeaders: z.record(z.string()).optional(),
	defaultQuery: z.record(z.string()).optional(),
	// Arbitrary extra fields merged into every request body
	extraBody: z.record(z.any()).optional(),
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: [
					"image/png",
					"image/jpeg",
					"image/webp",
					"image/avif",
					"image/tiff",
					"image/gif",
				],
				preferredMimeType: "image/webp",
				maxSizeInMB: Infinity,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint for any OpenAI-compatible API, supporting both the
 * legacy "completions" mode and "chat_completions" mode (tools + images).
 *
 * @throws when the optional `openai` dependency is missing, when tools are
 *         requested in "completions" mode, or on an invalid completion type
 */
export async function endpointOai(
	input: z.input<typeof endpointOAIParametersSchema>
): Promise<Endpoint> {
	const {
		baseURL,
		apiKey,
		completion,
		model,
		defaultHeaders,
		defaultQuery,
		multimodal,
		extraBody,
	} = endpointOAIParametersSchema.parse(input);
	let OpenAI;
	try {
		OpenAI = (await import("openai")).OpenAI;
	} catch (e) {
		throw new Error("Failed to import OpenAI", { cause: e });
	}
	const openai = new OpenAI({
		apiKey: apiKey || "sk-",
		baseURL,
		defaultHeaders,
		defaultQuery,
	});
	const imageProcessor = makeImageProcessor(multimodal.image);
	if (completion === "completions") {
		if (model.tools) {
			throw new Error(
				"Tools are not supported for 'completions' mode, switch to 'chat_completions' instead"
			);
		}
		return async ({ messages, preprompt, continueMessage, generateSettings, conversationId }) => {
			const prompt = await buildPrompt({
				messages,
				continueMessage,
				preprompt,
				model,
			});
			// Per-request settings override the model's static parameters
			const parameters = { ...model.parameters, ...generateSettings };
			const body: CompletionCreateParamsStreaming = {
				model: model.id ?? model.name,
				prompt,
				stream: true,
				max_tokens: parameters?.max_new_tokens,
				stop: parameters?.stop,
				temperature: parameters?.temperature,
				top_p: parameters?.top_p,
				frequency_penalty: parameters?.repetition_penalty,
				presence_penalty: parameters?.presence_penalty,
			};
			const openAICompletion = await openai.completions.create(body, {
				body: { ...body, ...extraBody },
				headers: {
					"ChatUI-Conversation-ID": conversationId?.toString() ?? "",
				},
			});
			return openAICompletionToTextGenerationStream(openAICompletion);
		};
	} else if (completion === "chat_completions") {
		return async ({
			messages,
			preprompt,
			generateSettings,
			tools,
			toolResults,
			conversationId,
		}) => {
			// Image parts are only attached when the model is multimodal and tools are off
			let messagesOpenAI: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
				await prepareMessages(messages, imageProcessor, !model.tools && model.multimodal);
			if (messagesOpenAI?.[0]?.role !== "system") {
				messagesOpenAI = [{ role: "system", content: "" }, ...messagesOpenAI];
			}
			if (messagesOpenAI?.[0]) {
				// NOTE(review): this overwrites any pre-existing system message content with
				// the preprompt (or "" when unset) — confirm callers always supply system
				// text via `preprompt` rather than a leading system message
				messagesOpenAI[0].content = preprompt ?? "";
			}
			// Replay prior tool results as an assistant tool_calls turn + tool responses
			if (toolResults && toolResults.length > 0) {
				const toolCallRequests: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = {
					role: "assistant",
					content: null,
					tool_calls: [],
				};
				const responses: Array<OpenAI.Chat.Completions.ChatCompletionToolMessageParam> = [];
				for (const result of toolResults) {
					// Fabricate a call id to pair each request with its response
					const id = uuidv4();
					const toolCallResult: OpenAI.Chat.Completions.ChatCompletionMessageToolCall = {
						type: "function",
						function: {
							name: result.call.name,
							arguments: JSON.stringify(result.call.parameters),
						},
						id,
					};
					toolCallRequests.tool_calls?.push(toolCallResult);
					const toolCallResponse: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = {
						role: "tool",
						content: "",
						tool_call_id: id,
					};
					if ("outputs" in result) {
						toolCallResponse.content = JSON.stringify(result.outputs);
					}
					responses.push(toolCallResponse);
				}
				messagesOpenAI.push(toolCallRequests);
				messagesOpenAI.push(...responses);
			}
			// Per-request settings override the model's static parameters
			const parameters = { ...model.parameters, ...generateSettings };
			const toolCallChoices = createChatCompletionToolsArray(tools);
			const body: ChatCompletionCreateParamsStreaming = {
				model: model.id ?? model.name,
				messages: messagesOpenAI,
				stream: true,
				max_tokens: parameters?.max_new_tokens,
				stop: parameters?.stop,
				temperature: parameters?.temperature,
				top_p: parameters?.top_p,
				frequency_penalty: parameters?.repetition_penalty,
				presence_penalty: parameters?.presence_penalty,
				...(toolCallChoices.length > 0 ? { tools: toolCallChoices, tool_choice: "auto" } : {}),
			};
			const openChatAICompletion = await openai.chat.completions.create(body, {
				body: { ...body, ...extraBody },
				headers: {
					"ChatUI-Conversation-ID": conversationId?.toString() ?? "",
				},
			});
			return openAIChatToTextGenerationStream(openChatAICompletion);
		};
	} else {
		throw new Error("Invalid completion type");
	}
}
/**
 * Maps chat-ui messages to OpenAI chat-completion messages. When the endpoint
 * is multimodal, user messages carry their image files as image_url parts.
 */
async function prepareMessages(
	messages: EndpointMessage[],
	imageProcessor: ReturnType<typeof makeImageProcessor>,
	isMultimodal: boolean
): Promise<OpenAI.Chat.Completions.ChatCompletionMessageParam[]> {
	return Promise.all(
		messages.map(async (message) => {
			const needsImageParts = isMultimodal && message.from === "user";
			if (!needsImageParts) {
				return {
					role: message.from,
					content: message.content,
				};
			}
			const imageParts = await prepareFiles(imageProcessor, message.files ?? []);
			return {
				role: message.from,
				content: [...imageParts, { type: "text", text: message.content }],
			};
		})
	);
}
/** Processes image attachments into OpenAI image_url content parts (data URLs). */
async function prepareFiles(
	imageProcessor: ReturnType<typeof makeImageProcessor>,
	files: MessageFile[]
): Promise<OpenAI.Chat.Completions.ChatCompletionContentPartImage[]> {
	const imagesOnly = files.filter((file) => file.mime.startsWith("image/"));
	const processed = await Promise.all(imagesOnly.map(imageProcessor));
	return processed.map(({ image, mime }) => ({
		type: "image_url" as const,
		image_url: {
			url: `data:${mime};base64,${image.toString("base64")}`,
		},
	}));
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts
|
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type OpenAI from "openai";
import type { Stream } from "openai/streaming";
import type { ToolCall } from "$lib/types/Tool";
type ToolCallWithParameters = {
	toolCall: ToolCall;
	parameterJsonString: string;
};

/**
 * Parses the accumulated tool-call argument fragments and wraps the finished
 * ToolCalls in a final stream output token.
 *
 * @throws when the accumulated arguments are not valid JSON after cleanup
 */
function prepareToolCalls(toolCallsWithParameters: ToolCallWithParameters[], tokenId: number) {
	const toolCalls: ToolCall[] = [];
	for (const toolCallWithParameters of toolCallsWithParameters) {
		// HACK: sometimes gpt4 via azure returns the JSON with literal newlines in it
		// like {\n "foo": "bar" }. Strip ALL of them before parsing — the previous
		// `replace("\n", "")` only removed the first occurrence.
		const s = toolCallWithParameters.parameterJsonString.split("\n").join("");
		const params = JSON.parse(s);
		const toolCall = toolCallWithParameters.toolCall;
		for (const name in params) {
			toolCall.parameters[name] = params[name];
		}
		toolCalls.push(toolCall);
	}
	const output = {
		token: {
			id: tokenId,
			text: "",
			logprob: 0,
			special: false,
			toolCalls,
		},
		generated_text: null,
		details: null,
	};
	return output;
}
/**
 * Transform a stream of OpenAI.Chat.ChatCompletion chunks into a stream of
 * TextGenerationStreamOutput tokens, accumulating tool-call fragments along
 * the way and emitting them once the model finishes with "tool_calls".
 */
export async function* openAIChatToTextGenerationStream(
	completionStream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>
) {
	let generatedText = "";
	let tokenId = 0;
	const pendingToolCalls: ToolCallWithParameters[] = [];
	for await (const completion of completionStream) {
		const choice = completion.choices[0];
		const content = choice?.delta?.content ?? "";
		const isLast = choice?.finish_reason === "stop" || choice?.finish_reason === "length";
		if (content) {
			generatedText += content;
		}
		const output: TextGenerationStreamOutput = {
			token: {
				id: tokenId++,
				text: content ?? "",
				logprob: 0,
				special: isLast,
			},
			generated_text: isLast ? generatedText : null,
			details: null,
		};
		yield output;

		for (const tool of choice?.delta?.tool_calls ?? []) {
			if (tool.id) {
				// A new tool call starts: it must carry the function name
				if (!tool.function?.name) {
					throw new Error("Tool call without function name");
				}
				pendingToolCalls.push({
					toolCall: { name: tool.function.name, parameters: {} },
					parameterJsonString: "",
				});
			}
			// Argument fragments always belong to the most recent tool call
			if (pendingToolCalls.length > 0 && tool.function?.arguments) {
				pendingToolCalls[pendingToolCalls.length - 1].parameterJsonString +=
					tool.function.arguments;
			}
		}
		if (choice?.finish_reason === "tool_calls") {
			yield prepareToolCalls(pendingToolCalls, tokenId++);
		}
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/openai/openAICompletionToTextGenerationStream.ts
|
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type OpenAI from "openai";
import type { Stream } from "openai/streaming";
/**
 * Transform a stream of OpenAI.Completions.Completion chunks into a stream of
 * TextGenerationStreamOutput tokens, carrying the full text on the final one.
 */
export async function* openAICompletionToTextGenerationStream(
	completionStream: Stream<OpenAI.Completions.Completion>
) {
	let accumulated = "";
	let nextTokenId = 0;
	for await (const completion of completionStream) {
		const choice = completion.choices[0];
		const text = choice?.text ?? "";
		const isFinal = choice?.finish_reason === "stop" || choice?.finish_reason === "length";
		if (text) {
			accumulated += text;
		}
		const output: TextGenerationStreamOutput = {
			token: {
				id: nextTokenId++,
				text,
				logprob: 0,
				special: isFinal,
			},
			generated_text: isFinal ? accumulated : null,
			details: null,
		};
		yield output;
	}
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/cohere/endpointCohere.ts
|
import { z } from "zod";
import { env } from "$env/dynamic/private";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Cohere, CohereClient } from "cohere-ai";
import { buildPrompt } from "$lib/buildPrompt";
import { ToolResultStatus, type ToolCall } from "$lib/types/Tool";
import { pipeline, Writable, type Readable } from "node:stream";
import { toolHasName } from "$lib/utils/tools";
/** Configuration schema for the Cohere chat endpoint. */
export const endpointCohereParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("cohere"),
	apiKey: z.string().default(env.COHERE_API_TOKEN),
	clientName: z.string().optional(),
	// When true, the prompt is built locally and sent with rawPrompting instead
	// of using Cohere's chat-history format
	raw: z.boolean().default(false),
});
/**
 * Creates an Endpoint backed by the Cohere chat API.
 *
 * Supports two modes: "raw" (a locally built prompt sent with rawPrompting)
 * and structured chat (history + preamble + tools). Tokens, tool calls and
 * the final text are yielded as TextGenerationStreamOutput-shaped chunks.
 *
 * @param input - Parameters validated against endpointCohereParametersSchema.
 * @throws If the optional cohere-ai dependency cannot be imported.
 */
export async function endpointCohere(
	input: z.input<typeof endpointCohereParametersSchema>
): Promise<Endpoint> {
	const { apiKey, clientName, model, raw } = endpointCohereParametersSchema.parse(input);
	let cohere: CohereClient;
	try {
		// Lazy dynamic import so cohere-ai is only required when this endpoint is used.
		cohere = new (await import("cohere-ai")).CohereClient({
			token: apiKey,
			clientName,
		});
	} catch (e) {
		throw new Error("Failed to import cohere-ai", { cause: e });
	}
	return async ({ messages, preprompt, generateSettings, continueMessage, tools, toolResults }) => {
		// A leading system message overrides the preprompt as the preamble.
		let system = preprompt;
		if (messages?.[0]?.from === "system") {
			system = messages[0].content;
		}
		// Tools must use [A-z_] for their names and directly_answer is banned
		// It's safe to convert the tool names because we treat - and _ the same
		tools = tools
			?.filter((tool) => !toolHasName("directly_answer", tool))
			.map((tool) => ({ ...tool, name: tool.name.replaceAll("-", "_") }));
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		return (async function* () {
			let stream;
			let tokenId = 0;
			if (raw) {
				// Raw mode: build the full prompt locally and bypass Cohere's chat templating.
				const prompt = await buildPrompt({
					messages,
					model,
					preprompt: system,
					continueMessage,
					tools,
					toolResults,
				});
				stream = await cohere.chatStream({
					forceSingleStep: true,
					message: prompt,
					rawPrompting: true,
					model: model.id ?? model.name,
					p: parameters?.top_p,
					k: parameters?.top_k,
					maxTokens: parameters?.max_new_tokens,
					temperature: parameters?.temperature,
					stopSequences: parameters?.stop,
					frequencyPenalty: parameters?.frequency_penalty,
				});
			} else {
				// Structured mode: map chat-ui roles onto Cohere's USER/CHATBOT roles.
				const formattedMessages = messages
					.filter((message) => message.from !== "system")
					.map((message) => ({
						role: message.from === "user" ? "USER" : "CHATBOT",
						message: message.content,
					})) satisfies Cohere.Message[];
				stream = await cohere
					.chatStream({
						forceSingleStep: true,
						model: model.id ?? model.name,
						// All but the last message form the history; the last is the new turn.
						chatHistory: formattedMessages.slice(0, -1),
						message: formattedMessages[formattedMessages.length - 1].message,
						preamble: system,
						p: parameters?.top_p,
						k: parameters?.top_k,
						maxTokens: parameters?.max_new_tokens,
						temperature: parameters?.temperature,
						stopSequences: parameters?.stop,
						frequencyPenalty: parameters?.frequency_penalty,
						tools,
						toolResults:
							toolResults?.length && toolResults?.length > 0
								? toolResults?.map((toolResult) => {
										// Errored tool calls are reported to the model via an `error` output.
										if (toolResult.status === ToolResultStatus.Error) {
											return { call: toolResult.call, outputs: [{ error: toolResult.message }] };
										}
										return { call: toolResult.call, outputs: toolResult.outputs };
									})
								: undefined,
					})
					.catch(async (err) => {
						if (!err.body) throw err;
						// Decode the error message and throw
						const message = await convertStreamToBuffer(err.body).catch(() => {
							throw err;
						});
						throw Error(message, { cause: err });
					});
			}
			for await (const output of stream) {
				if (output.eventType === "text-generation") {
					// Plain token of generated text.
					yield {
						token: {
							id: tokenId++,
							text: output.text,
							logprob: 0,
							special: false,
						},
						generated_text: null,
						details: null,
					} satisfies TextGenerationStreamOutput;
				} else if (output.eventType === "tool-calls-generation") {
					// Tool calls are surfaced as an empty "special" token carrying toolCalls.
					yield {
						token: {
							id: tokenId++,
							text: "",
							logprob: 0,
							special: true,
							toolCalls: output.toolCalls as ToolCall[],
						},
						generated_text: null,
						details: null,
					};
				} else if (output.eventType === "stream-end") {
					// Surface Cohere-side failures (toxicity / limits) as thrown errors.
					if (["ERROR", "ERROR_TOXIC", "ERROR_LIMIT"].includes(output.finishReason)) {
						throw new Error(output.finishReason);
					}
					// Final chunk: empty special token carrying the full response text.
					yield {
						token: {
							id: tokenId++,
							text: "",
							logprob: 0,
							special: true,
						},
						generated_text: output.response.text,
						details: null,
					};
				}
			}
		})();
	};
}
/**
 * Drains a Node readable stream and decodes the collected bytes as UTF-8.
 * The returned promise rejects if the stream emits an error.
 */
async function convertStreamToBuffer(webReadableStream: Readable) {
	const collected: Buffer[] = [];
	// Node readable streams are async iterable; iteration rejects on stream error.
	for await (const piece of webReadableStream) {
		collected.push(piece);
	}
	return Buffer.concat(collected).toString("utf-8");
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts
|
import { buildPrompt } from "$lib/buildPrompt";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";
/** Configuration schema for the Ollama endpoint. */
export const endpointOllamaParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("ollama"),
	// Base URL of the Ollama server.
	url: z.string().url().default("http://127.0.0.1:11434"),
	// Name the model is known by on the Ollama server; falls back to model.name.
	ollamaName: z.string().min(1).optional(),
});
/**
 * Creates an Endpoint backed by an Ollama server's /api/generate route.
 *
 * Before generating, checks /api/tags for the model; if absent it triggers a
 * background pull and asks the caller to retry later. Generation is streamed
 * as newline-delimited JSON and re-emitted as TextGenerationStreamOutput.
 *
 * @param input - Parameters validated against endpointOllamaParametersSchema.
 */
export function endpointOllama(input: z.input<typeof endpointOllamaParametersSchema>): Endpoint {
	const { url, model, ollamaName } = endpointOllamaParametersSchema.parse(input);
	// Name the model is registered under on the Ollama server.
	const resolvedName = ollamaName ?? model.name;
	return async ({ messages, preprompt, continueMessage, generateSettings }) => {
		const prompt = await buildPrompt({
			messages,
			continueMessage,
			preprompt,
			model,
		});
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		// Check whether the model is already available on the server.
		const requestInfo = await fetch(`${url}/api/tags`, {
			method: "GET",
			headers: {
				"Content-Type": "application/json",
			},
		});
		const tags = await requestInfo.json();
		// Fix: compare against the resolved name — the original compared against the
		// optional `ollamaName` only, so an unset ollamaName never matched and the
		// model was re-pulled even when `model.name` was already present.
		if (!tags.models.some((m: { name: string }) => m.name === resolvedName)) {
			// if its not in the tags, pull but dont wait for the answer
			fetch(`${url}/api/pull`, {
				method: "POST",
				headers: {
					"Content-Type": "application/json",
				},
				body: JSON.stringify({
					name: resolvedName,
					stream: false,
				}),
			}).catch(() => {
				// Fire-and-forget: pull failures surface on the next availability check.
			});
			throw new Error("Currently pulling model from Ollama, please try again later.");
		}
		const r = await fetch(`${url}/api/generate`, {
			method: "POST",
			headers: {
				"Content-Type": "application/json",
			},
			body: JSON.stringify({
				prompt,
				model: resolvedName,
				// raw: true disables Ollama's own prompt templating; we built the prompt already.
				raw: true,
				options: {
					top_p: parameters.top_p,
					top_k: parameters.top_k,
					temperature: parameters.temperature,
					repeat_penalty: parameters.repetition_penalty,
					stop: parameters.stop,
					num_predict: parameters.max_new_tokens,
				},
			}),
		});
		if (!r.ok) {
			throw new Error(`Failed to generate text: ${await r.text()}`);
		}
		const encoder = new TextDecoderStream();
		const reader = r.body?.pipeThrough(encoder).getReader();
		return (async function* () {
			let generatedText = "";
			let tokenId = 0;
			let stop = false;
			while (!stop) {
				// Read one chunk of newline-delimited JSON from the stream.
				const out = (await reader?.read()) ?? { done: false, value: undefined };
				// When the stream is exhausted, cancel the reader and end the generator.
				if (out.done) {
					reader?.cancel();
					return;
				}
				if (!out.value) {
					return;
				}
				let data = null;
				try {
					data = JSON.parse(out.value);
				} catch (e) {
					// Unparseable chunk: stop streaming rather than yield garbage.
					return;
				}
				if (!data.done) {
					generatedText += data.response;
					yield {
						token: {
							id: tokenId++,
							text: data.response ?? "",
							logprob: 0,
							special: false,
						},
						generated_text: null,
						details: null,
					} satisfies TextGenerationStreamOutput;
				} else {
					// Final chunk: flag the token as special and report the full text.
					stop = true;
					yield {
						token: {
							id: tokenId++,
							text: data.response ?? "",
							logprob: 0,
							special: true,
						},
						generated_text: generatedText,
						details: null,
					} satisfies TextGenerationStreamOutput;
				}
			}
		})();
	};
}
export default endpointOllama;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/google/endpointVertex.ts
|
import {
VertexAI,
HarmCategory,
HarmBlockThreshold,
type Content,
type TextPart,
} from "@google-cloud/vertexai";
import type { Endpoint, TextGenerationStreamOutputWithToolsAndWebSources } from "../endpoints";
import { z } from "zod";
import type { Message } from "$lib/types/Message";
import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images";
import { createDocumentProcessorOptionsValidator, makeDocumentProcessor } from "../document";
/** Configuration schema for the Google Vertex AI endpoint. */
export const endpointVertexParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	model: z.any(), // allow optional and validate against emptiness
	type: z.literal("vertex"),
	// GCP region hosting the model.
	location: z.string().default("europe-west1"),
	// Optional override for the exact model version sent to Vertex.
	extraBody: z.object({ model_version: z.string() }).optional(),
	// GCP project id.
	project: z.string(),
	apiEndpoint: z.string().optional(),
	// When set, this harm-block threshold is applied to every safety category.
	safetyThreshold: z
		.enum([
			HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED,
			HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
			HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
			HarmBlockThreshold.BLOCK_NONE,
			HarmBlockThreshold.BLOCK_ONLY_HIGH,
		])
		.optional(),
	tools: z.array(z.any()).optional(),
	// Image/document preprocessing limits applied before upload.
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: [
					"image/png",
					"image/jpeg",
					"image/webp",
					"image/avif",
					"image/tiff",
					"image/gif",
				],
				preferredMimeType: "image/webp",
				maxSizeInMB: 20,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
			document: createDocumentProcessorOptionsValidator({
				supportedMimeTypes: ["application/pdf", "text/plain"],
				maxSizeInMB: 20,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint backed by Google Vertex AI generative models.
 *
 * Converts chat-ui messages (with optional image/document attachments) into
 * Vertex Content parts, streams the response, and forwards grounding web
 * sources alongside the generated tokens.
 *
 * @param input - Parameters validated against endpointVertexParametersSchema.
 */
export function endpointVertex(input: z.input<typeof endpointVertexParametersSchema>): Endpoint {
	const { project, location, model, apiEndpoint, safetyThreshold, tools, multimodal, extraBody } =
		endpointVertexParametersSchema.parse(input);
	const vertex_ai = new VertexAI({
		project,
		location,
		apiEndpoint,
	});
	return async ({ messages, preprompt, generateSettings }) => {
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		const hasFiles = messages.some((message) => message.files && message.files.length > 0);
		const generativeModel = vertex_ai.getGenerativeModel({
			model: extraBody?.model_version ?? model.id ?? model.name,
			// When a threshold is configured, apply it uniformly across all harm categories.
			safetySettings: safetyThreshold
				? [
						{
							category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
							threshold: safetyThreshold,
						},
						{
							category: HarmCategory.HARM_CATEGORY_HARASSMENT,
							threshold: safetyThreshold,
						},
						{
							category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
							threshold: safetyThreshold,
						},
						{
							category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
							threshold: safetyThreshold,
						},
						{
							category: HarmCategory.HARM_CATEGORY_UNSPECIFIED,
							threshold: safetyThreshold,
						},
					]
				: undefined,
			generationConfig: {
				maxOutputTokens: parameters?.max_new_tokens ?? 4096,
				stopSequences: parameters?.stop,
				temperature: parameters?.temperature ?? 1,
			},
			// tools and multimodal are mutually exclusive
			tools: !hasFiles ? tools : undefined,
		});
		// Preprompt is the same as the first system message.
		let systemMessage = preprompt;
		if (messages[0].from === "system") {
			systemMessage = messages[0].content;
			messages.shift();
		}
		// Convert each message (and its attachments) into a Vertex Content entry.
		const vertexMessages = await Promise.all(
			messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => {
				const imageProcessor = makeImageProcessor(multimodal.image);
				const documentProcessor = makeDocumentProcessor(multimodal.document);
				// Process attachments; unsupported mime types map to null and are dropped below.
				const processedFilesWithNull =
					files && files.length > 0
						? await Promise.all(
								files.map(async (file) => {
									if (file.mime.includes("image")) {
										const { image, mime } = await imageProcessor(file);
										return { file: image, mime };
									} else if (file.mime === "application/pdf" || file.mime === "text/plain") {
										return documentProcessor(file);
									}
									return null;
								})
							)
						: [];
				const processedFiles = processedFilesWithNull.filter((file) => file !== null);
				return {
					role: from === "user" ? "user" : "model",
					parts: [
						// Attachments go first as inline base64 data, followed by the text part.
						...processedFiles.map((processedFile) => ({
							inlineData: {
								data: processedFile.file.toString("base64"),
								mimeType: processedFile.mime,
							},
						})),
						{
							text: content,
						},
					],
				};
			})
		);
		const result = await generativeModel.generateContentStream({
			contents: vertexMessages,
			systemInstruction: systemMessage
				? {
						role: "system",
						parts: [
							{
								text: systemMessage,
							},
						],
					}
				: undefined,
		});
		let tokenId = 0;
		return (async function* () {
			let generatedText = "";
			// Accumulates grounding sources across chunks; re-sent with every yield.
			const webSources = [];
			for await (const data of result.stream) {
				if (!data?.candidates?.length) break; // Handle case where no candidates are present
				const candidate = data.candidates[0];
				if (!candidate.content?.parts?.length) continue; // Skip if no parts are present
				const firstPart = candidate.content.parts.find((part) => "text" in part) as
					| TextPart
					| undefined;
				if (!firstPart) continue; // Skip if no text part is found
				// A finishReason on the candidate marks the final chunk of the stream.
				const isLastChunk = !!candidate.finishReason;
				// Collect grounding chunks (web or retrieved context) that have both uri and title.
				const candidateWebSources = candidate.groundingMetadata?.groundingChunks
					?.map((chunk) => {
						const uri = chunk.web?.uri ?? chunk.retrievedContext?.uri;
						const title = chunk.web?.title ?? chunk.retrievedContext?.title;
						if (!uri || !title) {
							return null;
						}
						return {
							uri,
							title,
						};
					})
					.filter((source) => source !== null);
				if (candidateWebSources) {
					webSources.push(...candidateWebSources);
				}
				const content = firstPart.text;
				generatedText += content;
				const output: TextGenerationStreamOutputWithToolsAndWebSources = {
					token: {
						id: tokenId++,
						text: content,
						logprob: 0,
						special: isLastChunk,
					},
					generated_text: isLastChunk ? generatedText : null,
					details: null,
					webSources,
				};
				yield output;
				if (isLastChunk) break;
			}
		})();
	};
}
export default endpointVertex;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/google/endpointGenAI.ts
|
import { GoogleGenerativeAI, HarmBlockThreshold, HarmCategory } from "@google/generative-ai";
import type { Content, Part, SafetySetting, TextPart } from "@google/generative-ai";
import { z } from "zod";
import type { Message, MessageFile } from "$lib/types/Message";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images";
import type { ImageProcessorOptions } from "../images";
import { env } from "$env/dynamic/private";
/** Configuration schema for the Google Generative AI (Gemini API key) endpoint. */
export const endpointGenAIParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("genai"),
	// Defaults to the GOOGLE_GENAI_API_KEY environment variable.
	apiKey: z.string().default(env.GOOGLE_GENAI_API_KEY),
	// When set, this harm-block threshold is applied to every harm category.
	safetyThreshold: z
		.enum([
			HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED,
			HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
			HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
			HarmBlockThreshold.BLOCK_NONE,
			HarmBlockThreshold.BLOCK_ONLY_HIGH,
		])
		.optional(),
	// Image preprocessing limits applied before the upload.
	multimodal: z
		.object({
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"],
				preferredMimeType: "image/webp",
				// The 4 / 3 compensates for the 33% increase in size when converting to base64
				maxSizeInMB: (5 / 4) * 3,
				maxWidth: 4096,
				maxHeight: 4096,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint backed by the Google Generative AI SDK (API-key auth).
 *
 * Maps chat-ui messages (with optional image attachments) to GenAI Content
 * parts, streams the response, and closes the stream with a final "special"
 * token carrying the accumulated text.
 *
 * @param input - Parameters validated against endpointGenAIParametersSchema.
 */
export function endpointGenAI(input: z.input<typeof endpointGenAIParametersSchema>): Endpoint {
	const { model, apiKey, safetyThreshold, multimodal } = endpointGenAIParametersSchema.parse(input);
	const genAI = new GoogleGenerativeAI(apiKey);
	// Build one SafetySetting per harm category, all using the same threshold.
	// NOTE(review): assumes HarmCategory is a string enum whose keys equal its
	// values (so the key/value comparison below holds) — confirm against the SDK.
	const safetySettings = safetyThreshold
		? Object.keys(HarmCategory)
				.filter((cat) => cat !== HarmCategory.HARM_CATEGORY_UNSPECIFIED)
				.reduce((acc, val) => {
					acc.push({
						category: val as HarmCategory,
						threshold: safetyThreshold,
					});
					return acc;
				}, [] as SafetySetting[])
		: undefined;
	return async ({ messages, preprompt, generateSettings }) => {
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		const generativeModel = genAI.getGenerativeModel({
			model: model.id ?? model.name,
			safetySettings,
			generationConfig: {
				maxOutputTokens: parameters?.max_new_tokens ?? 4096,
				stopSequences: parameters?.stop,
				temperature: parameters?.temperature ?? 1,
			},
		});
		// A leading system message overrides the preprompt as the system instruction.
		let systemMessage = preprompt;
		if (messages[0].from === "system") {
			systemMessage = messages[0].content;
			messages.shift();
		}
		// Convert each message and its image attachments into GenAI Content.
		const genAIMessages = await Promise.all(
			messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => {
				return {
					role: from === "user" ? "user" : "model",
					parts: [
						...(await Promise.all(
							(files ?? []).map((file) => fileToImageBlock(file, multimodal.image))
						)),
						{ text: content },
					],
				};
			})
		);
		const result = await generativeModel.generateContentStream({
			contents: genAIMessages,
			systemInstruction:
				systemMessage && systemMessage.trim() !== ""
					? {
							role: "system",
							parts: [{ text: systemMessage }],
						}
					: undefined,
		});
		let tokenId = 0;
		return (async function* () {
			let generatedText = "";
			for await (const data of result.stream) {
				if (!data?.candidates?.length) break; // Handle case where no candidates are present
				const candidate = data.candidates[0];
				if (!candidate.content?.parts?.length) continue; // Skip if no parts are present
				const firstPart = candidate.content.parts.find((part) => "text" in part) as
					| TextPart
					| undefined;
				if (!firstPart) continue; // Skip if no text part is found
				const content = firstPart.text;
				generatedText += content;
				const output: TextGenerationStreamOutput = {
					token: {
						id: tokenId++,
						text: content,
						logprob: 0,
						special: false,
					},
					generated_text: null,
					details: null,
				};
				yield output;
			}
			// Final chunk: empty special token carrying the full accumulated text.
			const output: TextGenerationStreamOutput = {
				token: {
					id: tokenId++,
					text: "",
					logprob: 0,
					special: true,
				},
				generated_text: generatedText,
				details: null,
			};
			yield output;
		})();
	};
}
/**
 * Converts a message attachment into a GenAI inline-data Part: the image is
 * resized/converted by the image processor, then embedded as base64.
 */
async function fileToImageBlock(
	file: MessageFile,
	opts: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp">
): Promise<Part> {
	const { image, mime } = await makeImageProcessor(opts)(file);
	return {
		inlineData: {
			mimeType: mime,
			data: image.toString("base64"),
		},
	};
}
export default endpointGenAI;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/tgi/endpointTgi.ts
|
import { env } from "$env/dynamic/private";
import { buildPrompt } from "$lib/buildPrompt";
import { textGenerationStream } from "@huggingface/inference";
import type { Endpoint, EndpointMessage } from "../endpoints";
import { z } from "zod";
import {
createImageProcessorOptionsValidator,
makeImageProcessor,
type ImageProcessor,
} from "../images";
/** Configuration schema for the Text Generation Inference (TGI) endpoint. */
export const endpointTgiParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("tgi"),
	// Base URL of the TGI server.
	url: z.string().url(),
	// Defaults to HF_TOKEN (or the legacy HF_ACCESS_TOKEN) environment variable.
	accessToken: z.string().default(env.HF_TOKEN ?? env.HF_ACCESS_TOKEN),
	// Custom Authorization header value, used only when no access token is set.
	authorization: z.string().optional(),
	multimodal: z
		.object({
			// Assumes IDEFICS
			image: createImageProcessorOptionsValidator({
				supportedMimeTypes: ["image/jpeg", "image/webp"],
				preferredMimeType: "image/webp",
				maxSizeInMB: 5,
				maxWidth: 378,
				maxHeight: 980,
			}),
		})
		.default({}),
});
/**
 * Creates an Endpoint backed by a Text Generation Inference (TGI) server.
 *
 * Builds a flat prompt from the chat history (inlining image attachments for
 * multimodal models) and streams tokens via textGenerationStream.
 *
 * @param input - Parameters validated against endpointTgiParametersSchema.
 */
export function endpointTgi(input: z.input<typeof endpointTgiParametersSchema>): Endpoint {
	const { url, accessToken, model, authorization, multimodal } =
		endpointTgiParametersSchema.parse(input);
	const imageProcessor = makeImageProcessor(multimodal.image);
	return async ({
		messages,
		preprompt,
		continueMessage,
		generateSettings,
		tools,
		toolResults,
		isMultimodal,
		conversationId,
	}) => {
		// Inline (resized) image attachments into the message text when multimodal.
		const messagesWithResizedFiles = await Promise.all(
			messages.map((message) => prepareMessage(Boolean(isMultimodal), message, imageProcessor))
		);
		const prompt = await buildPrompt({
			messages: messagesWithResizedFiles,
			preprompt,
			model,
			continueMessage,
			tools,
			toolResults,
		});
		return textGenerationStream(
			{
				parameters: { ...model.parameters, ...generateSettings, return_full_text: false },
				model: url,
				inputs: prompt,
				accessToken,
			},
			{
				use_cache: false,
				fetch: async (endpointUrl, info) => {
					if (info && authorization && !accessToken) {
						// Set authorization header if it is defined and HF_TOKEN is empty
						info.headers = {
							...info.headers,
							Authorization: authorization,
							"ChatUI-Conversation-ID": conversationId?.toString() ?? "",
						};
					}
					return fetch(endpointUrl, info);
				},
			}
		);
	};
}
/**
 * Inlines a message's image attachments into its text content as markdown
 * data-URI images so multimodal models (e.g. IDEFICS) can see them.
 * Returns the message unchanged when the model is not multimodal.
 *
 * @param isMultimodal - Whether the target model accepts inline images.
 * @param message - The message whose files should be embedded.
 * @param imageProcessor - Resizes/converts each attachment before embedding.
 */
async function prepareMessage(
	isMultimodal: boolean,
	message: EndpointMessage,
	imageProcessor: ImageProcessor
): Promise<EndpointMessage> {
	if (!isMultimodal) return message;
	const files = await Promise.all(message.files?.map(imageProcessor) ?? []);
	// Fix: the template literal was corrupted to `})` — reconstruct the markdown
	// image syntax embedding each processed image as a base64 data URI.
	const markdowns = files.map(
		(file) => ``
	);
	const content = message.content + "\n" + markdowns.join("\n ");
	return { ...message, content };
}
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/cloudflare/endpointCloudflare.ts
|
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
/** Configuration schema for the Cloudflare Workers AI endpoint. */
export const endpointCloudflareParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("cloudflare"),
	// Defaults to the CLOUDFLARE_ACCOUNT_ID environment variable.
	accountId: z.string().default(env.CLOUDFLARE_ACCOUNT_ID),
	// Defaults to the CLOUDFLARE_API_TOKEN environment variable.
	apiToken: z.string().default(env.CLOUDFLARE_API_TOKEN),
});
/**
 * Creates an Endpoint backed by the Cloudflare Workers AI REST API.
 *
 * Sends the chat as OpenAI-style messages and parses the server-sent-event
 * stream ("data: {...}" lines terminated by "data: [DONE]") into
 * TextGenerationStreamOutput chunks.
 *
 * @param input - Parameters validated against endpointCloudflareParametersSchema.
 */
export async function endpointCloudflare(
	input: z.input<typeof endpointCloudflareParametersSchema>
): Promise<Endpoint> {
	const { accountId, apiToken, model } = endpointCloudflareParametersSchema.parse(input);
	// Cloudflare model ids are namespaced; default bare ids to the @hf/ namespace.
	if (!model.id.startsWith("@")) {
		model.id = "@hf/" + model.id;
	}
	const apiURL = `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/run/${model.id}`;
	return async ({ messages, preprompt, generateSettings }) => {
		let messagesFormatted = messages.map((message) => ({
			role: message.from,
			content: message.content,
		}));
		// Ensure a system message leads the conversation; fall back to the preprompt.
		if (messagesFormatted?.[0]?.role !== "system") {
			messagesFormatted = [{ role: "system", content: preprompt ?? "" }, ...messagesFormatted];
		}
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		const payload = JSON.stringify({
			messages: messagesFormatted,
			stream: true,
			max_tokens: parameters?.max_new_tokens,
			temperature: parameters?.temperature,
			top_p: parameters?.top_p,
			top_k: parameters?.top_k,
			repetition_penalty: parameters?.repetition_penalty,
		});
		const res = await fetch(apiURL, {
			method: "POST",
			headers: {
				Authorization: `Bearer ${apiToken}`,
				"Content-Type": "application/json",
			},
			body: payload,
		});
		if (!res.ok) {
			throw new Error(`Failed to generate text: ${await res.text()}`);
		}
		const encoder = new TextDecoderStream();
		const reader = res.body?.pipeThrough(encoder).getReader();
		return (async function* () {
			let stop = false;
			let generatedText = "";
			let tokenId = 0;
			let accumulatedData = ""; // Buffer to accumulate data chunks
			while (!stop) {
				const out = await reader?.read();
				// If it's done, we cancel
				if (out?.done) {
					reader?.cancel();
					return;
				}
				if (!out?.value) {
					return;
				}
				// Accumulate the data chunk
				accumulatedData += out.value;
				// Process each complete JSON object in the accumulated data
				while (accumulatedData.includes("\n")) {
					// Assuming each JSON object ends with a newline
					const endIndex = accumulatedData.indexOf("\n");
					let jsonString = accumulatedData.substring(0, endIndex).trim();
					// Remove the processed part from the buffer
					accumulatedData = accumulatedData.substring(endIndex + 1);
					if (jsonString.startsWith("data: ")) {
						jsonString = jsonString.slice(6);
						let data = null;
						// "[DONE]" ends the SSE stream: emit the final special token and stop.
						if (jsonString === "[DONE]") {
							stop = true;
							yield {
								token: {
									id: tokenId++,
									text: "",
									logprob: 0,
									special: true,
								},
								generated_text: generatedText,
								details: null,
							} satisfies TextGenerationStreamOutput;
							reader?.cancel();
							continue;
						}
						try {
							data = JSON.parse(jsonString);
						} catch (e) {
							logger.error(e, "Failed to parse JSON");
							logger.error(jsonString, "Problematic JSON string:");
							continue; // Skip this iteration and try the next chunk
						}
						// Handle the parsed data
						if (data.response) {
							generatedText += data.response ?? "";
							const output: TextGenerationStreamOutput = {
								token: {
									id: tokenId++,
									text: data.response ?? "",
									logprob: 0,
									special: false,
								},
								generated_text: null,
								details: null,
							};
							yield output;
						}
					}
				}
			}
		})();
	};
}
export default endpointCloudflare;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts
|
import { env } from "$env/dynamic/private";
import { buildPrompt } from "$lib/buildPrompt";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";
import { logger } from "$lib/server/logger";
/** Configuration schema for the llama.cpp server endpoint. */
export const endpointLlamacppParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("llamacpp"),
	url: z.string().url().default("http://127.0.0.1:8080"), // legacy, feel free to remove in breaking change update
	// Preferred base URL; takes precedence over `url` when set.
	baseURL: z.string().url().optional(),
	// Defaults to HF_TOKEN (or the legacy HF_ACCESS_TOKEN) environment variable.
	accessToken: z.string().default(env.HF_TOKEN ?? env.HF_ACCESS_TOKEN),
});
/**
 * Creates an Endpoint backed by a llama.cpp server's /completion route.
 *
 * Builds a flat prompt, streams the server-sent-event response
 * ("data: {...}" lines) and re-emits it as TextGenerationStreamOutput.
 *
 * @param input - Parameters validated against endpointLlamacppParametersSchema.
 */
export function endpointLlamacpp(
	input: z.input<typeof endpointLlamacppParametersSchema>
): Endpoint {
	const { baseURL, url, model } = endpointLlamacppParametersSchema.parse(input);
	return async ({ messages, preprompt, continueMessage, generateSettings }) => {
		const prompt = await buildPrompt({
			messages,
			continueMessage,
			preprompt,
			model,
		});
		// Per-request settings take precedence over the model's defaults.
		const parameters = { ...model.parameters, ...generateSettings };
		const r = await fetch(`${baseURL ?? url}/completion`, {
			method: "POST",
			headers: {
				"Content-Type": "application/json",
			},
			body: JSON.stringify({
				prompt,
				stream: true,
				temperature: parameters.temperature,
				top_p: parameters.top_p,
				top_k: parameters.top_k,
				stop: parameters.stop,
				repeat_penalty: parameters.repetition_penalty,
				n_predict: parameters.max_new_tokens,
				cache_prompt: true,
			}),
		});
		if (!r.ok) {
			throw new Error(`Failed to generate text: ${await r.text()}`);
		}
		const encoder = new TextDecoderStream();
		const reader = r.body?.pipeThrough(encoder).getReader();
		return (async function* () {
			let stop = false;
			let generatedText = "";
			let tokenId = 0;
			let accumulatedData = ""; // Buffer to accumulate data chunks
			while (!stop) {
				// Read the stream and log the outputs to console
				const out = (await reader?.read()) ?? { done: false, value: undefined };
				// If it's done, we cancel
				if (out.done) {
					reader?.cancel();
					return;
				}
				if (!out.value) {
					return;
				}
				// Accumulate the data chunk
				accumulatedData += out.value;
				// Process each complete JSON object in the accumulated data
				while (accumulatedData.includes("\n")) {
					// Assuming each JSON object ends with a newline
					const endIndex = accumulatedData.indexOf("\n");
					let jsonString = accumulatedData.substring(0, endIndex).trim();
					// Remove the processed part from the buffer
					accumulatedData = accumulatedData.substring(endIndex + 1);
					if (jsonString.startsWith("data: ")) {
						jsonString = jsonString.slice(6);
						let data = null;
						try {
							data = JSON.parse(jsonString);
						} catch (e) {
							logger.error(e, "Failed to parse JSON");
							logger.error(jsonString, "Problematic JSON string:");
							continue; // Skip this iteration and try the next chunk
						}
						// Handle the parsed data
						if (data.content || data.stop) {
							// Fix: the final "stop" chunk may omit `content`; appending it
							// unguarded concatenated the literal string "undefined".
							generatedText += data.content ?? "";
							const output: TextGenerationStreamOutput = {
								token: {
									id: tokenId++,
									text: data.content ?? "",
									logprob: 0,
									special: false,
								},
								generated_text: data.stop ? generatedText : null,
								details: null,
							};
							if (data.stop) {
								stop = true;
								output.token.special = true;
								reader?.cancel();
							}
							yield output;
						}
					}
				}
			}
		})();
	};
}
export default endpointLlamacpp;
|
0
|
hf_public_repos/chat-ui/src/lib/server/endpoints
|
hf_public_repos/chat-ui/src/lib/server/endpoints/langserve/endpointLangserve.ts
|
import { buildPrompt } from "$lib/buildPrompt";
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { logger } from "$lib/server/logger";
/** Configuration schema for the LangServe endpoint. */
export const endpointLangserveParametersSchema = z.object({
	// Relative weight used for weighted-random endpoint selection.
	weight: z.number().int().positive().default(1),
	// Model definition from the server config; validated upstream, hence z.any().
	model: z.any(),
	type: z.literal("langserve"),
	// Base URL of the LangServe deployment (the /stream route is appended).
	url: z.string().url(),
});
/**
 * Creates an Endpoint backed by a LangServe /stream route.
 *
 * Sends the built prompt as `{ input: { text } }` and parses the
 * server-sent-event response ("event: data"/"event: end" frames) into
 * TextGenerationStreamOutput chunks.
 *
 * @param input - Parameters validated against endpointLangserveParametersSchema.
 */
export function endpointLangserve(
	input: z.input<typeof endpointLangserveParametersSchema>
): Endpoint {
	const { url, model } = endpointLangserveParametersSchema.parse(input);
	return async ({ messages, preprompt, continueMessage }) => {
		const prompt = await buildPrompt({
			messages,
			continueMessage,
			preprompt,
			model,
		});
		const r = await fetch(`${url}/stream`, {
			method: "POST",
			headers: {
				"Content-Type": "application/json",
			},
			body: JSON.stringify({
				input: { text: prompt },
			}),
		});
		if (!r.ok) {
			throw new Error(`Failed to generate text: ${await r.text()}`);
		}
		const encoder = new TextDecoderStream();
		const reader = r.body?.pipeThrough(encoder).getReader();
		return (async function* () {
			let stop = false;
			let generatedText = "";
			let tokenId = 0;
			let accumulatedData = ""; // Buffer to accumulate data chunks
			while (!stop) {
				// Read the stream and log the outputs to console
				const out = (await reader?.read()) ?? { done: false, value: undefined };
				// If it's done, we cancel
				if (out.done) {
					reader?.cancel();
					return;
				}
				if (!out.value) {
					return;
				}
				// Accumulate the data chunk
				accumulatedData += out.value;
				// Keep read data to check event type
				// NOTE(review): the event type is inspected on the raw chunk, which may
				// span multiple SSE frames — presumably fine for LangServe's framing,
				// but worth confirming against its streaming format.
				const eventData = out.value;
				// Process each complete JSON object in the accumulated data
				while (accumulatedData.includes("\n")) {
					// Assuming each JSON object ends with a newline
					const endIndex = accumulatedData.indexOf("\n");
					let jsonString = accumulatedData.substring(0, endIndex).trim();
					// Remove the processed part from the buffer
					accumulatedData = accumulatedData.substring(endIndex + 1);
					// Stopping with end event
					if (eventData.startsWith("event: end")) {
						stop = true;
						yield {
							token: {
								id: tokenId++,
								text: "",
								logprob: 0,
								special: true,
							},
							generated_text: generatedText,
							details: null,
						} satisfies TextGenerationStreamOutput;
						reader?.cancel();
						continue;
					}
					if (eventData.startsWith("event: data") && jsonString.startsWith("data: ")) {
						jsonString = jsonString.slice(6);
						let data = null;
						// Handle the parsed data
						try {
							data = JSON.parse(jsonString);
						} catch (e) {
							logger.error(e, "Failed to parse JSON");
							logger.error(jsonString, "Problematic JSON string:");
							continue; // Skip this iteration and try the next chunk
						}
						// Assuming content within data is a plain string
						if (data) {
							generatedText += data;
							const output: TextGenerationStreamOutput = {
								token: {
									id: tokenId++,
									text: data,
									logprob: 0,
									special: false,
								},
								generated_text: null,
								details: null,
							};
							yield output;
						}
					}
				}
			}
		})();
	};
}
export default endpointLangserve;
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/chatui/logo.svg
|
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#2063EC"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/chatui/icon.svg
|
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1 16.7844C1 8.61919 7.61919 2 15.7844 2L17.1989 2C24.4238 2 30.2808 7.85698 30.2808 15.0819C30.2808 22.3069 24.4238 28.1638 17.1989 28.1638L6.01658 28.1638L2.06037 30.1846C2.00283 30.214 1.95147 30.254 1.89847 30.291C1.53157 30.5467 0.999999 30.2903 1 29.8167L1 24.3028V16.7844Z" fill="#2063EC"/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/chatui/favicon.svg
|
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1 16.7844C1 8.61919 7.61919 2 15.7844 2L17.1989 2C24.4238 2 30.2808 7.85698 30.2808 15.0819C30.2808 22.3069 24.4238 28.1638 17.1989 28.1638L6.01658 28.1638L2.06037 30.1846C2.00283 30.214 1.95147 30.254 1.89847 30.291C1.53157 30.5467 0.999999 30.2903 1 29.8167L1 24.3028V16.7844Z" fill="#2063EC"/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/chatui/manifest.json
|
{
"background_color": "#ffffff",
"name": "Chat UI",
"short_name": "Chat UI",
"display": "standalone",
"start_url": "/",
"icons": [
{
"src": "/chatui/icon-128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/chatui/icon-256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/chatui/icon-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
]
}
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/huggingchat/logo.svg
|
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#FFD21E"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
<path
fill="#32343D"
d="M19.63 12.48c.37.14.52.9.9.7.71-.38.98-1.27.6-1.98a1.46 1.46 0 0 0-1.98-.61 1.47 1.47 0 0 0-.6 1.99c.17.34.74-.21 1.08-.1ZM12.72 12.48c-.37.14-.52.9-.9.7a1.47 1.47 0 0 1-.6-1.98 1.46 1.46 0 0 1 1.98-.61c.71.38.98 1.27.6 1.99-.18.34-.74-.21-1.08-.1ZM16.24 19.55c2.89 0 3.82-2.58 3.82-3.9 0-1.33-1.71.7-3.82.7-2.1 0-3.8-2.03-3.8-.7 0 1.32.92 3.9 3.8 3.9Z"
/>
<path
fill="#FF323D"
d="M18.56 18.8c-.57.44-1.33.75-2.32.75-.92 0-1.65-.27-2.2-.68.3-.63.87-1.11 1.55-1.32.12-.03.24.17.36.38.12.2.24.4.37.4s.26-.2.39-.4.26-.4.38-.36a2.56 2.56 0 0 1 1.47 1.23Z"
/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/huggingchat/icon.svg
|
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1 16.7844C1 8.61919 7.61919 2 15.7844 2L17.1989 2C24.4238 2 30.2808 7.85698 30.2808 15.0819C30.2808 22.3069 24.4238 28.1638 17.1989 28.1638L6.01658 28.1638L2.06037 30.1846C2.00283 30.214 1.95147 30.254 1.89847 30.291C1.53157 30.5467 0.999999 30.2903 1 29.8167L1 24.3028V16.7844Z" fill="#FFD21E"/>
<path d="M20.1087 13.0502C20.5981 13.2237 20.7928 14.2338 21.2872 13.9699C22.2235 13.4701 22.579 12.3028 22.0811 11.3628C21.5833 10.4227 20.4206 10.0658 19.4843 10.5657C18.5479 11.0655 18.1925 12.2328 18.6903 13.1728C18.9253 13.6165 19.6712 12.8951 20.1087 13.0502Z" fill="#32343D"/>
<path d="M11.0621 13.0502C10.5727 13.2237 10.3779 14.2338 9.88352 13.9699C8.94717 13.4701 8.59171 12.3028 9.08958 11.3628C9.58744 10.4227 10.7501 10.0658 11.6864 10.5657C12.6228 11.0655 12.9783 12.2328 12.4804 13.1728C12.2454 13.6165 11.4996 12.8951 11.0621 13.0502Z" fill="#32343D"/>
<path d="M15.6767 22.3026C19.4512 22.3026 20.6692 18.924 20.6692 17.189C20.6692 15.4541 18.434 18.1081 15.6767 18.1081C12.9195 18.1081 10.6843 15.4541 10.6843 17.189C10.6843 18.924 11.9023 22.3026 15.6767 22.3026Z" fill="#32343D"/>
<path d="M18.7088 21.3155C17.9621 21.9063 16.9693 22.3025 15.6767 22.3025C14.4622 22.3025 13.5124 21.9527 12.7827 21.4205C13.1887 20.5925 13.9259 19.9576 14.8208 19.6902C14.9743 19.6443 15.1324 19.9097 15.2943 20.1814C15.4505 20.4435 15.6102 20.7115 15.7727 20.7115C15.9458 20.7115 16.1159 20.4472 16.2819 20.1893C16.4553 19.9199 16.6243 19.6573 16.7878 19.7098C16.8355 19.7251 16.8827 19.7415 16.9294 19.7588C17.6944 20.0433 18.327 20.6017 18.7088 21.3155Z" fill="#FF323D"/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/huggingchat/favicon.svg
|
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1 16.7844C1 8.61919 7.61919 2 15.7844 2L17.1989 2C24.4238 2 30.2808 7.85698 30.2808 15.0819C30.2808 22.3069 24.4238 28.1638 17.1989 28.1638L6.01658 28.1638L2.06037 30.1846C2.00283 30.214 1.95147 30.254 1.89847 30.291C1.53157 30.5467 0.999999 30.2903 1 29.8167L1 24.3028V16.7844Z" fill="#FFD21E"/>
<path d="M20.1086 13.0502C20.598 13.2237 20.7928 14.2338 21.2872 13.9699C22.2235 13.4701 22.579 12.3028 22.0811 11.3628C21.5833 10.4227 20.4206 10.0658 19.4843 10.5657C18.5479 11.0655 18.1924 12.2328 18.6903 13.1728C18.9253 13.6165 19.6711 12.8951 20.1086 13.0502Z" fill="#32343D"/>
<path d="M11.0621 13.0502C10.5727 13.2237 10.3779 14.2338 9.88354 13.9699C8.94719 13.4701 8.59173 12.3028 9.08959 11.3628C9.58746 10.4227 10.7501 10.0658 11.6865 10.5657C12.6228 11.0655 12.9783 12.2328 12.4804 13.1728C12.2454 13.6165 11.4996 12.8951 11.0621 13.0502Z" fill="#32343D"/>
<path d="M15.6767 22.3026C19.4512 22.3026 20.6692 18.924 20.6692 17.189C20.6692 15.4541 18.434 18.1081 15.6767 18.1081C12.9195 18.1081 10.6843 15.4541 10.6843 17.189C10.6843 18.924 11.9023 22.3026 15.6767 22.3026Z" fill="#32343D"/>
<path d="M18.7088 21.3155C17.9621 21.9063 16.9693 22.3025 15.6767 22.3025C14.4622 22.3025 13.5124 21.9527 12.7827 21.4205C13.1887 20.5925 13.9259 19.9576 14.8208 19.6902C14.9743 19.6443 15.1324 19.9097 15.2943 20.1814C15.4505 20.4435 15.6102 20.7115 15.7727 20.7115C15.9458 20.7115 16.1159 20.4472 16.2819 20.1893C16.4553 19.9199 16.6243 19.6573 16.7878 19.7098C16.8355 19.7251 16.8827 19.7415 16.9294 19.7588C17.6944 20.0433 18.327 20.6017 18.7088 21.3155Z" fill="#FF323D"/>
</svg>
|
0
|
hf_public_repos/chat-ui/static
|
hf_public_repos/chat-ui/static/huggingchat/manifest.json
|
{
"background_color": "#ffffff",
"name": "HuggingChat",
"short_name": "HuggingChat",
"display": "standalone",
"start_url": "/chat",
"icons": [
{
"src": "/chat/huggingchat/icon-36x36.png",
"sizes": "36x36",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-48x48.png",
"sizes": "48x48",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-72x72.png",
"sizes": "72x72",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-96x96.png",
"sizes": "96x96",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-144x144.png",
"sizes": "144x144",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
]
}
|
0
|
hf_public_repos/chat-ui
|
hf_public_repos/chat-ui/.vscode/launch.json
|
{
"version": "0.2.0",
"configurations": [
{
"command": "npm run dev",
"name": "Run development server",
"request": "launch",
"type": "node-terminal"
}
]
}
|
0
|
hf_public_repos/chat-ui
|
hf_public_repos/chat-ui/.vscode/settings.json
|
{
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.codeActionsOnSave": {
"source.fixAll": "explicit"
},
"eslint.validate": ["javascript", "svelte"],
"[svelte]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
}
}
|
0
|
hf_public_repos/chat-ui
|
hf_public_repos/chat-ui/scripts/updateLocalEnv.ts
|
import fs from "fs";
import yaml from "js-yaml";

// Load the production Helm values file and pull out its env-var map.
const prodYaml = fs.readFileSync("chart/env/prod.yaml", "utf8");
// Round-trip through JSON to obtain a plain object (works around a node quirk).
const prodValues = JSON.parse(JSON.stringify(yaml.load(prodYaml)));
const envVars = prodValues.envVars as Record<string, string>;

// Render each variable as KEY=`value`, one per line (each entry keeps its own
// trailing newline, matching the previous string-concatenation behavior).
const publicConfig = Object.entries(envVars)
	.map(([key, value]) => `${key}=\`${value}\`\n`)
	.join("");

// Secrets come from a local file when present, otherwise from the environment.
const secretConfig =
	(fs.existsSync(".env.SECRET_CONFIG")
		? fs.readFileSync(".env.SECRET_CONFIG", "utf8")
		: process.env.SECRET_CONFIG) ?? "";

// Public config first, then a blank line, then the secret config.
fs.writeFileSync(".env.local", `${publicConfig}\n${secretConfig}`);
|
0
|
hf_public_repos/chat-ui
|
hf_public_repos/chat-ui/scripts/setupTest.ts
|
import { vi, afterAll } from "vitest";
import dotenv from "dotenv";
import { resolve } from "path";
import fs from "fs";
import { MongoMemoryServer } from "mongodb-memory-server";
let mongoServer: MongoMemoryServer;
// Load the .env file
const envPath = resolve(__dirname, "../.env");
dotenv.config({ path: envPath });
// Read the .env file content
const envContent = fs.readFileSync(envPath, "utf-8");
// Parse the .env content
const envVars = dotenv.parse(envContent);
// Separate public and private variables
const publicEnv = {};
const privateEnv = {};
for (const [key, value] of Object.entries(envVars)) {
if (key.startsWith("PUBLIC_")) {
publicEnv[key] = value;
} else {
privateEnv[key] = value;
}
}
vi.mock("$env/dynamic/public", () => ({
env: publicEnv,
}));
vi.mock("$env/dynamic/private", async () => {
mongoServer = await MongoMemoryServer.create();
return {
env: {
...privateEnv,
MONGODB_URL: mongoServer.getUri(),
},
};
});
afterAll(async () => {
if (mongoServer) {
await mongoServer.stop();
}
});
|
0
|
hf_public_repos/chat-ui
|
hf_public_repos/chat-ui/scripts/populate.ts
|
import readline from "readline";
import minimist from "minimist";

// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { env } from "$env/dynamic/private";
import { faker } from "@faker-js/faker";
import { ObjectId } from "mongodb";

// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { collections } from "$lib/server/database";
import { models } from "../src/lib/server/models.ts";
import type { User } from "../src/lib/types/User";
import type { Assistant } from "../src/lib/types/Assistant";
import type { Conversation } from "../src/lib/types/Conversation";
import type { Settings } from "../src/lib/types/Settings";
import type { CommunityToolDB, ToolLogoColor, ToolLogoIcon } from "../src/lib/types/Tool";
import { defaultEmbeddingModel } from "../src/lib/server/embeddingModels.ts";
// `Message` is only used in type positions, so import it as a type for
// consistency with the other type-only imports above.
import type { Message } from "../src/lib/types/Message.ts";
import { addChildren } from "../src/lib/utils/tree/addChildren.ts";
import { generateSearchTokens } from "../src/lib/utils/searchTokens.ts";
import { ReviewStatus } from "../src/lib/types/Review.ts";

// Interactive prompt used to confirm before touching the database.
const rl = readline.createInterface({
	input: process.stdin,
	output: process.stdout,
});

rl.on("close", function () {
	process.exit(0);
});

// Seeding steps that can be selected from the command line.
const possibleFlags = ["reset", "all", "users", "settings", "assistants", "conversations", "tools"];
const argv = minimist(process.argv.slice(2));
const flags = argv["_"].filter((flag) => possibleFlags.includes(flag));
/**
 * Builds a fake conversation message tree, starting with a system message
 * that carries the given preprompt. Roughly half the conversations are
 * linear chains; the rest branch off random earlier messages authored by
 * the opposite role.
 */
async function generateMessages(preprompt?: string): Promise<Message[]> {
	const isLinear = faker.datatype.boolean(0.5);
	const isInterrupted = faker.datatype.boolean(0.05);

	const messages: Message[] = [];

	// Every conversation starts with a system message holding the preprompt.
	messages.push({
		id: crypto.randomUUID(),
		from: "system",
		content: preprompt ?? "",
		createdAt: faker.date.recent({ days: 30 }),
		updatedAt: faker.date.recent({ days: 30 }),
	});

	let fromUser = true;
	let parentId = messages[0].id;

	if (isLinear) {
		// Linear chain: each message is the single child of the previous one.
		// The length is always even so the conversation ends on an assistant turn.
		const turnCount = faker.number.int({ min: 1, max: 25 }) * 2;
		for (let turn = 0; turn < turnCount; turn++) {
			parentId = addChildren(
				{
					messages,
					rootMessageId: messages[0].id,
				},
				{
					from: fromUser ? "user" : "assistant",
					content: faker.lorem.sentence({
						min: 10,
						max: fromUser ? 50 : 200,
					}),
					createdAt: faker.date.recent({ days: 30 }),
					updatedAt: faker.date.recent({ days: 30 }),
					interrupted: turn === turnCount - 1 && isInterrupted,
				},
				parentId
			);
			fromUser = !fromUser;
		}
	} else {
		// Branching tree: attach each message under the root or under any
		// earlier message authored by the opposite role.
		const turnCount = faker.number.int({ min: 2, max: 200 });
		for (let turn = 0; turn < turnCount; turn++) {
			addChildren(
				{
					messages,
					rootMessageId: messages[0].id,
				},
				{
					from: fromUser ? "user" : "assistant",
					content: faker.lorem.sentence({
						min: 10,
						max: fromUser ? 50 : 200,
					}),
					createdAt: faker.date.recent({ days: 30 }),
					updatedAt: faker.date.recent({ days: 30 }),
					interrupted: turn === turnCount - 1 && isInterrupted,
				},
				faker.helpers.arrayElement([
					messages[0].id,
					...messages.filter((m) => m.from === (fromUser ? "assistant" : "user")).map((m) => m.id),
				])
			);
			fromUser = !fromUser;
		}
	}

	return messages;
}
/**
 * Seeds the database according to the CLI `flags`: optionally resets all
 * collections, then creates fake users, settings, assistants, conversations
 * and community tools from faker-generated data.
 */
async function seed() {
	console.log("Seeding...");

	const modelIds = models.map((model) => model.id);

	// Destructive reset: wipe every collection touched by the seeder.
	if (flags.includes("reset")) {
		console.log("Starting reset of DB");
		await collections.users.deleteMany({});
		await collections.settings.deleteMany({});
		await collections.assistants.deleteMany({});
		await collections.conversations.deleteMany({});
		await collections.tools.deleteMany({});
		await collections.migrationResults.deleteMany({});
		await collections.semaphores.deleteMany({});
		console.log("Reset done");
	}

	if (flags.includes("users") || flags.includes("all")) {
		console.log("Creating 100 new users");
		const newUsers: User[] = Array.from({ length: 100 }, () => ({
			_id: new ObjectId(),
			createdAt: faker.date.recent({ days: 30 }),
			updatedAt: faker.date.recent({ days: 30 }),
			username: faker.internet.userName(),
			name: faker.person.fullName(),
			hfUserId: faker.string.alphanumeric(24),
			avatarUrl: faker.image.avatar(),
		}));

		await collections.users.insertMany(newUsers);
		console.log("Done creating users.");
	}

	const users = await collections.users.find().toArray();

	if (flags.includes("settings") || flags.includes("all")) {
		console.log("Updating settings for all users");
		// BUGFIX: the previous `users.forEach(async ...)` discarded the returned
		// promises, so "Done updating settings." was logged (and the process
		// could exit) before the updates actually completed. Await them all.
		await Promise.all(
			users.map(async (user) => {
				const settings: Settings = {
					userId: user._id,
					shareConversationsWithModelAuthors: faker.datatype.boolean(0.25),
					hideEmojiOnSidebar: faker.datatype.boolean(0.25),
					ethicsModalAcceptedAt: faker.date.recent({ days: 30 }),
					activeModel: faker.helpers.arrayElement(modelIds),
					createdAt: faker.date.recent({ days: 30 }),
					updatedAt: faker.date.recent({ days: 30 }),
					disableStream: faker.datatype.boolean(0.25),
					directPaste: faker.datatype.boolean(0.25),
					customPrompts: {},
					assistants: [],
				};
				await collections.settings.updateOne(
					{ userId: user._id },
					{ $set: { ...settings } },
					{ upsert: true }
				);
			})
		);
		console.log("Done updating settings.");
	}

	if (flags.includes("assistants") || flags.includes("all")) {
		console.log("Creating assistants for all users");
		await Promise.all(
			users.map(async (user) => {
				const name = faker.animal.insect();
				const assistants = faker.helpers.multiple<Assistant>(
					() => ({
						_id: new ObjectId(),
						name,
						createdById: user._id,
						createdByName: user.username,
						createdAt: faker.date.recent({ days: 30 }),
						updatedAt: faker.date.recent({ days: 30 }),
						userCount: faker.number.int({ min: 1, max: 100000 }),
						review: faker.helpers.enumValue(ReviewStatus),
						modelId: faker.helpers.arrayElement(modelIds),
						description: faker.lorem.sentence(),
						preprompt: faker.hacker.phrase(),
						exampleInputs: faker.helpers.multiple(() => faker.lorem.sentence(), {
							count: faker.number.int({ min: 0, max: 4 }),
						}),
						searchTokens: generateSearchTokens(name),
						last24HoursCount: faker.number.int({ min: 0, max: 1000 }),
					}),
					{ count: faker.number.int({ min: 3, max: 10 }) }
				);
				await collections.assistants.insertMany(assistants);
				// Record the created assistant ids on the user's settings.
				await collections.settings.updateOne(
					{ userId: user._id },
					{ $set: { assistants: assistants.map((a) => a._id.toString()) } },
					{ upsert: true }
				);
			})
		);
		console.log("Done creating assistants.");
	}

	if (flags.includes("conversations") || flags.includes("all")) {
		console.log("Creating conversations for all users");
		await Promise.all(
			users.map(async (user) => {
				const conversations = faker.helpers.multiple(
					async () => {
						const settings = await collections.settings.findOne<Settings>({ userId: user._id });

						// ~10% of conversations are attached to one of the user's assistants.
						const assistantId =
							settings?.assistants && settings.assistants.length > 0 && faker.datatype.boolean(0.1)
								? faker.helpers.arrayElement<ObjectId>(settings.assistants)
								: undefined;

						const preprompt =
							(assistantId
								? await collections.assistants
										.findOne({ _id: assistantId })
										.then((assistant: Assistant) => assistant?.preprompt ?? "")
								: faker.helpers.maybe(() => faker.hacker.phrase(), { probability: 0.5 })) ?? "";

						const messages = await generateMessages(preprompt);

						const conv = {
							_id: new ObjectId(),
							userId: user._id,
							assistantId,
							preprompt,
							createdAt: faker.date.recent({ days: 145 }),
							updatedAt: faker.date.recent({ days: 145 }),
							model: faker.helpers.arrayElement(modelIds),
							title: faker.internet.emoji() + " " + faker.hacker.phrase(),
							embeddingModel: defaultEmbeddingModel.id,
							messages,
							rootMessageId: messages[0].id,
						} satisfies Conversation;

						return conv;
					},
					{ count: faker.number.int({ min: 10, max: 200 }) }
				);

				await collections.conversations.insertMany(await Promise.all(conversations));
			})
		);
		console.log("Done creating conversations.");
	}

	// generate Community Tools
	if (flags.includes("tools") || flags.includes("all")) {
		const tools = await Promise.all(
			faker.helpers.multiple(
				() => {
					const _id = new ObjectId();
					const displayName = faker.company.catchPhrase();
					const description = faker.company.catchPhrase();

					const color = faker.helpers.arrayElement([
						"purple",
						"blue",
						"green",
						"yellow",
						"red",
					]) satisfies ToolLogoColor;

					const icon = faker.helpers.arrayElement([
						"wikis",
						"tools",
						"camera",
						"code",
						"email",
						"cloud",
						"terminal",
						"game",
						"chat",
						"speaker",
						"video",
					]) satisfies ToolLogoIcon;

					const baseUrl = faker.helpers.arrayElement([
						"stabilityai/stable-diffusion-3-medium",
						"multimodalart/cosxl",
						"gokaygokay/SD3-Long-Captioner",
						"xichenhku/MimicBrush",
					]);

					// keep empty for populate for now
					const user: User = faker.helpers.arrayElement(users);
					const createdById = user._id;
					const createdByName = user.username ?? user.name;

					return {
						type: "community" as const,
						_id,
						createdById,
						createdByName,
						displayName,
						// BUGFIX: replace(" ", "_") only replaced the first space.
						name: displayName.toLowerCase().replaceAll(" ", "_"),
						endpoint: "/test",
						description,
						color,
						icon,
						baseUrl,
						inputs: [],
						outputPath: null,
						outputType: "str" as const,
						showOutput: false,
						useCount: faker.number.int({ min: 0, max: 100000 }),
						last24HoursUseCount: faker.number.int({ min: 0, max: 1000 }),
						createdAt: faker.date.recent({ days: 30 }),
						updatedAt: faker.date.recent({ days: 30 }),
						searchTokens: generateSearchTokens(displayName),
						review: faker.helpers.enumValue(ReviewStatus),
						outputComponent: null,
						outputComponentIdx: null,
					};
				},
				{ count: faker.number.int({ min: 10, max: 200 }) }
			)
		);

		await collections.tools.insertMany(tools satisfies CommunityToolDB[]);
	}
}
// run seed: ask for interactive confirmation, then execute the seeder.
(async () => {
	try {
		rl.question(
			"You're about to run a seeding script on the following MONGODB_URL: \x1b[31m" +
				env.MONGODB_URL +
				"\x1b[0m\n\n With the following flags: \x1b[31m" +
				flags.join("\x1b[0m , \x1b[31m") +
				"\x1b[0m\n \n\n Are you sure you want to continue? (yes/no): ",
			async (confirm) => {
				if (confirm !== "yes") {
					console.log("Not 'yes', exiting.");
					rl.close();
					process.exit(0);
				}
				// BUGFIX: the outer try/catch only guards the synchronous
				// rl.question call; errors thrown inside this async callback
				// previously became unhandled rejections instead of exiting 1.
				try {
					console.log("Starting seeding...");
					await seed();
					console.log("Seeding done.");
				} catch (e) {
					console.error(e);
					process.exit(1);
				}
				rl.close();
			}
		);
	} catch (e) {
		console.error(e);
		process.exit(1);
	}
})();
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/setup.cfg
|
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
line_length = 88
include_trailing_comma = True
known_first_party = main
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 88
per-file-ignores = __init__.py:F401
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/setup.py
|
from pathlib import Path

from setuptools import setup


def _read_requirements():
    """Return the runtime dependencies from requirements.txt, one per line.

    Lines are stripped (the previous generator kept trailing newlines) and
    blanks are dropped.
    """
    lines = Path("requirements.txt").read_text(encoding="utf-8").splitlines()
    return [line.strip() for line in lines if line.strip()]


setup(
    name="api_inference_community",
    version="0.0.36",
    description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
    # Path.read_text closes the file, unlike the previous bare open() calls.
    long_description=Path("README.md").read_text(encoding="utf-8"),
    long_description_content_type="text/markdown",
    url="http://github.com/huggingface/api-inference-community",
    author="Nicolas Patry",
    author_email="nicolas@huggingface.co",
    license="MIT",
    packages=["api_inference_community"],
    python_requires=">=3.6.0",
    zip_safe=False,
    install_requires=_read_requirements(),
    extras_require={
        "test": [
            # "httpx>=0.18" was previously listed twice; deduplicated.
            "httpx>=0.18",
            "Pillow>=8.2",
            "torch>=1.9.0",
            "pytest>=6.2",
        ],
        "quality": ["black==22.3.0", "isort", "flake8", "mypy"],
    },
)
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/Makefile
|
# Developer tasks for api-inference-community.
# BUGFIX: `test` was missing from .PHONY, so a file named "test" would
# silently shadow the target.
.PHONY: quality style test

check_dirs := api_inference_community tests docker_images

# Fail if formatting or lint rules are violated (CI mode, no changes made).
quality:
	black --check $(check_dirs)
	isort --check-only $(check_dirs)
	flake8 $(check_dirs)

# Rewrite files in place to satisfy the formatters.
style:
	black $(check_dirs)
	isort $(check_dirs)

# Run the test suite verbosely with debug logging.
test:
	pytest -sv --log-level=DEBUG tests/
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/requirements.txt
|
starlette>=0.14.2
numpy>=1.18.0
pydantic>=2
parameterized>=0.8.1
pillow>=8.2.0
huggingface_hub>=0.20.2
datasets>=2.2
psutil>=6.0.0
pytest
httpx
uvicorn
black
isort
flake8
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/MANIFEST.in
|
include README.md requirements.txt
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/README.md
|
This repository enables third-party libraries integrated with [huggingface_hub](https://github.com/huggingface/huggingface_hub/) to create
their own Docker images so that the widgets on the Hub work as the `transformers` ones do.
The hardware to run the API will be provided by Hugging Face for now.
The `docker_images/common` folder is intended to be a starter point for all new libs that
want to be integrated.
### Adding a new container from a new lib.
1. Copy the `docker_images/common` folder into your library's name `docker_images/example`.
2. Edit:
- `docker_images/example/requirements.txt`
- `docker_images/example/app/main.py`
- `docker_images/example/app/pipelines/{task_name}.py`
to implement the desired functionality. All required code is marked with `IMPLEMENT_THIS` markup.
3. Remove:
- Any pipeline files in `docker_images/example/app/pipelines/` that are not used.
- Any tests associated with deleted pipelines in `docker_images/example/tests`.
- Any imports of the pipelines you deleted from `docker_images/example/app/pipelines/__init__.py`
4. Feel free to customize anything required by your lib everywhere you want. The only real requirements, are to honor the HTTP endpoints, in the same fashion as the `common` folder for all your supported tasks.
5. Edit `example/tests/test_api.py` to add TESTABLE_MODELS.
6. Pass the test suite `pytest -sv --rootdir docker_images/example/ docker_images/example/`
7. Submit your PR and enjoy !
### Going the full way
Doing the first 7 steps is good enough to get started; however, the steps below help you
anticipate and correct problems early on. Maintainers will help you
along the way if you don't feel confident following those steps yourself.
1. Test your creation within a docker
```python
./manage.py docker MY_MODEL
```
should work and responds on port 8000. `curl -X POST -d "test" http://localhost:8000` for instance if
the pipeline deals with simple text.
If it doesn't work out of the box and/or docker is slow for some reason you
can test locally (using your local python environment) with :
`./manage.py start MY_MODEL`
2. Test your docker uses cache properly.
When doing subsequent docker launch with the same model_id, the docker should start up very fast and not redownload the whole model file. If you see the model/repo being downloaded over and over, it means the cache is not being used correctly.
You can edit the `docker_images/{framework}/Dockerfile` and add an environment variable (by default it assumes `HUGGINGFACE_HUB_CACHE`), or your code directly to put
the model files in the `/data` folder.
3. Add a docker test.
Edit the `tests/test_dockers.py` file to add a new test with your new framework
in it (`def test_{framework}(self):` for instance). As a basic you should have 1 line per task in this test function with a real working model on the hub. Those tests are relatively slow but will check automatically that correct errors are replied by your API and that the cache works properly. To run those tests you can simply do:
```bash
RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_{framework}
```
### Modifying files within `api-inference-community/{routes,validation,..}.py`.
If you ever come across a bug within `api-inference-community/` package or want to update it
the development process is slightly more involved.
- First, make sure you need to change this package, each framework is very autonomous
so if your code can get away by being standalone go that way first as it's much simpler.
- If you can make the change only in `api-inference-community` without depending on it
that's also a great option. Make sure to add the proper tests to your PR.
- Finally, the best way to go is to develop locally using `manage.py` command:
- Do the necessary modifications within `api-inference-community` first.
- Install it locally in your environment with `pip install -e .`
- Install your package dependencies locally.
- Run your webserver locally: `./manage.py start --framework example --task audio-source-separation --model-id MY_MODEL`
- When everything is working, you will need to split your PR in two, 1 for the `api-inference-community` part.
The second one will be for your package specific modifications and will only land once the `api-inference-community` tag has landed.
- This workflow is still work in progress, don't hesitate to ask questions to maintainers.
A similar command, `./manage.py docker --framework example --task audio-source-separation --model-id MY_MODEL`,
will launch the server, but this time in a protected, controlled docker environment, making sure the behavior
will be exactly the one in the API.
### Available tasks
- **Automatic speech recognition**: Input is a file, output is a dict of understood words being said within the file
- **Text generation**: Input is a text, output is a dict of generated text
- **Image recognition**: Input is an image, output is a dict of generated text
- **Question answering**: Input is a question + some context, output is a dict containing necessary information to locate the answer to the `question` within the `context`.
- **Audio source separation**: Input is some audio, and the output is n audio files that sum up to the original audio but contain individual sources of sound (either speakers or instruments for instance).
- **Token classification**: Input is some text, and the output is a list of entities mentioned in the text. Entities can be anything remarkable like locations, organisations, persons, times etc...
- **Text to speech**: Input is some text, and the output is an audio file saying the text...
- **Sentence Similarity**: Input is some sentence and a list of reference sentences, and the output is the list of similarity scores.
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/build_docker.py
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import uuid
def run(command):
    """Echo `command`, execute it, and exit the whole process with the
    child's return code if it failed."""
    print(" ".join(command))
    result = subprocess.run(command)
    if result.returncode:
        sys.exit(result.returncode)
def build(framework: str, is_gpu: bool):
    """Build and push the docker image for `framework`; return the image tag.

    NOTE(review): `is_gpu` is currently unused in this function — the same
    image is built for CPU and GPU; confirm whether that is intended.
    """
    hostname = os.getenv("DEFAULT_HOSTNAME")
    suffix = str(uuid.uuid4())[:5]
    tag = f"{framework}-{suffix}"
    container_tag = f"{hostname}/api-inference/community:{tag}"
    run(["docker", "build", f"docker_images/{framework}", "-t", container_tag])
    # Log in to the registry by piping the password to `docker login` so it
    # never appears on the command line.
    password = os.environ["REGISTRY_PASSWORD"]
    username = os.environ["REGISTRY_USERNAME"]
    echo_proc = subprocess.Popen(["echo", password], stdout=subprocess.PIPE)
    login_proc = subprocess.Popen(
        ["docker", "login", "-u", username, "--password-stdin", hostname],
        stdin=echo_proc.stdout,
        stdout=subprocess.PIPE,
    )
    login_proc.communicate()
    run(["docker", "push", container_tag])
    return tag
def main():
    """CLI entry point: build (and push) one or all framework images.

    Fix: the `--out` file used to be re-opened in "w" mode inside the loop,
    so building `all` truncated the file on each iteration and only the last
    framework's tag survived. All tags are now collected and written once.
    """
    frameworks = {
        dirname for dirname in os.listdir("docker_images") if dirname != "common"
    }
    framework_choices = frameworks.copy()
    framework_choices.add("all")
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "framework",
        type=str,
        choices=framework_choices,
        help="Which framework image to build.",
    )
    parser.add_argument(
        "--out",
        type=str,
        help="Where to store the new tags",
    )
    parser.add_argument(
        "--gpu",
        action="store_true",
        help="Build the GPU version of the model",
    )
    args = parser.parse_args()
    # Only allow building from an up-to-date `main` branch.
    branch = (
        subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
        .decode("utf-8")
        .strip()
    )
    if branch != "main":
        raise Exception(f"Go to branch `main` ({branch})")
    print("Pulling")
    subprocess.run(["git", "pull"])
    if args.framework == "all":
        outputs = [(framework, build(framework, args.gpu)) for framework in frameworks]
    else:
        outputs = [(args.framework, build(args.framework, args.gpu))]
    compute = "GPU" if args.gpu else "CPU"
    lines = []
    for framework, tag in outputs:
        name = f"{framework.upper()}_{compute}_TAG"
        print(name, tag)
        lines.append(f"{name}={tag}\n")
    if args.out:
        with open(args.out, "w") as f:
            f.writelines(lines)


if __name__ == "__main__":
    main()
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/manage.py
|
#!/usr/bin/env python
import argparse
import ast
import hashlib
import os
import subprocess
import sys
import uuid
from huggingface_hub import HfApi
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so callers may pass user-relative paths.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the previous directory, even if the body raised.
        os.chdir(self.savedPath)
class DockerPopen(subprocess.Popen):
    """Popen subclass whose context-manager exit terminates the child.

    Plain Popen.__exit__ only closes the pipes; for a long-running
    `docker run` we also want leaving the `with` block to stop the process.
    """

    def __exit__(self, exc_type, exc_val, traceback):
        self.terminate()
        # Give the process up to 5 seconds to die before normal cleanup.
        self.wait(5)
        return super().__exit__(exc_type, exc_val, traceback)
def create_docker(name: str, is_gpu: bool) -> str:
    """Build the docker image for framework `name` and return its fresh tag.

    NOTE(review): `is_gpu` is not used in this function — confirm whether a
    GPU-specific build is expected here.
    """
    suffix = str(uuid.uuid4())[:5]
    tag = f"{name}:{suffix}"
    docker_dir = os.path.join(
        os.path.dirname(os.path.normpath(__file__)), "docker_images", name
    )
    with cd(docker_dir):
        subprocess.run(["docker", "build", ".", "-t", tag])
    return tag
def resolve_dataset(args, task: str):
    """Fill in missing dataset settings on `args` by inspecting the dataset
    builder, then return (name, config, split, column)."""
    import datasets

    builder = datasets.load_dataset_builder(
        args.dataset_name, use_auth_token=args.token
    )
    if args.dataset_config is None:
        args.dataset_config = builder.config_id
        print(f"Inferred dataset_config {args.dataset_config}")
    splits = builder.info.splits
    if splits is not None and args.dataset_split not in splits:
        raise ValueError(
            f"The split `{args.dataset_split}` is not a valid split, please choose from {','.join(splits.keys())}"
        )
    # Infer the audio column from the dataset's task template matching `task`.
    task_templates = builder.info.task_templates
    if task_templates is not None:
        for task_template in task_templates:
            if task_template.task == task:
                args.dataset_column = task_template.audio_file_path_column
                print(f"Inferred dataset_column {args.dataset_column}")
    return (
        args.dataset_name,
        args.dataset_config,
        args.dataset_split,
        args.dataset_column,
    )
def get_repo_name(model_id: str, dataset_name: str) -> str:
    """Derive a short, collision-resistant repo name for a bulk run.

    The md5 is computed over the fully qualified names so that repos whose
    basenames collide still get distinct suffixes.
    """
    digest = hashlib.md5((model_id + dataset_name).encode("utf-8")).hexdigest()
    model_base = model_id.split("/")[-1]
    dataset_base = dataset_name.split("/")[-1]
    return f"bulk-{model_base[:10]}-{dataset_base[:10]}-{digest[:5]}"
def show(args):
    """Print, for each framework docker image, the tasks declared in its
    ALLOWED_TASKS mapping.

    The main.py files are parsed with `ast` instead of being imported, so
    missing dependencies or slow imports cannot break the listing.
    """
    directory = os.path.join(
        os.path.dirname(os.path.normpath(__file__)), "docker_images"
    )
    for framework in sorted(os.listdir(directory)):
        print(f"{framework}")
        main_py = os.path.join(directory, framework, "app", "main.py")
        with open(main_py, "r") as source:
            tree = ast.parse(source.read())
        for item in tree.body:
            if isinstance(item, ast.AnnAssign) and item.target.id == "ALLOWED_TASKS":
                for key in item.value.keys:
                    print(" " * 4, key.value)
def resolve(model_id: str) -> "tuple[str, str]":
    """Resolve (task, framework) for `model_id` from hub metadata.

    Fix: the original return annotation was the list literal `[str, str]`,
    which is not a valid type annotation; it is replaced by a tuple
    annotation (as a string, so no typing import is required at runtime).

    Raises:
        ValueError: when the model does not exist, or lacks `pipeline_tag`
            or `library_name` metadata (with a hint on how to set them).
    """
    try:
        info = HfApi().model_info(model_id)
    except Exception as e:
        raise ValueError(
            f"The hub has no information on {model_id}, does it exist: {e}"
        )
    try:
        task = info.pipeline_tag
    except Exception:
        raise ValueError(
            f"The hub has no `pipeline_tag` on {model_id}, you can set it in the `README.md` yaml header"
        )
    try:
        framework = info.library_name
    except Exception:
        raise ValueError(
            f"The hub has no `library_name` on {model_id}, you can set it in the `README.md` yaml header"
        )
    # Framework names are used as python package directories, hence "-" -> "_".
    return task, framework.replace("-", "_")
def resolve_task_framework(args):
    """Return (model_id, task, framework), querying the hub only for the
    pieces the user did not supply on the command line."""
    model_id = args.model
    task = args.task
    framework = args.framework
    if task is None or framework is None:
        hub_task, hub_framework = resolve(model_id)
        if task is None:
            task = hub_task
            print(f"Inferred task : {task}")
        if framework is None:
            framework = hub_framework
            print(f"Inferred framework : {framework}")
    return model_id, task, framework
def start(args):
    """Run the framework's inference app locally through uvicorn.

    The app is loaded by dotted path ("app.main:app"), so the framework's
    directory must be importable, and it reads its configuration from the
    environment (MODEL_ID / TASK / COMPUTE_TYPE).
    """
    import uvicorn

    model_id, task, framework = resolve_task_framework(args)
    framework_dir = os.path.join(
        os.path.dirname(os.path.normpath(__file__)), "docker_images", framework
    )
    sys.path.append(framework_dir)
    os.environ["MODEL_ID"] = model_id
    os.environ["TASK"] = task
    if args.gpu:
        os.environ["COMPUTE_TYPE"] = "gpu"
    uvicorn.run("app.main:app", host="127.0.0.1", port=8000, log_level="info")
def docker(args):
    """Build the framework's image and run the model inside it on port 8000."""
    model_id, task, framework = resolve_task_framework(args)
    tag = create_docker(framework, is_gpu=args.gpu)
    gpu_flags = ["--gpus", "all"] if args.gpu else []
    command = [
        "docker",
        "run",
        *gpu_flags,
        "-p",
        "8000:80",
        "-e",
        f"TASK={task}",
        "-e",
        f"MODEL_ID={model_id}",
        "-e",
        f"COMPUTE_TYPE={'gpu' if args.gpu else 'cpu'}",
        "-e",
        f"DEBUG={os.getenv('DEBUG', '0')}",
        "-v",
        "/tmp:/data",
        "-t",
        tag,
    ]
    print(" ".join(command))
    # DockerPopen terminates the container process when the block exits.
    with DockerPopen(command) as proc:
        try:
            proc.wait()
        except KeyboardInterrupt:
            proc.terminate()
def main():
    """Dispatch the `start`, `docker` and `show` subcommands."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    def _add_model_arguments(sub, verb):
        # `start` and `docker` share the same options apart from the verb
        # mentioned in the positional argument's help string.
        sub.add_argument(
            "model",
            type=str,
            help=f"Which model_id to {verb}.",
        )
        sub.add_argument(
            "--task",
            type=str,
            help="Which task to load",
        )
        sub.add_argument(
            "--framework",
            type=str,
            help="Which framework to load",
        )
        sub.add_argument(
            "--gpu",
            action="store_true",
            help="Using gpu ?",
        )

    parser_start = subparsers.add_parser(
        "start", help="Start a local version of a model inference"
    )
    _add_model_arguments(parser_start, "start")
    parser_start.set_defaults(func=start)

    parser_docker = subparsers.add_parser(
        "docker", help="Start a docker version of a model inference"
    )
    _add_model_arguments(parser_docker, "docker")
    parser_docker.set_defaults(func=docker)

    parser_show = subparsers.add_parser(
        "show", help="Show dockers and the various pipelines they implement"
    )
    parser_show.set_defaults(func=show)

    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/.pre-commit-config.yaml
|
repos:
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/pre-commit/mirrors-isort
rev: v5.7.0 # Use the revision sha / tag you want to point at
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "b84ce099a2fd3c5216b6ccf3fd176c3828b075fb" # Use the sha / tag you want to point at
hooks:
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/common/
entry: mypy docker_images/common/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/speechbrain/
entry: mypy docker_images/speechbrain/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/asteroid/
entry: mypy docker_images/asteroid/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/allennlp/
entry: mypy docker_images/allennlp/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/espnet/
entry: mypy docker_images/espnet/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/timm/
entry: mypy docker_images/timm/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/flair/
entry: mypy docker_images/flair/
pass_filenames: false
- id: mypy
args: [--ignore-missing-imports]
additional_dependencies: [tokenize-rt==3.2.0]
files: ^docker_images/sentence_transformers/
entry: mypy docker_images/sentence_transformers/
pass_filenames: false
|
0
|
hf_public_repos
|
hf_public_repos/api-inference-community/build.sh
|
# Build the sdist/wheel and upload the package to PyPI with twine.
pip install -U pip build twine
python -m build
python -m twine upload dist/*
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/api_inference_community/normalizers.py
|
"""
Helper classes to modify pipeline outputs from tensors to expected pipeline output
"""
from typing import TYPE_CHECKING, Dict, List, Union
Classes = Dict[str, Union[str, float]]
if TYPE_CHECKING:
try:
import torch
except Exception:
pass
def speaker_diarization_normalize(
    tensor: "torch.Tensor", sampling_rate: int, classnames: List[str]
) -> "List[Classes]":
    """Turn a (frames, speakers) binary activity tensor into a list of
    {"class", "start", "end"} segments (times in seconds), sorted by start.

    Raises:
        ValueError: when `classnames` does not have one entry per speaker.
    """
    num_speakers = tensor.shape[1]
    if len(classnames) != num_speakers:
        raise ValueError(
            f"There is a mismatch between classnames ({len(classnames)}) and number of speakers ({num_speakers})"
        )
    segments = []
    for speaker in range(num_speakers):
        # Runs of identical values give us contiguous active/inactive spans.
        values, counts = tensor[:, speaker].unique_consecutive(return_counts=True)
        frame = 0
        for value, count in zip(values, counts):
            if value == 1:
                segments.append(
                    {
                        "class": classnames[speaker],
                        "start": frame / sampling_rate,
                        "end": (frame + count.item()) / sampling_rate,
                    }
                )
            frame += count.item()
    return sorted(segments, key=lambda segment: segment["start"])
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/api_inference_community/hub.py
|
import json
import logging
import os
import pathlib
import re
from typing import List, Optional
from huggingface_hub import ModelCard, constants, hf_api, try_to_load_from_cache
from huggingface_hub.file_download import repo_folder_name
logger = logging.getLogger(__name__)
def _cached_repo_root_path(cache_dir: pathlib.Path, repo_id: str) -> pathlib.Path:
    """Absolute cache folder for the model repo `repo_id` under `cache_dir`."""
    folder_name = repo_folder_name(repo_id=repo_id, repo_type="model")
    return cache_dir / pathlib.Path(folder_name)
def cached_revision_path(cache_dir, repo_id, revision) -> pathlib.Path:
    """Resolve the local snapshot directory for (repo_id, revision).

    Symbolic refs (e.g. "main") are resolved through the cache's refs/ files.

    Raises:
        Exception: when the repo, the snapshots folder, or the exact revision
            is not present in the local cache.
    """
    if revision is None:
        revision = "main"
    repo_cache = _cached_repo_root_path(cache_dir, repo_id)
    if not repo_cache.is_dir():
        msg = f"Local repo {repo_cache} does not exist"
        logger.error(msg)
        raise Exception(msg)
    refs_dir = repo_cache / "refs"
    snapshots_dir = repo_cache / "snapshots"
    # A ref file (e.g. refs/main) contains the commit sha it points to.
    if refs_dir.is_dir():
        revision_file = refs_dir / revision
        if revision_file.exists():
            with revision_file.open() as f:
                revision = f.read()
    if not snapshots_dir.exists():
        msg = f"No local revision path {snapshots_dir} found for {repo_id}, revision {revision}"
        logger.error(msg)
        raise Exception(msg)
    if revision not in os.listdir(snapshots_dir):
        # No cache for this revision and we won't try to return a random revision
        error_msg = f"No revision path found for {repo_id}, revision {revision}"
        logger.error(error_msg)
        raise Exception(error_msg)
    return snapshots_dir / revision
def _build_offline_model_info(
    repo_id: str, cache_dir: pathlib.Path, revision: str
) -> hf_api.ModelInfo:
    """Reconstruct a partial ModelInfo for `repo_id` from the local cache.

    Only the cached README (model card) and the cached files are available
    offline, so the result covers most — not all — fields a hub-fetched
    ModelInfo would have.
    """
    logger.info("Rebuilding offline model info for repo %s", repo_id)
    card_path = try_to_load_from_cache(
        repo_id=repo_id,
        filename="README.md",
        cache_dir=cache_dir,
        revision=revision,
    )
    if not isinstance(card_path, str):
        raise Exception(
            "Unable to rebuild offline model info, no README could be found"
        )
    card_path = pathlib.Path(card_path)
    logger.debug("Loading model card from model readme %s", card_path)
    card_data = ModelCard.load(card_path).data.to_dict()
    repo = card_path.parent
    logger.debug("Repo path %s", repo)
    siblings = _build_offline_siblings(repo)
    model_info = hf_api.ModelInfo(
        private=False,
        downloads=0,
        likes=0,
        id=repo_id,
        card_data=card_data,
        siblings=siblings,
        **card_data,
    )
    logger.info("Offline model info for repo %s: %s", repo, model_info)
    return model_info
def _build_offline_siblings(repo: pathlib.Path) -> List[dict]:
siblings = []
prefix_pattern = re.compile(r"^" + re.escape(str(repo)) + r"(.*)$")
for root, dirs, files in os.walk(repo):
for file in files:
filepath = os.path.join(root, file)
size = os.stat(filepath).st_size
m = prefix_pattern.match(filepath)
if not m:
msg = (
f"File {filepath} does not match expected pattern {prefix_pattern}"
)
logger.error(msg)
raise Exception(msg)
filepath = m.group(1)
filepath = filepath.strip(os.sep)
sibling = dict(rfilename=filepath, size=size)
siblings.append(sibling)
return siblings
def _cached_model_info(
    repo_id: str, revision: str, cache_dir: pathlib.Path
) -> hf_api.ModelInfo:
    """Load prefetched model info from `hub_model_info.json` in the cached
    revision folder, or rebuild a partial one from the cached files.

    Note: hub_download/snapshot_download do NOT create this json file; it is
    a convenience hook in case the rebuilt offline info misses a use case.
    """
    revision_path = cached_revision_path(cache_dir, repo_id, revision)
    model_info_path = revision_path / "hub_model_info.json"
    logger.info("Checking if there are some cached model info at %s", model_info_path)
    if os.path.exists(model_info_path):
        with open(model_info_path, "r") as f:
            info = hf_api.ModelInfo(**json.load(f))
        logger.debug("Cached model info from file: %s", info)
    else:
        logger.debug(
            "No cached model info file %s found, "
            "rebuilding partial model info from cached model files",
            model_info_path,
        )
        info = _build_offline_model_info(repo_id, cache_dir, revision)
    return info
def hub_model_info(
    repo_id: str,
    revision: Optional[str] = None,
    cache_dir: Optional[pathlib.Path] = None,
    **kwargs,
) -> hf_api.ModelInfo:
    """Get Hub model info, falling back to the local cache when offline."""
    if revision is None:
        revision = "main"
    if not constants.HF_HUB_OFFLINE:
        # Online: just ask the hub.
        return hf_api.model_info(repo_id=repo_id, revision=revision, **kwargs)
    logger.info("Model info for offline mode")
    if cache_dir is None:
        cache_dir = pathlib.Path(constants.HF_HUB_CACHE)
    return _cached_model_info(repo_id, revision, cache_dir)
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/api_inference_community/routes.py
|
import base64
import io
import ipaddress
import logging
import os
import time
from typing import Any, Dict
import psutil
from api_inference_community.validation import (
AUDIO,
AUDIO_INPUTS,
IMAGE,
IMAGE_INPUTS,
IMAGE_OUTPUTS,
KNOWN_TASKS,
ffmpeg_convert,
normalize_payload,
parse_accept,
)
from pydantic import ValidationError
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
HF_HEADER_COMPUTE_TIME = "x-compute-time"
HF_HEADER_COMPUTE_TYPE = "x-compute-type"
COMPUTE_TYPE = os.getenv("COMPUTE_TYPE", "cpu")
logger = logging.getLogger(__name__)
def already_left(request: Request) -> bool:
    """
    Check if the caller has already left without waiting for the answer to come. This can help during burst to relieve
    the pressure on the worker by cancelling jobs whose results don't matter as they won't be fetched anyway
    :param request:
    :return: bool
    """
    # NOTE: Starlette method request.is_disconnected is totally broken, consumes the payload, does not return
    # the correct status. So we use the good old way to identify if the caller is still there.
    # In any case, if we are not sure, we return False
    logger.info("Checking if request caller already left")
    try:
        client = request.client
        host = client.host
        if not host:
            return False
        port = int(client.port)
        host = ipaddress.ip_address(host)
        if port <= 0 or port > 65535:
            logger.warning("Unexpected source port format for caller %s", port)
            return False
        counter = 0
        # Scan established TCP connections for one matching the caller's
        # (host, port); if found, the caller is most likely still waiting.
        for connection in psutil.net_connections(kind="tcp"):
            counter += 1
            if connection.status != "ESTABLISHED":
                continue
            if not connection.raddr:
                continue
            if int(connection.raddr.port) != port:
                continue
            if (
                not connection.raddr.ip
                or ipaddress.ip_address(connection.raddr.ip) != host
            ):
                continue
            logger.info(
                "Found caller connection still established, caller is most likely still there, %s",
                connection,
            )
            return False
    except Exception as e:
        logger.warning(
            "Unexpected error while checking if caller already left, assuming still there"
        )
        logger.exception(e)
        return False
    logger.info(
        "%d connections checked. No connection found matching to the caller, probably left",
        counter,
    )
    return True
async def pipeline_route(request: Request) -> Response:
    """Main inference endpoint: validate task and payload, then run the
    pipeline and serialize its output through `call_pipe`."""
    start = time.time()
    task = os.environ["TASK"]
    # Shortcut: quickly check the task is in enum: no need to go any further otherwise, as we know for sure that
    # normalize_payload will fail below: this avoids us to wait for the pipeline to be loaded to return
    if task not in KNOWN_TASKS:
        msg = f"The task `{task}` is not recognized by api-inference-community"
        logger.error(msg)
        # Special case: despite the fact that the task comes from environment (which could be considered a service
        # config error, thus triggering a 500), this var indirectly comes from the user
        # so we choose to have a 400 here
        return JSONResponse({"error": msg}, status_code=400)
    if os.getenv("DISCARD_LEFT", "0").lower() in [
        "1",
        "true",
        "yes",
    ] and already_left(request):
        logger.info("Discarding request as the caller already left")
        return Response(status_code=204)
    payload = await request.body()
    if os.getenv("DEBUG", "0") in {"1", "true"}:
        # In debug mode, let pipeline-loading errors propagate with a traceback.
        pipe = request.app.get_pipeline()
    try:
        pipe = request.app.get_pipeline()
        try:
            sampling_rate = pipe.sampling_rate
        except Exception:
            sampling_rate = None
            if task in AUDIO_INPUTS:
                msg = f"Sampling rate is expected for model for audio task {task}"
                logger.error(msg)
                return JSONResponse({"error": msg}, status_code=500)
    except Exception as e:
        return JSONResponse({"error": str(e)}, status_code=500)
    try:
        inputs, params = normalize_payload(payload, task, sampling_rate=sampling_rate)
    except ValidationError as e:
        errors = []
        for error in e.errors():
            if len(error["loc"]) > 0:
                errors.append(
                    f'{error["msg"]}: received `{error["loc"][0]}` in `parameters`'
                )
            else:
                errors.append(
                    f'{error["msg"]}: received `{error["input"]}` in `parameters`'
                )
        return JSONResponse({"error": errors}, status_code=400)
    except Exception as e:
        # We assume the payload is bad -> 400
        logger.warning("Error while parsing input %s", e)
        return JSONResponse({"error": str(e)}, status_code=400)
    accept = request.headers.get("accept", "")
    lora_adapter = request.headers.get("lora")
    if lora_adapter:
        params["lora_adapter"] = lora_adapter
    return call_pipe(pipe, inputs, params, start, accept)
def call_pipe(pipe: Any, inputs, params: Dict, start: float, accept: str) -> Response:
    """Run `pipe` on `inputs`, capture warnings logged during inference, and
    serialize the result according to the task and the Accept header."""
    root_logger = logging.getLogger()
    warnings = set()

    class RequestsHandler(logging.Handler):
        def emit(self, record):
            """Send the log records (created by loggers) to
            the appropriate destination.
            """
            warnings.add(record.getMessage())

    handler = RequestsHandler()
    handler.setLevel(logging.WARNING)
    root_logger.addHandler(handler)
    # Attach to every known logger so library-emitted warnings are captured too.
    for _logger in logging.root.manager.loggerDict.values():  # type: ignore
        try:
            _logger.addHandler(handler)
        except Exception:
            pass
    status_code = 200
    if os.getenv("DEBUG", "0") in {"1", "true"}:
        # In debug mode, let inference errors propagate with a full traceback.
        outputs = pipe(inputs, **params)
    try:
        outputs = pipe(inputs, **params)
        task = os.getenv("TASK")
        metrics = get_metric(inputs, task, pipe)
    except (AssertionError, ValueError, TypeError) as e:
        # Input-shaped failures -> client error.
        outputs = {"error": str(e)}
        status_code = 400
    except Exception as e:
        outputs = {"error": "unknown error"}
        status_code = 500
        logger.error(f"There was an inference error: {e}")
        logger.exception(e)
    if warnings and isinstance(outputs, dict):
        outputs["warnings"] = list(sorted(warnings))
    headers = {
        HF_HEADER_COMPUTE_TIME: "{:.3f}".format(time.time() - start),
        HF_HEADER_COMPUTE_TYPE: COMPUTE_TYPE,
        # https://stackoverflow.com/questions/43344819/reading-response-headers-with-fetch-api/44816592#44816592
        "access-control-expose-headers": f"{HF_HEADER_COMPUTE_TYPE}, {HF_HEADER_COMPUTE_TIME}",
    }
    if status_code == 200:
        headers.update(**{k: str(v) for k, v in metrics.items()})
    task = os.getenv("TASK")
    if task == "text-to-speech":
        waveform, sampling_rate = outputs
        audio_format = parse_accept(accept, AUDIO)
        data = ffmpeg_convert(waveform, sampling_rate, audio_format)
        headers["content-type"] = f"audio/{audio_format}"
        return Response(data, headers=headers, status_code=status_code)
    elif task == "audio-to-audio":
        waveforms, sampling_rate, labels = outputs
        items = []
        headers["content-type"] = "application/json"
        audio_format = parse_accept(accept, AUDIO)
        for waveform, label in zip(waveforms, labels):
            blob = ffmpeg_convert(waveform, sampling_rate, audio_format)
            items.append(
                {
                    "label": label,
                    "blob": base64.b64encode(blob).decode("utf-8"),
                    "content-type": f"audio/{audio_format}",
                }
            )
        return JSONResponse(items, headers=headers, status_code=status_code)
    elif task in IMAGE_OUTPUTS:
        image = outputs
        image_format = parse_accept(accept, IMAGE)
        buffer = io.BytesIO()
        image.save(buffer, format=image_format.upper())
        buffer.seek(0)
        return Response(
            buffer.read(),
            headers=headers,
            status_code=200,
            media_type=f"image/{image_format}",
        )
    return JSONResponse(
        outputs,
        headers=headers,
        status_code=status_code,
    )
def get_metric(inputs, task, pipe):
    """Return the usage-metric header dict for this request, keyed by the
    task's input family (audio length, image count, or character count)."""
    if task in AUDIO_INPUTS:
        return {"x-compute-audio-length": get_audio_length(inputs, pipe.sampling_rate)}
    if task in IMAGE_INPUTS:
        return {"x-compute-images": 1}
    return {"x-compute-characters": get_input_characters(inputs)}
def get_audio_length(inputs, sampling_rate: int) -> float:
    """Duration in seconds of the audio input: either a 1-D sample array, or
    a {"raw", "sampling_rate"} dict (internal AsrLive path only)."""
    if isinstance(inputs, dict):
        # Should only apply for internal AsrLive
        return inputs["raw"].shape[0] / inputs["sampling_rate"]
    return inputs.shape[0] / sampling_rate
def get_input_characters(inputs) -> int:
    """Total character count across a (possibly nested) str/list/tuple/dict
    input; unrecognized types count as 0."""
    if isinstance(inputs, str):
        return len(inputs)
    if isinstance(inputs, (tuple, list)):
        return sum(get_input_characters(item) for item in inputs)
    if isinstance(inputs, dict):
        return sum(get_input_characters(value) for value in inputs.values())
    return 0
async def status_ok(request):
    """Health-check endpoint: always replies {"ok": "ok"} with status 200."""
    return JSONResponse({"ok": "ok"})
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/api_inference_community/validation.py
|
import json
import os
import subprocess
from base64 import b64decode
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple, Union
import annotated_types
import numpy as np
from pydantic import BaseModel, RootModel, Strict, field_validator
from typing_extensions import Annotated
MinLength = Annotated[int, annotated_types.Ge(1), annotated_types.Le(500), Strict()]
MaxLength = Annotated[int, annotated_types.Ge(1), annotated_types.Le(500), Strict()]
TopK = Annotated[int, annotated_types.Ge(1), Strict()]
TopP = Annotated[float, annotated_types.Ge(0.0), annotated_types.Le(1.0), Strict()]
MaxTime = Annotated[float, annotated_types.Ge(0.0), annotated_types.Le(120.0), Strict()]
NumReturnSequences = Annotated[
int, annotated_types.Ge(1), annotated_types.Le(10), Strict()
]
RepetitionPenalty = Annotated[
float, annotated_types.Ge(0.0), annotated_types.Le(100.0), Strict()
]
Temperature = Annotated[
float, annotated_types.Ge(0.0), annotated_types.Le(100.0), Strict()
]
CandidateLabels = Annotated[list, annotated_types.MinLen(1)]
class FillMaskParamsCheck(BaseModel):
    # Number of candidate fillings to return (>= 1, per the TopK constraint).
    top_k: Optional[TopK] = None
class ZeroShotParamsCheck(BaseModel):
    # Either a single string or a non-empty list of labels.
    candidate_labels: Union[str, CandidateLabels]
    # When true, labels are presumably scored independently — confirm with pipeline.
    multi_label: Optional[bool] = None
class SharedGenerationParams(BaseModel):
    # Generation parameters shared by the text-generation-style tasks.
    # Ranges are enforced by the Annotated types declared above.
    min_length: Optional[MinLength] = None
    max_length: Optional[MaxLength] = None
    top_k: Optional[TopK] = None
    top_p: Optional[TopP] = None
    max_time: Optional[MaxTime] = None
    repetition_penalty: Optional[RepetitionPenalty] = None
    temperature: Optional[Temperature] = None

    @field_validator("max_length")
    def max_length_must_be_larger_than_min_length(
        cls, max_length: Optional[MaxLength], values
    ):
        # `values.data` holds the previously-validated fields; a missing or
        # None min_length is treated as 0 so any max_length passes.
        min_length = values.data.get("min_length", 0)
        if min_length is None:
            min_length = 0
        if max_length is not None and max_length < min_length:
            raise ValueError("min_length cannot be larger than max_length")
        return max_length
class TextGenerationParamsCheck(SharedGenerationParams):
    # NOTE(review): presumably controls whether the prompt is included in the
    # returned text — confirm against the pipeline implementation.
    return_full_text: Optional[bool] = None
    # Number of alternative generations (1-10 per NumReturnSequences).
    num_return_sequences: Optional[NumReturnSequences] = None
class SummarizationParamsCheck(SharedGenerationParams):
    # Number of alternative summaries (1-10 per NumReturnSequences).
    num_return_sequences: Optional[NumReturnSequences] = None
class ConversationalInputsCheck(BaseModel):
    # Latest user message plus the prior turns of the conversation.
    text: str
    past_user_inputs: List[str]
    generated_responses: List[str]
class QuestionInputsCheck(BaseModel):
    """Inputs for question-answering: a question and the context to answer from."""

    question: str
    context: str
class SentenceSimilarityInputsCheck(BaseModel):
    """Inputs for sentence-similarity: one source sentence compared to many."""

    source_sentence: str
    sentences: List[str]
class TableQuestionAnsweringInputsCheck(BaseModel):
    """Inputs for table-question-answering: a column-oriented table plus a query.

    `table` maps each column name to its list of cell values; every column
    must have the same number of rows.
    """

    table: Dict[str, List[str]]
    query: str

    @field_validator("table")
    def all_rows_must_have_same_length(cls, table: Dict[str, List[str]]):
        columns = list(table.values())
        # An empty table previously crashed with IndexError on columns[0];
        # report it as a proper validation error instead.
        if not columns:
            raise ValueError("The table must contain at least one column")
        n = len(columns[0])
        if all(len(x) == n for x in columns):
            return table
        raise ValueError("All rows in the table must be the same length")
class TabularDataInputsCheck(BaseModel):
    """Inputs for tabular-classification / tabular-regression.

    `data` maps each column name to its list of cell values; every column
    must have the same number of rows.
    """

    data: Dict[str, List[str]]

    @field_validator("data")
    def all_rows_must_have_same_length(cls, data: Dict[str, List[str]]):
        columns = list(data.values())
        # An empty payload previously crashed with IndexError on columns[0];
        # report it as a proper validation error instead.
        if not columns:
            raise ValueError("The data must contain at least one column")
        n = len(columns[0])
        if all(len(x) == n for x in columns):
            return data
        raise ValueError("All rows in the data must be the same length")
class StringOrStringBatchInputCheck(RootModel):
    """Accepts either a single string or a non-empty batch (list) of strings."""

    root: Union[List[str], str]

    @field_validator("root")
    def input_must_not_be_empty(cls, root: Union[List[str], str]):
        # A bare string is always accepted; only a batch must be non-empty.
        if isinstance(root, list) and len(root) == 0:
            raise ValueError(
                "The inputs are invalid, at least one input is required"
            )
        return root
class StringInput(RootModel):
    """Plain single-string input, used by most text pipelines."""

    root: str
# Maps pipeline tag -> pydantic model used to validate request "parameters".
# Tags absent from this mapping accept arbitrary parameters (see check_params).
PARAMS_MAPPING = {
    "conversational": SharedGenerationParams,
    "fill-mask": FillMaskParamsCheck,
    "text2text-generation": TextGenerationParamsCheck,
    "text-generation": TextGenerationParamsCheck,
    "summarization": SummarizationParamsCheck,
    "zero-shot-classification": ZeroShotParamsCheck,
}

# Maps pipeline tag -> pydantic model used to validate request "inputs".
# Unlike PARAMS_MAPPING, a tag absent here is rejected outright (see check_inputs).
INPUTS_MAPPING = {
    "conversational": ConversationalInputsCheck,
    "question-answering": QuestionInputsCheck,
    "feature-extraction": StringOrStringBatchInputCheck,
    "sentence-similarity": SentenceSimilarityInputsCheck,
    "table-question-answering": TableQuestionAnsweringInputsCheck,
    "tabular-classification": TabularDataInputsCheck,
    "tabular-regression": TabularDataInputsCheck,
    "fill-mask": StringInput,
    "summarization": StringInput,
    "text2text-generation": StringInput,
    "text-generation": StringInput,
    "text-classification": StringInput,
    "token-classification": StringInput,
    "translation": StringInput,
    "zero-shot-classification": StringInput,
    "text-to-speech": StringInput,
    "text-to-image": StringInput,
}

# Pipelines whose inputs may be a batch (list) rather than a single item.
BATCH_ENABLED_PIPELINES = ["feature-extraction"]
def check_params(params, tag):
    """Validate pipeline `params` for `tag`; tags without a schema pass through.

    Raises a pydantic ValidationError when the parameters are invalid.
    """
    schema = PARAMS_MAPPING.get(tag)
    if schema is not None:
        schema.model_validate(params)
    return True
def check_inputs(inputs, tag):
    """Validate pipeline `inputs` for `tag`.

    Raises ValueError for unknown tags and a pydantic ValidationError when
    the inputs do not match the tag's schema.
    """
    if tag not in INPUTS_MAPPING:
        raise ValueError(f"{tag} is not a valid pipeline.")
    INPUTS_MAPPING[tag].model_validate(inputs)
    return True
# Task groups by payload modality; normalize_payload dispatches on these.
AUDIO_INPUTS = {
    "automatic-speech-recognition",
    "audio-to-audio",
    "speech-segmentation",
    "audio-classification",
}

# Tasks whose responses are audio streams.
AUDIO_OUTPUTS = {
    "audio-to-audio",
    "text-to-speech",
}

IMAGE_INPUTS = {
    "image-classification",
    "image-segmentation",
    "image-to-text",
    "image-to-image",
    "object-detection",
    "zero-shot-image-classification",
}

# Tasks whose responses are images.
IMAGE_OUTPUTS = {
    "image-to-image",
    "text-to-image",
}

TEXT_INPUTS = {
    "conversational",
    "feature-extraction",
    "question-answering",
    "sentence-similarity",
    "fill-mask",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "summarization",
    "text-generation",
    "text2text-generation",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "token-classification",
    "zero-shot-classification",
}

# Every task this service recognizes at all (union of the input modalities).
KNOWN_TASKS = AUDIO_INPUTS.union(IMAGE_INPUTS).union(TEXT_INPUTS)
# Audio container/codec extensions accepted for Accept-header negotiation
# and for whitelisted cache filenames (see normalize_payload_audio).
AUDIO = [
    "flac",
    "ogg",
    "mp3",
    "wav",
    "m4a",
    "aac",
    "webm",
]

# Image format extensions accepted for Accept-header negotiation.
IMAGE = [
    "jpeg",
    "png",
    "webp",
    "tiff",
    "bmp",
]
def parse_accept(accept: str, accepted: List[str]) -> str:
    """Pick the first extension from an HTTP Accept header found in `accepted`.

    Each comma-separated entry is stripped of its ";q=..." quality suffix and
    reduced to its subtype ("audio/wav" -> "wav"). Falls back to accepted[0]
    when nothing matches.
    """
    for entry in accept.split(","):
        subtype = entry.split(";")[0].split("/")[-1]
        if subtype in accepted:
            return subtype
    return accepted[0]
def normalize_payload(
    bpayload: bytes, task: str, sampling_rate: Optional[int]
) -> Tuple[Any, Dict]:
    """Dispatch raw request bytes to the audio/image/NLP normalizer for `task`."""
    if task in AUDIO_INPUTS:
        # Decoding audio requires knowing the target sampling rate up front.
        if sampling_rate is None:
            raise EnvironmentError(
                "We cannot normalize audio file if we don't know the sampling rate"
            )
        return normalize_payload_audio(bpayload, sampling_rate)
    if task in IMAGE_INPUTS:
        return normalize_payload_image(bpayload)
    if task in TEXT_INPUTS:
        return normalize_payload_nlp(bpayload, task)
    raise EnvironmentError(
        f"The task `{task}` is not recognized by api-inference-community"
    )
def ffmpeg_convert(
    array: np.array, sampling_rate: int, format_for_conversion: str
) -> bytes:
    """
    Helper function to convert raw waveforms to actual compressed file (lossless compression here)
    """
    # stdin is interpreted as raw 32-bit float little-endian PCM, mono,
    # at `sampling_rate`; the encoded file is written to stdout.
    ar = str(sampling_rate)
    ac = "1"
    ffmpeg_command = [
        "ffmpeg",
        # NOTE(review): "-ac 1" appears twice among the input options (here and
        # via `ac` below); the later flag wins, so this first pair looks
        # redundant — confirm before removing.
        "-ac",
        "1",
        "-f",
        "f32le",
        "-ac",
        ac,
        "-ar",
        ar,
        "-i",
        "pipe:0",
        # Output options: encode to the requested container/format on stdout.
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    ffmpeg_process = subprocess.Popen(
        ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    # communicate() feeds stdin, closes it, and drains stdout to completion.
    output_stream = ffmpeg_process.communicate(array.tobytes())
    out_bytes = output_stream[0]
    if len(out_bytes) == 0:
        # Empty stdout means ffmpeg failed (its stderr is suppressed by -loglevel quiet).
        raise Exception("Impossible to convert output stream")
    return out_bytes
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Librosa does that under the hood but forces the use of an actual
    file leading to hitting disk, which is almost always very bad.
    """
    # Decode any compressed audio on stdin to mono f32le PCM at the
    # requested rate, streamed back on stdout.
    command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        "1",
        "-ar",
        f"{sampling_rate}",
        "-f",
        "f32le",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    raw_pcm, _ = proc.communicate(bpayload)
    # Copy so the result owns its buffer instead of aliasing the pipe output.
    audio = np.frombuffer(raw_pcm, np.float32).copy()
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def normalize_payload_image(bpayload: bytes) -> Tuple[Any, Dict]:
    """Decode an image payload into (PIL.Image, parameters dict).

    Accepts either raw image bytes or a JSON envelope holding a
    base64-encoded image under "image" or "inputs".
    """
    from PIL import Image

    try:
        # We accept both binary image with mimetype
        # and {"inputs": base64encodedimage}
        data = json.loads(bpayload)
        key = "image" if "image" in data else "inputs"
        img = Image.open(BytesIO(b64decode(data[key])))
        return img, data.get("parameters", {})
    except Exception:
        # Not JSON (or a malformed envelope): treat the payload as raw bytes.
        pass
    return Image.open(BytesIO(bpayload)), {}
# Root of the local data cache; used to whitelist file paths that batch jobs
# may send instead of raw audio bytes (see normalize_payload_audio).
DATA_PREFIX = os.getenv("HF_TRANSFORMERS_CACHE", "")
def normalize_payload_audio(bpayload: bytes, sampling_rate: int) -> Tuple[Any, Dict]:
    """Decode an audio payload into a mono float32 waveform at `sampling_rate`.

    The payload is normally raw audio bytes, but batch jobs may send a
    filename living under DATA_PREFIX instead. Returns (waveform, {}).
    """
    # NOTE(review): when HF_TRANSFORMERS_CACHE is unset, DATA_PREFIX is "" and
    # startswith(b"") is True for every path, so the whitelist reduces to the
    # extension check below — confirm the env var is always set in deployments.
    if os.path.isfile(bpayload) and bpayload.startswith(DATA_PREFIX.encode("utf-8")):
        # XXX:
        # This is necessary for batch jobs where the datasets can contain
        # filenames instead of the raw data.
        # We attempt to sanitize this roughly, by checking it lives on the data
        # path (hardcoded in the deployment and in all the dockerfiles)
        # We also attempt to prevent opening files that are not obviously
        # audio files, to prevent opening stuff like model weights.
        filename, ext = os.path.splitext(bpayload)  # filename is unused; only ext gates the read
        if ext.decode("utf-8")[1:] in AUDIO:
            with open(bpayload, "rb") as f:
                bpayload = f.read()
    inputs = ffmpeg_read(bpayload, sampling_rate)
    if len(inputs.shape) > 1:
        # ogg can take dual channel input -> take only first input channel in this case
        inputs = inputs[:, 0]
    return inputs, {}
def normalize_payload_nlp(bpayload: bytes, task: str) -> Tuple[Any, Dict]:
    """Decode an NLP payload (raw string or JSON envelope) and validate it.

    Returns (inputs, parameters); raises on invalid inputs or parameters.
    """
    payload = bpayload.decode("utf-8")
    # We used to accept raw strings, we need to maintain backward compatibility
    try:
        decoded = json.loads(payload)
        # Bare JSON numbers are coerced back to strings for text pipelines.
        payload = str(decoded) if isinstance(decoded, (float, int)) else decoded
    except Exception:
        pass
    parameters: Dict[str, Any] = {}
    inputs = payload
    if isinstance(payload, dict) and "inputs" in payload:
        inputs = payload["inputs"]
        parameters = payload.get("parameters", {})
    check_params(parameters, task)
    check_inputs(inputs, task)
    return inputs, parameters
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_hub.py
|
import logging
import sys
from unittest import TestCase
from api_inference_community import hub
from huggingface_hub import constants, hf_api, snapshot_download
# Verbose module logger that mirrors output to stdout so test runs are
# easier to debug.
logger = logging.getLogger(__name__)
logger.level = logging.DEBUG
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class HubTestCase(TestCase):
    """Integration tests for api_inference_community.hub.hub_model_info,
    covering both the offline (local snapshot) and online code paths.

    These tests hit the network to download pinned snapshots.
    """

    def test_offline_model_info1(self):
        # Transformers model pinned to a fixed revision so metadata is stable.
        repo_id = "google/t5-efficient-tiny"
        revision = "3441d7e8bf3f89841f366d39452b95200416e4a9"
        bak_value = constants.HF_HUB_OFFLINE
        try:
            # with tempfile.TemporaryDirectory() as cache_dir:
            # logger.info("Cache directory %s", cache_dir)
            # Populate the local cache first, then force offline mode so
            # hub_model_info must reconstruct the info from disk.
            dirpath = snapshot_download(repo_id=repo_id, revision=revision)
            logger.info("Snapshot downloaded at %s", dirpath)
            constants.HF_HUB_OFFLINE = True
            model_info = hub.hub_model_info(repo_id=repo_id, revision=revision)
        finally:
            constants.HF_HUB_OFFLINE = bak_value
        logger.info("Model info %s", model_info)
        self.assertIsInstance(model_info, hf_api.ModelInfo)
        self.assertEqual(model_info.id, repo_id)
        # Offline reconstruction cannot know hub counters, hence zeros.
        self.assertEqual(model_info.downloads, 0)
        self.assertEqual(model_info.likes, 0)
        self.assertEqual(len(model_info.siblings), 12)
        self.assertIn("pytorch_model.bin", [s.rfilename for s in model_info.siblings])
        self.assertFalse(model_info.private)
        self.assertEqual(model_info.license, "apache-2.0")  # noqa
        self.assertEqual(model_info.tags, ["deep-narrow"])
        self.assertIsNone(model_info.library_name)
        logger.info("Model card data %s", model_info.card_data)
        # card_data / cardData must stay aliases of the same object.
        self.assertEqual(model_info.card_data, model_info.cardData)
        self.assertEqual(model_info.card_data.license, "apache-2.0")
        self.assertEqual(model_info.card_data.tags, ["deep-narrow"])

    def test_offline_model_info2(self):
        # PEFT adapter repo pinned to a fixed revision.
        repo_id = "dfurman/Mixtral-8x7B-peft-v0.1"
        revision = "8908d586219993ec79949acaef566363a7c7864c"
        bak_value = constants.HF_HUB_OFFLINE
        try:
            # with tempfile.TemporaryDirectory() as cache_dir:
            # logger.info("Cache directory %s", cache_dir)
            dirpath = snapshot_download(repo_id=repo_id, revision=revision)
            logger.info("Snapshot downloaded at %s", dirpath)
            constants.HF_HUB_OFFLINE = True
            model_info = hub.hub_model_info(repo_id=repo_id, revision=revision)
        finally:
            constants.HF_HUB_OFFLINE = bak_value
        logger.info("Model info %s", model_info)
        self.assertIsInstance(model_info, hf_api.ModelInfo)
        self.assertEqual(model_info.id, repo_id)
        self.assertEqual(model_info.downloads, 0)
        self.assertEqual(model_info.likes, 0)
        self.assertEqual(len(model_info.siblings), 9)
        self.assertFalse(model_info.private)
        self.assertEqual(model_info.license, "apache-2.0")  # noqa
        self.assertEqual(model_info.tags, ["mistral"])
        self.assertEqual(model_info.library_name, "peft")
        self.assertEqual(model_info.pipeline_tag, "text-generation")
        self.assertIn(".gitattributes", [s.rfilename for s in model_info.siblings])
        logger.info("Model card data %s", model_info.card_data)
        self.assertEqual(model_info.card_data, model_info.cardData)
        self.assertEqual(model_info.card_data.license, "apache-2.0")
        self.assertEqual(model_info.card_data.tags, ["mistral"])

    def test_online_model_info(self):
        # Online path: live hub metadata, so only loose bounds are asserted
        # for mutable counters like downloads/likes.
        repo_id = "dfurman/Mixtral-8x7B-Instruct-v0.1"
        revision = "8908d586219993ec79949acaef566363a7c7864c"
        bak_value = constants.HF_HUB_OFFLINE
        try:
            constants.HF_HUB_OFFLINE = False
            model_info = hub.hub_model_info(repo_id=repo_id, revision=revision)
        finally:
            constants.HF_HUB_OFFLINE = bak_value
        logger.info("Model info %s", model_info)
        self.assertIsInstance(model_info, hf_api.ModelInfo)
        self.assertEqual(model_info.id, repo_id)
        self.assertGreater(model_info.downloads, 0)
        self.assertGreater(model_info.likes, 0)
        self.assertEqual(len(model_info.siblings), 9)
        self.assertFalse(model_info.private)
        # NOTE(review): assertGreater on two lists performs a lexicographic
        # comparison, which is almost certainly not the intent — presumably
        # this should assert that tags *contains* these entries (a subset
        # check). Confirm and fix.
        self.assertGreater(model_info.tags, ["peft", "safetensors", "mistral"])
        self.assertEqual(model_info.library_name, "peft")
        self.assertEqual(model_info.pipeline_tag, "text-generation")
        self.assertIn(".gitattributes", [s.rfilename for s in model_info.siblings])
        logger.info("Model card data %s", model_info.card_data)
        self.assertEqual(model_info.card_data, model_info.cardData)
        self.assertEqual(model_info.card_data.license, "apache-2.0")
        self.assertEqual(model_info.card_data.tags, ["mistral"])
        self.assertIsNone(model_info.safetensors)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.