|
|
import { NextRequest, NextResponse } from "next/server"; |
|
|
|
|
|
/**
 * Subset of a Hugging Face Hub `/api/models` list item that this route reads.
 * Only the fields actually consumed below are declared; all are optional
 * because the upstream payload is not validated.
 */
interface HuggingFaceModel {
  // Repository id, e.g. "org/name". Some responses appear to carry the id in
  // `modelId` instead — normalizeModelId() accepts either; confirm against
  // the live API if this ever changes.
  id?: string;
  modelId?: string;
  // Hub tags (e.g. "safetensors", "gguf"); may be absent, so callers guard
  // with Array.isArray before use.
  tags?: string[];
}
|
|
|
|
|
type ModelOption = { id: string; name: string }; |
|
|
|
|
|
/** One in-memory cache record: a response payload plus when it was stored. */
type CacheEntry = {
  // Date.now() at store time; compared against CACHE_DURATION for freshness.
  timestamp: number;
  // The model list served for this query/limit combination.
  data: ModelOption[];
};
|
|
|
|
|
/** How long a cache entry stays fresh: 5 minutes, in milliseconds. */
const CACHE_DURATION = 1000 * 60 * 5;

/** Per-process response cache, keyed by `query::limit` (see getCacheKey). */
const cache = new Map<string, CacheEntry>();
|
|
|
|
|
function getCacheKey(query: string, limit: number) { |
|
|
return `${query}::${limit}`; |
|
|
} |
|
|
|
|
|
function normalizeModelId(model: HuggingFaceModel) { |
|
|
return model.id || model.modelId || ""; |
|
|
} |
|
|
|
|
|
function hasTag(model: HuggingFaceModel, tag: string) { |
|
|
return Array.isArray(model.tags) && model.tags.includes(tag); |
|
|
} |
|
|
|
|
|
function isExcludedFormat(model: HuggingFaceModel) { |
|
|
if (!Array.isArray(model.tags)) return false; |
|
|
const tags = new Set(model.tags); |
|
|
if (tags.has("gguf")) return true; |
|
|
if (tags.has("mlx")) return true; |
|
|
if (tags.has("ggml")) return true; |
|
|
return false; |
|
|
} |
|
|
|
|
|
export async function GET(request: NextRequest) { |
|
|
const query = request.nextUrl.searchParams.get("q")?.trim() ?? ""; |
|
|
const limitParam = Number(request.nextUrl.searchParams.get("limit") ?? "20"); |
|
|
const limit = Number.isFinite(limitParam) ? Math.max(1, Math.min(50, limitParam)) : 20; |
|
|
|
|
|
const cacheKey = getCacheKey(query.toLowerCase(), limit); |
|
|
const now = Date.now(); |
|
|
|
|
|
const cached = cache.get(cacheKey); |
|
|
if (cached && now - cached.timestamp < CACHE_DURATION) { |
|
|
return NextResponse.json(cached.data); |
|
|
} |
|
|
|
|
|
try { |
|
|
const url = new URL("https://huggingface.co/api/models"); |
|
|
|
|
|
if (query) { |
|
|
url.searchParams.set("search", query); |
|
|
} else { |
|
|
url.searchParams.set("sort", "downloads"); |
|
|
url.searchParams.set("direction", "-1"); |
|
|
} |
|
|
|
|
|
|
|
|
url.searchParams.append("filter", "safetensors"); |
|
|
|
|
|
url.searchParams.set("limit", String(limit)); |
|
|
|
|
|
const response = await fetch(url.toString(), { |
|
|
headers: { |
|
|
Accept: "application/json", |
|
|
}, |
|
|
next: { revalidate: 300 }, |
|
|
}); |
|
|
|
|
|
if (!response.ok) { |
|
|
throw new Error(`Hugging Face API error: ${response.status}`); |
|
|
} |
|
|
|
|
|
const data = (await response.json()) as HuggingFaceModel[]; |
|
|
|
|
|
const seen = new Set<string>(); |
|
|
const models: ModelOption[] = Array.isArray(data) |
|
|
? data |
|
|
.filter((m) => hasTag(m, "safetensors") && !isExcludedFormat(m)) |
|
|
.map((m) => normalizeModelId(m)) |
|
|
.filter(Boolean) |
|
|
.filter((id) => { |
|
|
if (seen.has(id)) return false; |
|
|
seen.add(id); |
|
|
return true; |
|
|
}) |
|
|
.map((id) => ({ |
|
|
id, |
|
|
name: id, |
|
|
})) |
|
|
: []; |
|
|
|
|
|
cache.set(cacheKey, { timestamp: now, data: models }); |
|
|
|
|
|
return NextResponse.json(models); |
|
|
} catch (error) { |
|
|
console.error("Error fetching Hugging Face models:", error); |
|
|
if (cached) { |
|
|
return NextResponse.json(cached.data); |
|
|
} |
|
|
return NextResponse.json([], { status: 500 }); |
|
|
} |
|
|
} |
|
|
|