// feat: add payment verification middleware — API keys validated through scottzilla-payments
import express from "express";
import cors from "cors";
import { readFileSync } from "fs";
import https from "https";
| const app = express(); | |
| app.use(cors()); | |
| app.use(express.json({ limit: "50mb" })); | |
| const PORT = 11434; | |
| const HF_TOKEN = process.env.HF_TOKEN || process.env.OPENAI_API_KEY || ""; | |
| const PAYMENTS_URL = "https://scottzillasystems-scottzilla-payments.hf.space"; | |
| // ─── Load model catalog ────────────────────────────────────────────────────── | |
| const catalog = JSON.parse(readFileSync("models.json", "utf-8")); | |
| const models = catalog.models; | |
| const aliasMap = new Map(models.map((m) => [m.alias, m])); | |
| const hfIdMap = new Map(models.map((m) => [m.hf_id, m])); | |
| function resolveModel(name) { | |
| if (!name) return models[0]; | |
| const lower = name.toLowerCase().replace(/^scottzillasystems\//, ""); | |
| return aliasMap.get(lower) || hfIdMap.get(name) || | |
| models.find((m) => m.hf_id.toLowerCase().includes(lower)) || | |
| models.find((m) => m.name.toLowerCase().includes(lower)) || | |
| models.find((m) => m.capabilities?.some((c) => c === lower)) || null; | |
| } | |
| function routeByCapability(messages) { | |
| const text = messages.map((m) => m.content).join(" ").toLowerCase(); | |
| if (/\b(image|picture|photo|draw|edit image|generate image)\b/.test(text)) | |
| return resolveModel("qwen3-vl-8b-abliterated") || resolveModel("qwen3.5-9b"); | |
| if (/\b(code|python|javascript|function|debug|program|script)\b/.test(text)) | |
| return resolveModel("qwen3-coder-abliterated"); | |
| if (/\b(uncensor|abliterat|jailbreak|unrestrict|nsfw)\b/.test(text)) | |
| return resolveModel("qwen3.6-27b-abliterated"); | |
| if (/\b(think|reason|math|logic|proof|step.by.step)\b/.test(text)) | |
| return resolveModel("qwen3.5-40b-uncensored"); | |
| if (/\b(creative|story|roleplay|write|fiction|narrative)\b/.test(text)) | |
| return resolveModel("cydonia-24b"); | |
| return resolveModel("qwen3.5-9b"); | |
| } | |
| // ─── Payment verification middleware ───────────────────────────────────────── | |
| // Free tier: 25 req/day on chatgpt-5 only, no key needed | |
| // Paid tiers: validated through scottzilla-payments | |
| let freeUsageToday = new Map(); // ip → count | |
| async function verifyApiKey(key) { | |
| try { | |
| const resp = await fetch(`${PAYMENTS_URL}/api/keys/verify/${key}`); | |
| if (!resp.ok) return null; | |
| return await resp.json(); | |
| } catch { return null; } | |
| } | |
| async function recordUsage(key, model) { | |
| try { | |
| await fetch(`${PAYMENTS_URL}/api/usage/record`, { | |
| method: "POST", | |
| headers: { "Content-Type": "application/json" }, | |
| body: JSON.stringify({ api_key: key, model }), | |
| }); | |
| } catch {} | |
| } | |
| async function authMiddleware(req, res, next) { | |
| const authHeader = req.headers.authorization || req.headers["x-api-key"] || ""; | |
| const key = authHeader.replace(/^Bearer\s+/i, "").trim(); | |
| // No key → free tier (25 req/day, chatgpt-5 only) | |
| if (!key || key === HF_TOKEN) { | |
| const ip = req.ip || req.headers["x-forwarded-for"] || "unknown"; | |
| const today = new Date().toISOString().split("T")[0]; | |
| const usageKey = `${ip}:${today}`; | |
| const used = freeUsageToday.get(usageKey) || 0; | |
| if (used >= 25) { | |
| return res.status(429).json({ | |
| error: "Free tier limit reached (25/day). Get an API key at " + PAYMENTS_URL, | |
| upgrade_url: PAYMENTS_URL, | |
| }); | |
| } | |
| freeUsageToday.set(usageKey, used + 1); | |
| req.tier = "free"; | |
| req.allowedModels = ["chatgpt-5"]; | |
| return next(); | |
| } | |
| // Validate API key | |
| const keyData = await verifyApiKey(key); | |
| if (!keyData || !keyData.valid) { | |
| return res.status(401).json({ error: "Invalid API key. Get one at " + PAYMENTS_URL }); | |
| } | |
| if (!keyData.within_limit) { | |
| return res.status(429).json({ | |
| error: `Rate limit exceeded for ${keyData.tier_name} tier. Upgrade at ${PAYMENTS_URL}`, | |
| usage: keyData.usage_today, | |
| limit: keyData.limit_today, | |
| }); | |
| } | |
| req.tier = keyData.tier; | |
| req.apiKey = key; | |
| req.allowedModels = keyData.models; | |
| next(); | |
| } | |
| // ─── HF Router proxy ──────────────────────────────────────────────────────── | |
| async function hfChat(routerModel, messages, stream = false, params = {}) { | |
| const body = JSON.stringify({ | |
| model: routerModel, messages, stream, | |
| max_tokens: params.max_tokens || 2048, | |
| temperature: params.temperature || 0.7, | |
| top_p: params.top_p || 0.95, | |
| }); | |
| return new Promise((resolve, reject) => { | |
| const req = https.request({ | |
| hostname: "router.huggingface.co", path: "/v1/chat/completions", method: "POST", | |
| headers: { "Content-Type": "application/json", Authorization: `Bearer ${HF_TOKEN}`, "Content-Length": Buffer.byteLength(body) }, | |
| }, (res) => { | |
| if (stream) return resolve(res); | |
| let data = ""; res.on("data", (c) => data += c); | |
| res.on("end", () => { try { resolve(JSON.parse(data)); } catch { resolve({ error: data }); } }); | |
| }); | |
| req.on("error", reject); req.end(body); | |
| }); | |
| } | |
| // ─── Public endpoints (no auth) ────────────────────────────────────────────── | |
| app.get("/", (req, res) => { | |
| res.json({ | |
| status: "Scottzilla Gateway ⚡ operational", | |
| models: models.length, | |
| pricing: PAYMENTS_URL, | |
| endpoints: [ | |
| "GET /api/tags, /v1/models — list models", | |
| "POST /api/chat, /v1/chat/completions — chat (requires API key for paid models)", | |
| "GET /api/library — full catalog", | |
| "GET /api/route?q=... — smart route preview", | |
| ], | |
| free_tier: "25 requests/day on chatgpt-5 — no key needed", | |
| get_api_key: PAYMENTS_URL + "/api/keys/create", | |
| }); | |
| }); | |
| app.get("/api/tags", (req, res) => { | |
| res.json({ models: models.map((m) => ({ | |
| name: m.alias, model: m.alias, modified_at: new Date().toISOString(), size: (m.size_gb || 0) * 1e9, digest: m.hf_id, | |
| details: { parent_model: m.hf_id, format: m.arch, family: m.arch, parameter_size: m.params || "unknown" }, | |
| }))}); | |
| }); | |
| app.get("/v1/models", (req, res) => { | |
| res.json({ object: "list", data: models.map((m) => ({ | |
| id: m.alias, object: "model", created: Math.floor(Date.now() / 1000), owned_by: "ScottzillaSystems", | |
| hf_id: m.hf_id, capabilities: m.capabilities, params: m.params, | |
| }))}); | |
| }); | |
| app.get("/api/library", (req, res) => res.json(catalog)); | |
| app.get("/api/route", (req, res) => { | |
| const q = req.query.q || ""; | |
| const pick = routeByCapability([{ role: "user", content: q }]); | |
| res.json({ query: q, routed_to: pick ? { alias: pick.alias, name: pick.name, hf_id: pick.hf_id, capabilities: pick.capabilities } : null }); | |
| }); | |
| app.post("/api/show", (req, res) => { | |
| const entry = resolveModel(req.body.name || req.body.model); | |
| if (!entry) return res.status(404).json({ error: "model not found" }); | |
| res.json({ | |
| modelfile: `# ${entry.name}\nFROM ${entry.hf_id}\nPARAMETER temperature 0.7`, | |
| parameters: "temperature 0.7\ntop_p 0.95", template: "{{ .System }}\n{{ .Prompt }}", | |
| details: { parent_model: entry.hf_id, format: entry.arch, family: entry.arch, parameter_size: entry.params || "unknown", capabilities: entry.capabilities }, | |
| }); | |
| }); | |
| // ─── Auth-gated chat endpoints ─────────────────────────────────────────────── | |
| app.post("/api/chat", authMiddleware, async (req, res) => { | |
| try { | |
| const { model: modelName, messages, options } = req.body; | |
| const entry = modelName === "auto" ? routeByCapability(messages) : (resolveModel(modelName) || routeByCapability(messages)); | |
| if (!entry) return res.status(404).json({ error: `Model not found: ${modelName}` }); | |
| // Check model access | |
| if (req.tier === "free" && !req.allowedModels.includes(entry.alias) && !req.allowedModels.includes("all")) { | |
| return res.status(403).json({ error: `${entry.name} requires a paid plan. Free tier allows: ${req.allowedModels.join(", ")}`, upgrade: PAYMENTS_URL }); | |
| } | |
| const routerModel = entry.router_model || "Qwen/Qwen3.5-9B"; | |
| const result = await hfChat(routerModel, messages, false, options || {}); | |
| const content = result.choices?.[0]?.message?.content || ""; | |
| if (req.apiKey) recordUsage(req.apiKey, entry.alias); | |
| res.json({ model: entry.alias, created_at: new Date().toISOString(), message: { role: "assistant", content }, done: true, | |
| _meta: { hf_id: entry.hf_id, router_model: routerModel, tier: req.tier } }); | |
| } catch (err) { res.status(500).json({ error: err.message }); } | |
| }); | |
| app.post("/api/generate", authMiddleware, async (req, res) => { | |
| try { | |
| const { model: modelName, prompt, options } = req.body; | |
| const messages = [{ role: "user", content: prompt }]; | |
| const entry = modelName === "auto" ? routeByCapability(messages) : (resolveModel(modelName) || routeByCapability(messages)); | |
| if (!entry) return res.status(404).json({ error: `Model not found: ${modelName}` }); | |
| if (req.tier === "free" && !req.allowedModels.includes(entry.alias) && !req.allowedModels.includes("all")) { | |
| return res.status(403).json({ error: `${entry.name} requires a paid plan.`, upgrade: PAYMENTS_URL }); | |
| } | |
| const routerModel = entry.router_model || "Qwen/Qwen3.5-9B"; | |
| const result = await hfChat(routerModel, messages, false, options || {}); | |
| if (req.apiKey) recordUsage(req.apiKey, entry.alias); | |
| res.json({ model: entry.alias, created_at: new Date().toISOString(), response: result.choices?.[0]?.message?.content || "", done: true }); | |
| } catch (err) { res.status(500).json({ error: err.message }); } | |
| }); | |
| app.post("/v1/chat/completions", authMiddleware, async (req, res) => { | |
| try { | |
| const { model: modelName, messages, stream, temperature, max_tokens, top_p } = req.body; | |
| const entry = modelName === "auto" ? routeByCapability(messages) : (resolveModel(modelName) || routeByCapability(messages)); | |
| if (!entry) return res.status(404).json({ error: { message: `Model not found: ${modelName}`, type: "invalid_request_error" } }); | |
| if (req.tier === "free" && !req.allowedModels.includes(entry.alias) && !req.allowedModels.includes("all")) { | |
| return res.status(403).json({ error: { message: `${entry.name} requires a paid plan. Upgrade at ${PAYMENTS_URL}`, type: "insufficient_quota" } }); | |
| } | |
| const routerModel = entry.router_model || "Qwen/Qwen3.5-9B"; | |
| if (stream) { | |
| res.setHeader("Content-Type", "text/event-stream"); | |
| res.setHeader("Cache-Control", "no-cache"); | |
| const upstream = await hfChat(routerModel, messages, true, { temperature, max_tokens, top_p }); | |
| upstream.on("data", (chunk) => res.write(chunk)); | |
| upstream.on("end", () => { if (req.apiKey) recordUsage(req.apiKey, entry.alias); res.end(); }); | |
| upstream.on("error", () => res.end()); | |
| return; | |
| } | |
| const result = await hfChat(routerModel, messages, false, { temperature, max_tokens, top_p }); | |
| if (req.apiKey) recordUsage(req.apiKey, entry.alias); | |
| res.json({ ...result, model: entry.alias, _scottzilla: { hf_id: entry.hf_id, router_model: routerModel, tier: req.tier, capabilities: entry.capabilities } }); | |
| } catch (err) { res.status(500).json({ error: { message: err.message, type: "server_error" } }); } | |
| }); | |
| // ─── Start ─────────────────────────────────────────────────────────────────── | |
| app.listen(PORT, "0.0.0.0", () => { | |
| console.log(`⚡ Scottzilla Gateway listening on :${PORT}`); | |
| console.log(` ${models.length} models | Payments: ${PAYMENTS_URL}`); | |
| console.log(` Free tier: 25 req/day on chatgpt-5 (no key needed)`); | |
| console.log(` Paid tiers: validated through scottzilla-payments`); | |
| }); | |