// godmod3-api — api/server.ts (Hugging Face upload c78c312, "Upload 22 files")
/**
* G0DM0D3 Research Preview API
*
* Exposes the core engines (AutoTune, Parseltongue, STM, Feedback Loop)
* and the flagship ULTRAPLINIAN multi-model racing mode as a REST API.
*
* Includes opt-in dataset collection for building an open source research dataset.
*
* Designed for deployment on Hugging Face Spaces (Docker) or any container host.
*/
import express from 'express'
import cors from 'cors'
import path from 'path'
import { rateLimit } from './middleware/rateLimit'
import { apiKeyAuth } from './middleware/auth'
import { autotuneRoutes } from './routes/autotune'
import { parseltongueRoutes } from './routes/parseltongue'
import { transformRoutes } from './routes/transform'
import { chatRoutes } from './routes/chat'
import { feedbackRoutes } from './routes/feedback'
import { ultraplinianRoutes } from './routes/ultraplinian'
import { datasetRoutes } from './routes/dataset'
// ── App setup ──────────────────────────────────────────────────────────
const app = express()

// HF Spaces exposes 7860 by default; `||` (not `??`) also guards PORT=''.
const PORT = Number.parseInt(process.env.PORT || '7860', 10) // HF Spaces default

// ── Middleware ─────────────────────────────────────────────────────────
app.use(cors()) // permissive CORS: any origin may call the research API
app.use(express.json({ limit: '1mb' })) // cap request bodies at 1 MB

// ── Static UI ─────────────────────────────────────────────────────────
// Serves the bundled front-end from ./public next to the compiled server.
app.use(express.static(path.join(__dirname, 'public')))
// ── Health / Info (no auth required) ──────────────────────────────────
// ── Health / Info (no auth required) ──────────────────────────────────
// Liveness probe: unauthenticated, returns status plus current epoch ms.
app.get('/v1/health', (_req, res) =>
  res.json({ status: 'ok', timestamp: Date.now() })
)
// Service-discovery document: unauthenticated, enumerates every endpoint,
// auth mode, dataset policy, and rate limits for API consumers.
app.get('/v1/info', (_req, res) => {
  // RATE_LIMIT_TOTAL=0 parses to 0, which `||` deliberately maps to
  // 'unlimited' — matching the "Set RATE_LIMIT_TOTAL=0 to uncap" note below.
  const totalRequestCap =
    parseInt(process.env.RATE_LIMIT_TOTAL || '5', 10) || 'unlimited'

  // Whether this deployment carries its own upstream OpenRouter credential.
  const hasServerOpenRouterKey = Boolean(process.env.OPENROUTER_API_KEY)

  res.json({
    name: 'G0DM0D3 Research Preview API',
    version: '0.3.0',
    description: 'ULTRAPLINIAN multi-model racing with Liquid Response live upgrades, context-adaptive parameter tuning, text transformation, obfuscation, and opt-in open dataset collection.',
    license: 'AGPL-3.0',
    flagship: 'POST /v1/ultraplinian/completions',
    defaults: {
      stream: true,
      liquid_min_delta: 8,
      note: 'Streaming (Liquid Response) is ON by default. The first good response is served immediately via SSE, then auto-upgraded when a better model beats the current leader by liquid_min_delta score points.',
    },
    endpoints: {
      'POST /v1/ultraplinian/completions': 'ULTRAPLINIAN: Race N models in parallel with Liquid Response (stream=true default). First good response served immediately, auto-upgrades live.',
      'POST /v1/chat/completions': 'Single-model pipeline with GODMODE + AutoTune + Parseltongue + STM',
      'POST /v1/autotune/analyze': 'Analyze message context and compute optimal LLM parameters',
      'POST /v1/parseltongue/encode': 'Obfuscate trigger words in text',
      'POST /v1/parseltongue/detect': 'Detect trigger words in text',
      'POST /v1/transform': 'Apply semantic transformation modules to text',
      'POST /v1/feedback': 'Submit quality feedback for the EMA learning loop',
      'GET /v1/dataset/stats': 'Dataset collection statistics',
      'GET /v1/dataset/export': 'Export the open research dataset (JSON or JSONL)',
    },
    authentication: {
      openrouter_key: hasServerOpenRouterKey
        ? 'Server-provided (callers do NOT need their own OpenRouter key)'
        : 'Caller must provide openrouter_api_key in request body',
      api_key: 'Send Authorization: Bearer <your-api-key> (if server has GODMODE_API_KEY set)',
    },
    dataset: {
      note: 'Opt-in per request via contribute_to_dataset: true. No PII stored. Exportable as JSONL for HuggingFace Datasets.',
    },
    limits: {
      requests_total: totalRequestCap,
      requests_per_minute: 60,
      requests_per_day: 1000,
      note: 'Research preview — each API key gets 5 total requests by default. Set RATE_LIMIT_TOTAL=0 to uncap.',
    },
    source: 'https://github.com/LYS10S/G0DM0D3',
  })
})
// ── Authenticated + rate-limited routes ───────────────────────────────
// ── Authenticated + rate-limited routes ───────────────────────────────
// Every engine endpoint sits behind the same apiKeyAuth → rateLimit chain;
// mount order matches the endpoint listing in GET /v1/info.
const guardedRoutes = [
  ['/v1/ultraplinian', ultraplinianRoutes],
  ['/v1/chat', chatRoutes],
  ['/v1/autotune', autotuneRoutes],
  ['/v1/parseltongue', parseltongueRoutes],
  ['/v1/transform', transformRoutes],
  ['/v1/feedback', feedbackRoutes],
  ['/v1/dataset', datasetRoutes],
] as const

for (const [prefix, router] of guardedRoutes) {
  app.use(prefix, apiKeyAuth, rateLimit, router)
}
// ── 404 ───────────────────────────────────────────────────────────────
// ── 404 ───────────────────────────────────────────────────────────────
// Catch-all for unmatched paths; must stay registered after every real route.
app.use((_req, res) => {
  res
    .status(404)
    .json({ error: 'Not found. See GET /v1/info for available endpoints.' })
})
// ── Error handler ─────────────────────────────────────────────────────
// ── Error handler ─────────────────────────────────────────────────────
// Final error middleware: logs and returns a generic 500 without leaking
// internals. If headers were already sent (e.g. an error mid-SSE stream —
// streaming is this API's default mode), res.status() would throw
// ERR_HTTP_HEADERS_SENT, so delegate to Express's default handler, which
// closes the connection.
app.use((err: Error, _req: express.Request, res: express.Response, next: express.NextFunction) => {
  console.error('[API Error]', err.message)
  if (res.headersSent) {
    next(err)
    return
  }
  res.status(500).json({ error: 'Internal server error' })
})
// ── Start ─────────────────────────────────────────────────────────────
// ── Start ─────────────────────────────────────────────────────────────
// Bind on all interfaces (required for container hosts like HF Spaces) and
// print a startup banner enumerating the route groups.
// NOTE(review): banner box alignment depends on exact padding — the version
// string here ('v0.3.0') should stay in sync with GET /v1/info above.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`
╔══════════════════════════════════════════════════════════╗
║ G0DM0D3 Research Preview API v0.3.0 ║
║ Listening on http://0.0.0.0:${PORT} ║
║ ║
║ FLAGSHIP: ║
║ POST /v1/ultraplinian/completions Multi-model racing ║
║ ║
║ ENGINES: ║
║ POST /v1/chat/completions Single-model + GODMODE ║
║ POST /v1/autotune/analyze Context analysis ║
║ POST /v1/parseltongue/encode Text obfuscation ║
║ POST /v1/parseltongue/detect Trigger detection ║
║ POST /v1/transform STM transforms ║
║ POST /v1/feedback Feedback loop ║
║ ║
║ DATASET: ║
║ GET /v1/dataset/stats Collection stats ║
║ GET /v1/dataset/export Export (JSON/JSONL) ║
╚══════════════════════════════════════════════════════════╝
`)
})

// Exported for tests / embedding; starting the listener above is a side
// effect of importing this module.
export default app