// Sup 2ch /ai/
const express = require("express");
const app = express();
const port = 7860;
app.use(express.json());

// The double slash in the route keeps clients working when they join a base
// URL that already ends in "/" with the endpoint path.
app.get("//models", async (req, res) => {
  try {
    res.status(200).json({ object: "list", data: createFakeModelsList() });
  } catch (error) {
    res.status(500).json({ error: { message: String(error) } });
  }
});

app.post("//chat/completions", async (clientRequest, clientResponse) => {
  try {
    // Pass the client's sampling parameters through to the upstream API.
    const {
      frequency_penalty,
      presence_penalty,
      max_tokens,
      stop,
      temperature,
      top_p,
    } = clientRequest.body;
    const apiRequestBody = {
      model: "gpt-4",
      // Flatten the chat messages into a single text-completion prompt.
      prompt: convertChatMLPrompt(clientRequest.body.messages),
      frequency_penalty,
      presence_penalty,
      max_tokens,
      stop,
      temperature,
      top_p,
    };
    const apiResponse = await fetch(process.env.API_URL, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(apiRequestBody),
    });
    if (clientRequest.body.stream) {
      handleResponseAsStream(clientResponse, apiResponse);
    } else {
      handleResponseAsNonStreamable(clientResponse, apiResponse);
    }
  } catch (error) {
    // Report failures instead of silently dropping the request.
    clientResponse.status(500).json({ error: { message: String(error) } });
  }
});

app.listen(port, () => {
  console.log(`Example app listening on port ${port}`);
});
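
// A quick way to exercise the proxy once it is listening (illustrative
// values; note the intentional double slash in the path):
//   curl http://localhost:7860//chat/completions \
//     -H "Content-Type: application/json" \
//     -d '{"messages":[{"role":"user","content":"Hi"}],"max_tokens":64}'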

async function handleResponseAsNonStreamable(clientResponse, apiResponse) {
  // Wrap the upstream response body in an OpenAI-style chat.completion object.
  const apiText = await apiResponse.text();
  const clientMessage = createClientMessage(apiText);
  clientResponse.json(clientMessage);
}

async function handleResponseAsStream(clientResponse, apiResponse) {
  const reader = apiResponse.body.getReader();
  const decoder = new TextDecoder();
  // Present the upstream bytes to the client as OpenAI-style server-sent events.
  clientResponse.setHeader("Content-Type", "text/event-stream");
  clientResponse.setHeader("Cache-Control", "no-cache");
  clientResponse.setHeader("Connection", "keep-alive");
  clientResponse.write("data: " + JSON.stringify(createBeginChunk()) + "\n\n");
  function pump() {
    return reader.read().then(({ done, value }) => {
      // When no more data needs to be consumed, close the stream.
      if (done) {
        clientResponse.write(
          "data: " + JSON.stringify(createEndChunk()) + "\n\n"
        );
        clientResponse.write("data: [DONE]\n\n");
        clientResponse.end();
        return;
      }
      // { stream: true } keeps multi-byte characters split across chunks intact.
      const textData = decoder.decode(value, { stream: true });
      clientResponse.write(
        "data: " + JSON.stringify(createMessageChunk(textData)) + "\n\n"
      );
      return pump();
    });
  }
  return pump();
}
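
// Illustrative shape of the event stream the handler above produces:
//   data: {"object":"chat.completion.chunk",...,"delta":{"role":"assistant","content":""}}
//   data: {"object":"chat.completion.chunk",...,"delta":{"content":"<upstream text>"}}
//   data: {"object":"chat.completion.chunk",...,"finish_reason":"stop"}
//   data: [DONE]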

function getCurrentDate() {
  // OpenAI "created" timestamps are Unix time in seconds, not milliseconds.
  return Math.floor(Date.now() / 1000);
}

function convertChatMLPrompt(messages) {
  // Flatten ChatML-style messages into a plain "role: content" transcript,
  // ending with "assistant:" so the completion model continues as the assistant.
  const messageStrings = [];
  messages.forEach((m) => {
    if (m.role === "system" && m.name === undefined) {
      messageStrings.push("System: " + m.content);
    } else if (m.role === "system" && m.name !== undefined) {
      messageStrings.push(m.name + ": " + m.content);
    } else {
      messageStrings.push(m.role + ": " + m.content);
    }
  });
  return messageStrings.join("\n") + "\nassistant:";
}
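
// Sketch of the flattening the helper above performs:
//   convertChatMLPrompt([
//     { role: "system", content: "Be brief." },
//     { role: "user", content: "Hi" },
//   ])
//   => "System: Be brief.\nuser: Hi\nassistant:"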

const createClientMessage = (text) => ({
  id: "chatcmpl-123",
  object: "chat.completion",
  created: getCurrentDate(),
  model: "gpt-4",
  choices: [
    {
      index: 0,
      message: { role: "assistant", content: text },
      logprobs: null,
      finish_reason: "stop",
    },
  ],
});

const createBeginChunk = () => ({
  id: "chatcmpl-123",
  object: "chat.completion.chunk",
  created: getCurrentDate(),
  model: "gpt-4",
  system_fingerprint: "",
  choices: [
    {
      index: 0,
      delta: { role: "assistant", content: "" },
      logprobs: null,
      finish_reason: null,
    },
  ],
});

const createMessageChunk = (text) => ({
  id: "chatcmpl-123",
  object: "chat.completion.chunk",
  created: getCurrentDate(),
  model: "gpt-4",
  system_fingerprint: "",
  choices: [
    {
      index: 0,
      delta: { content: text },
      logprobs: null,
      finish_reason: null,
    },
  ],
});

const createEndChunk = () => ({
  id: "chatcmpl-123",
  object: "chat.completion.chunk",
  created: getCurrentDate(),
  model: "gpt-4",
  system_fingerprint: "",
  choices: [{ index: 0, delta: {}, logprobs: null, finish_reason: "stop" }],
});

function createFakeModelsList() {
  // Advertise a single placeholder "gpt-4" entry so client UIs can populate
  // their model list.
  return [
    {
      id: "gpt-4",
      object: "model",
      created: getCurrentDate(),
      owned_by: "openai",
      permission: [
        {
          id: "modelperm-gpt-4",
          object: "model_permission",
          created: getCurrentDate(),
          organization: "*",
          group: null,
          is_blocking: false,
        },
      ],
      root: "gpt-4",
      parent: null,
    },
  ];
}
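
// To run the proxy, API_URL must point at a completions endpoint that accepts
// the request body built above (an assumption about the upstream API; the URL
// below is a placeholder):
//   API_URL=https://example.invalid/v1/completions node app.js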