// Express server exposing a test GET endpoint and a local-LLM chat endpoint.
const express = require("express");
const path = require("path");
const cors = require("cors");

const app = express();
const port = 7860;

// Allow cross-origin requests from browser clients.
app.use(cors());
// Parse JSON request bodies. Without this middleware, req.body is
// undefined and the POST /api/chat handler crashes when it reads
// req.body.userInput.
app.use(express.json());
// Echo endpoint for verifying the GET plumbing: reflects the caller's
// query-string parameters back in the JSON response.
app.get("/api/test", async (req, res) => {
  res.json({
    message: "Test getApiResponse GET success!",
    method: "GET",
    reqData: req.query,
  });
});
// Chat endpoint: runs the caller's prompt through a local GGUF model via
// node-llama-cpp and returns the model's answer as JSON.
// NOTE(review): the model is loaded from disk on every request, which is
// expensive; consider loading it once at startup and reusing it — confirm
// node-llama-cpp session reuse semantics before changing.
app.post("/api/chat", async (req, res) => {
  try {
    // Dynamic import: node-llama-cpp is an ES module, this file is CommonJS.
    const { LlamaModel, LlamaContext, LlamaChatSession } = await import(
      "node-llama-cpp"
    );
    // req.body.userInput is a plain value, not a promise — no `await` needed.
    // Optional chaining guards against a missing/unparsed body.
    const userInput = req.body?.userInput;
    if (typeof userInput !== "string" || userInput.length === 0) {
      res.status(400).json({ error: "Missing 'userInput' in request body." });
      return;
    }
    const model = new LlamaModel({
      modelPath: path.join(
        process.cwd(),
        "Model",
        "orca-mini-3b-gguf2-q4_0.gguf"
      ),
    });
    const context = new LlamaContext({ model });
    const session = new LlamaChatSession({ context });
    const aiAnswer = await session.prompt(userInput);
    console.log(userInput);
    console.log(aiAnswer);
    res.json({ aiAnswer });
  } catch (err) {
    // Without this catch, a model-load or prompt failure would become an
    // unhandled rejection and the client request would hang with no reply.
    console.error(err);
    res.status(500).json({ error: "Failed to generate a response." });
  }
});
// Start the HTTP server and report the bound port.
app.listen(port, () =>
  console.log(`Express server is running on port ${port}`)
);