// Example: single-turn chat against a local GGUF model using node-llama-cpp.
import {
getLlama,
LlamaChatSession,
} from "node-llama-cpp";
import {fileURLToPath} from "url";
import path from "path";
// Resolve this module's directory (ESM has no built-in __dirname).
const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    // Model file lives two directories up from this script, under models/.
    modelPath: path.join(__dirname, "..", "..", "models", "Qwen3-1.7B-Q8_0.gguf"),
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence(),
});

try {
    const prompt = `do you know node-llama-cpp`;
    const a1 = await session.prompt(prompt);
    console.log(`AI: ${a1}`);
} finally {
    // Release native resources even if the prompt throws, in reverse
    // order of creation. context/model/llama dispose() are async — await
    // them so the process doesn't exit with native handles still open.
    session.dispose();
    await context.dispose();
    await model.dispose();
    await llama.dispose();
}