// NOTE: header below was reconstructed from repository-viewer residue (commit f7886b0).
import { Tokenizer } from './tokenizer.js';
import { loadModel, generateText } from './model.js'; // TODO: implement model.js (loadModel, generateText) if it does not exist yet
/**
 * Initialize the WebLLM runtime: load the tokenizer and model, then return
 * a small chat API bound to them.
 *
 * @param {{ tokenizerUrl: string, modelUrl: string }} config - locations of
 *   the tokenizer and model artifacts.
 * @returns {Promise<{ chat(prompt: string): Promise<string> }>} an object
 *   whose `chat` method encodes the prompt, runs generation, and decodes
 *   the produced token ids back into text.
 */
export default async function initWebLLM(config) {
  console.log("Initializing WebLLM...");
  // The two loads are independent, so run them in parallel.
  // Using locals (not module-level `let`) means each initWebLLM() call gets
  // its own tokenizer/model pair — previously a second call silently
  // reassigned the shared bindings captured by the first caller's `chat`.
  const [tokenizer, model] = await Promise.all([
    Tokenizer.load(config.tokenizerUrl),
    loadModel(config.modelUrl),
  ]);
  return {
    /** Generate a reply for `prompt` using the loaded tokenizer + model. */
    async chat(prompt) {
      const inputIds = tokenizer.encode(prompt);
      const outputIds = await generateText(model, inputIds);
      return tokenizer.decode(outputIds);
    },
  };
}