import { Tokenizer } from './tokenizer.js';
import { loadModel, generateText } from './model.js'; // TODO: confirm model.js exists and exports loadModel/generateText
| let tokenizer, model; | |
/**
 * Initialize the WebLLM runtime: load the tokenizer and the model, then
 * expose a minimal chat interface backed by them.
 *
 * @param {{ tokenizerUrl: string, modelUrl: string }} config - URLs of the
 *   tokenizer and model artifacts to load.
 * @returns {Promise<{ chat(prompt: string): Promise<string> }>} an object
 *   whose `chat` method encodes the prompt, runs generation, and decodes
 *   the produced token ids back into text.
 */
export default async function initWebLLM(config) {
  console.log("Initializing WebLLM...");
  // The tokenizer and model loads are independent of each other, so fetch
  // them in parallel instead of awaiting each one serially.
  [tokenizer, model] = await Promise.all([
    Tokenizer.load(config.tokenizerUrl),
    loadModel(config.modelUrl),
  ]);
  return {
    /** Encode `prompt`, generate token ids, and decode them to a string. */
    async chat(prompt) {
      const inputIds = tokenizer.encode(prompt);
      const outputIds = await generateText(model, inputIds);
      return tokenizer.decode(outputIds);
    },
  };
}