// WebGPU-browser-brain-lab / progress_callback.js
// Author: jaison2611 — commit a462a68 ("Update progress_callback.js")
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.0';
// CONFIG MUST COME FIRST
// (transformers.js reads these env flags when pipeline() is invoked below,
// so they must be set before init() runs)
env.allowRemoteModels = true;
env.allowLocalModels = false;
env.backends.onnx.wasm.numThreads = 1; // Stick to 1 for stability on mobile
env.backends.onnx.wasm.proxy = false; // NOTE(review): presumably keeps ONNX inference on the main thread instead of a worker proxy — confirm against transformers.js env docs
// Cached handles to the host page's UI elements (ids defined in the HTML).
const status = document.getElementById('status');
const btn = document.getElementById('main-btn');
const input = document.getElementById('chat-input');
const chatBox = document.getElementById('chat-box');
// Set by init() once the model finishes downloading; null until then.
let generator = null;
/**
 * Download the text-generation model, then wire up the chat UI.
 *
 * Side effects: assigns the module-level `generator`, mutates the status/btn/input
 * DOM elements, and installs the send-button click handler. Catches its own
 * errors, so callers may fire-and-forget.
 */
async function init() {
  try {
    btn.disabled = true;
    status.textContent = "Downloading Model (Stay on page)...";
    generator = await pipeline(
      'text-generation',
      'Xenova/phi-1_5-tiny-onnx',
      {
        device: 'wasm',
        dtype: 'q4', // Critical: Using 4-bit quantization to save RAM
        progress_callback: (d) => {
          // Download progress events carry status === 'progress' and a 0-100 value.
          if (d.status === 'progress') {
            status.textContent = `Loading: ${Math.round(d.progress)}%`;
          }
        },
      },
    );
    status.textContent = "Ready!";
    btn.textContent = "Send";
    input.disabled = false;
    btn.disabled = false;
    btn.onclick = async () => {
      const userText = input.value.trim();
      if (!userText) return;
      addMessage('user', userText);
      input.value = '';
      // Disable the button while generating so a double-tap can't start
      // two overlapping inference runs on the single wasm thread.
      btn.disabled = true;
      status.textContent = "AI is thinking...";
      try {
        const output = await generator(userText, {
          max_new_tokens: 30, // Keep this low for mobile
          temperature: 0.7,
        });
        // The model echoes the prompt in generated_text; strip it so only
        // the reply is shown.
        addMessage('ai', output[0].generated_text.replace(userText, '').trim());
        status.textContent = "Ready!";
      } catch (err) {
        // Without this catch, a generation failure would be an unhandled
        // rejection and the status would stay stuck at "AI is thinking...".
        console.error(err);
        status.textContent = "Error during generation — please try again.";
      } finally {
        btn.disabled = false;
      }
    };
  } catch (e) {
    // Surface the actual failure: not every load error is an out-of-memory
    // condition (it may be a network error, a 404, etc.).
    status.textContent = `Error: ${e?.message ?? 'model failed to load (possibly mobile RAM limit)'}`;
    console.error(e);
  }
}
/**
 * Append one chat bubble to the transcript and keep it scrolled to the bottom.
 *
 * @param {string} sender - 'user' or 'ai'; becomes part of the bubble's CSS class.
 * @param {string} text - Message body; assigned via textContent so model output
 *   is never interpreted as HTML.
 */
function addMessage(sender, text) {
  const bubble = document.createElement('div');
  bubble.className = `msg ${sender}-message`;
  bubble.textContent = text;
  chatBox.append(bubble);
  chatBox.scrollTop = chatBox.scrollHeight;
}
// Start the process immediately on script load.
// init() catches its own errors, so the returned promise is safe to ignore here.
init();