// progress_callback.js — revision (+41 / −41): model loading with a download
// progress callback, plus chat send handling wired to the Send button.
|
|
| 1 |
-
// Load Transformers.js from the CDN and configure it for remote-only,
// single-threaded WASM inference (numThreads = 1, no proxy worker) to
// keep RAM usage down.
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.0';

env.allowRemoteModels = true;
env.allowLocalModels = false;
env.remotePathTemplate = 'https://huggingface.co/{model}/resolve/main/';
env.backends.onnx.wasm.numThreads = 1;
env.backends.onnx.wasm.proxy = false;

// Cached references to the page's UI elements.
// NOTE(review): assumes #status, #main-btn and #chat-input exist in the HTML — confirm.
const status = document.getElementById('status');
const btn = document.getElementById('main-btn');
const input = document.getElementById('chat-input');

// Text-generation pipeline; assigned once init() finishes loading the model.
let generator = null;
|
| 13 |
-
|
| 14 |
-
/**
 * Loads the text-generation model, then repurposes the main button as a
 * chat "Send" button that runs generation turns against it.
 *
 * NOTE(review): the committed revision dropped the model-loading half of
 * this function, leaving `generator` forever null and the braces
 * unbalanced; the loading code from the previous revision is restored here.
 */
async function init() {
  try {
    btn.disabled = true;
    status.textContent = "Loading Tiny Brain (Single-Thread)...";

    generator = await pipeline(
      'text-generation',
      'Xenova/phi-1_5-tiny-onnx', // replace with exact repo name
      {
        device: 'wasm',
        dtype: 'q4',
        // Surface download progress while the model files stream in.
        progress_callback: (d) => {
          if (d.status === 'progress') {
            status.textContent = `Downloading: ${Math.round(d.progress)}%`;
          }
        }
      }
    );

    status.textContent = "Ready! RAM usage minimized.";
    btn.textContent = "Send";
    input.disabled = false;
    btn.disabled = false;

    // Change button behavior: from here on it sends chat messages.
    btn.onclick = async () => {
      const userText = input.value.trim();
      if (!userText || btn.disabled) return;

      // UI updates: clear the input and lock the button for the duration.
      input.value = '';
      btn.disabled = true;
      status.textContent = "Thinking...";

      addMessage('user', userText);

      let failed = false;
      try {
        const output = await generator(userText, {
          max_new_tokens: 50,
          temperature: 0.7,
          do_sample: true
        });

        addMessage('ai', output[0].generated_text);
      } catch (err) {
        failed = true;
        console.error("Generation Error:", err);
        status.textContent = "Error during generation.";
      } finally {
        // BUGFIX: the previous `finally` unconditionally wrote "Ready!",
        // instantly clobbering the error message set in `catch`. Only reset
        // the status on success; always re-enable the button.
        if (!failed) status.textContent = "Ready!";
        btn.disabled = false;
      }
    };
  } catch (e) {
    console.error("Initialization Error:", e);
    // NOTE(review): e.message is inserted via innerHTML unsanitized; fine for
    // our own error strings, but consider textContent if messages can carry markup.
    status.innerHTML = `<span style="color: red;">Failed to load model: ${e.message}</span>`;
  }
}
| 39 |
+
/**
 * Appends one chat bubble to the #chat-container element and scrolls the
 * container to the bottom so the newest message is visible.
 *
 * @param {string} sender - Message author ('user' or 'ai'); becomes part of
 *   the bubble's CSS class (`message <sender>-message`).
 * @param {string} text - Message body; inserted via textContent, so it is
 *   rendered as plain text (XSS-safe).
 */
function addMessage(sender, text) {
  const container = document.getElementById('chat-container'); // Ensure this ID exists in your HTML
  const bubble = document.createElement('div');

  bubble.className = `message ${sender}-message`;
  bubble.textContent = text;

  container.appendChild(bubble);
  container.scrollTop = container.scrollHeight;
}
|
| 48 |
+
|
| 49 |
+
// Kick off model loading immediately on script load.
init();