Upload 3 files

Files changed:
- index.html (+39 −17)
- main.js (+171 −0)
- style.css (+81 −18)
index.html
CHANGED

@@ -1,19 +1,41 @@
(old lines 2–18 were replaced; their contents were not captured in this view)

<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Transformers.js | real-time CLIP</title>
    <link rel="stylesheet" href="/style.css" />
  </head>

  <body>
    <h1>Real-time zero-shot image classification (WebGPU)</h1>
    <h3>
      Runs locally in your browser w/
      <a
        href="https://github.com/huggingface/transformers.js"
        target="_blank"
        rel="noopener noreferrer"
        >🤗 Transformers.js</a
      >
    </h3>
    <div id="container">
      <video id="video" autoplay muted playsinline></video>
      <div id="overlay"></div>
    </div>
    <div id="controls">
      <div title="Labels used to perform zero-shot image classification">
        <label>Labels (comma-separated)</label>
        <br />
        <input id="labels" type="text" disabled />
      </div>
      <div title="Template used to perform zero-shot image classification">
        <label>Hypothesis template</label>
        <br />
        <input id="template" type="text" value="A photo of a {}" disabled />
      </div>
    </div>
    <label id="status"></label>

    <script type="module" src="/main.js"></script>
  </body>
</html>
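Note on the device settings: the page heading advertises WebGPU, but main.js (below) loads the text model on the `wasm` backend and the vision model on `webnn`, which is still experimental and disabled by default in most browsers. If a hard-coded backend is undesirable, a runtime capability probe can pick one instead. The sketch below is hypothetical and not part of the uploaded files: `pickVisionDevice` and its fallback order are assumptions, and `navigator.ml` (WebNN's entry point) may be absent entirely.

// Hypothetical helper (not in this upload): choose a backend at runtime.
// `navigator.ml` is WebNN's entry point; `navigator.gpu` is WebGPU's.
async function pickVisionDevice() {
  if ("ml" in navigator) {
    try {
      await navigator.ml.createContext(); // rejects if WebNN is unsupported
      return "webnn";
    } catch {
      /* fall through to the next backend */
    }
  }
  if ("gpu" in navigator && (await navigator.gpu.requestAdapter())) {
    return "webgpu";
  }
  return "wasm"; // CPU fallback, always available
}

The vision model could then be created with `device: await pickVisionDevice()`. Also note that both assets are referenced by absolute paths (/style.css, /main.js) and main.js is a module script, so the demo must be served over HTTP from the site root rather than opened via file://.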
main.js
ADDED

@@ -0,0 +1,171 @@

import {
  AutoTokenizer,
  CLIPTextModelWithProjection,
  AutoProcessor,
  CLIPVisionModelWithProjection,
  RawImage,
  dot,
  softmax,
} from "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.5.0";

// Reference the elements that we will need
const status = document.getElementById("status");
const container = document.getElementById("container");
const video = document.getElementById("video");
const labelsInput = document.getElementById("labels");
const templateInput = document.getElementById("template");
const overlay = document.getElementById("overlay");

status.textContent = "Loading model (88MB)...";

const model_id = "Xenova/mobileclip_s0";
let tokenizer, text_model, processor, vision_model;
try {
  // Load tokenizer and text model
  tokenizer = await AutoTokenizer.from_pretrained(model_id);
  text_model = await CLIPTextModelWithProjection.from_pretrained(model_id, {
    device: "wasm",
    dtype: "q8",
  });

  // Load processor and vision model
  processor = await AutoProcessor.from_pretrained(model_id);
  vision_model = await CLIPVisionModelWithProjection.from_pretrained(model_id, {
    device: "webnn",
    dtype: "fp32",
  });
} catch (err) {
  console.error(err);
  status.textContent = err.message;
  alert(err.message);
  throw err;
}

labelsInput.disabled = false;
templateInput.disabled = false;

status.textContent = "Ready";

// See `model.logit_scale` parameter of original model
const exp_logit_scale = Math.exp(4.6052);

const IMAGE_SIZE = 224;
const canvas = document.createElement("canvas");
canvas.width = canvas.height = IMAGE_SIZE;
const context = canvas.getContext("2d", { willReadFrequently: true });

let isProcessing = false;
let previousTime;
let textEmbeddings;
let prevTextInputs;
let prevTemplate;
let labels;

function onFrameUpdate() {
  if (!isProcessing) {
    isProcessing = true;
    (async function () {
      // If text inputs have changed, update the embeddings
      if (
        prevTextInputs !== labelsInput.value ||
        prevTemplate !== templateInput.value
      ) {
        textEmbeddings = null;
        prevTextInputs = labelsInput.value;
        prevTemplate = templateInput.value;
        labels = prevTextInputs.split(/\s*,\s*/).filter((x) => x);

        if (labels.length > 0) {
          const texts = labels.map((x) =>
            templateInput.value.replaceAll("{}", x),
          );

          const text_inputs = tokenizer(texts, {
            padding: "max_length", // NB: the model requires max_length padding
            truncation: true,
          });

          // Compute embeddings
          const { text_embeds } = await text_model(text_inputs);
          textEmbeddings = text_embeds.normalize().tolist();
        } else {
          overlay.innerHTML = "";
        }
      }

      if (textEmbeddings) {
        // Read the current frame from the video
        context.drawImage(video, 0, 0, IMAGE_SIZE, IMAGE_SIZE);
        const pixelData = context.getImageData(
          0,
          0,
          IMAGE_SIZE,
          IMAGE_SIZE,
        ).data;
        const image = new RawImage(pixelData, IMAGE_SIZE, IMAGE_SIZE, 4);

        const image_inputs = await processor(image);

        // Compute embeddings
        const { image_embeds } = await vision_model(image_inputs);
        const imageEmbedding = image_embeds.normalize().tolist()[0];

        // Compute similarity
        const similarities = textEmbeddings.map(
          (x) => dot(x, imageEmbedding) * exp_logit_scale,
        );

        const sortedIndices = softmax(similarities)
          .map((x, i) => [x, i])
          .sort((a, b) => b[0] - a[0]);

        // Update UI
        overlay.innerHTML = "";
        for (const [score, index] of sortedIndices) {
          overlay.appendChild(
            document.createTextNode(`${labels[index]}: ${score.toFixed(2)}`),
          );
          overlay.appendChild(document.createElement("br"));
        }
      }

      if (previousTime !== undefined) {
        const fps = 1000 / (performance.now() - previousTime);
        status.textContent = `FPS: ${fps.toFixed(2)}`;
      }
      previousTime = performance.now();
      isProcessing = false;
    })();
  }

  window.requestAnimationFrame(onFrameUpdate);
}

// Start the video stream
navigator.mediaDevices
  .getUserMedia(
    { video: true }, // Ask for video
  )
  .then((stream) => {
    // Set up the video and canvas elements.
    video.srcObject = stream;
    video.play();

    const videoTrack = stream.getVideoTracks()[0];
    const { width, height } = videoTrack.getSettings();

    video.width = width;
    video.height = height;

    // Set container width and height depending on the image aspect ratio
    const ar = width / height;
    const [cw, ch] = ar > 720 / 405 ? [720, 720 / ar] : [405 * ar, 405];
    container.style.width = `${cw}px`;
    container.style.height = `${ch}px`;

    // Start the animation loop
    window.requestAnimationFrame(onFrameUpdate);
  })
  .catch((error) => {
    alert(error);
  });
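The per-frame scoring above is standard CLIP: both embedding sets are L2-normalized, so `dot` computes cosine similarity, which is scaled by `exp_logit_scale` (e^4.6052 ≈ 100, the model's temperature) before `softmax` turns the label scores into a probability distribution. A minimal, self-contained sketch of that math on toy vectors — the helpers below are local stand-ins for illustration, not the library's `dot`/`softmax`:

// Toy illustration of the scoring used above (stand-alone, no library).
const EXP_LOGIT_SCALE = Math.exp(4.6052); // ≈ 100, from the model config

const dot = (a, b) => a.reduce((sum, x, i) => sum + x * b[i], 0);
const l2normalize = (v) => {
  const norm = Math.hypot(...v);
  return v.map((x) => x / norm);
};
const softmax = (xs) => {
  const max = Math.max(...xs); // subtract max for numerical stability
  const exps = xs.map((x) => Math.exp(x - max));
  const total = exps.reduce((a, b) => a + b, 0);
  return exps.map((e) => e / total);
};

// Pretend text embeddings for two labels, and one image embedding.
const textEmbeds = [
  [0.9, 0.1, 0.0],
  [0.1, 0.9, 0.1],
].map(l2normalize);
const imageEmbed = l2normalize([0.8, 0.2, 0.1]);

// Cosine similarity × temperature, then softmax over labels.
const logits = textEmbeds.map((t) => dot(t, imageEmbed) * EXP_LOGIT_SCALE);
console.log(softmax(logits)); // first label wins by a wide margin

Because the vectors are unit-length, each dot product is bounded in [−1, 1]; the large temperature is what lets the softmax produce near-one-hot outputs.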
style.css
CHANGED

@@ -1,28 +1,91 @@
(18 old lines were removed; only fragments were captured in this view: font-size: 15px; margin-bottom: 10px; margin-top: 5px;)

* {
  box-sizing: border-box;
  padding: 0;
  margin: 0;
  font-family: sans-serif;
}

html,
body {
  height: 100%;
}

body {
  padding: 16px 32px;
}

body,
#container {
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}

#controls {
  display: flex;
  padding: 1rem;
  gap: 1rem;
}

#controls > div {
  text-align: center;
}

h1,
h3 {
  text-align: center;
}

h3 {
  margin-top: 0.5rem;
}

#container {
  position: relative;
  width: 720px;
  height: 405px;
  max-width: 100%;
  max-height: 100%;
  border: 2px dashed #d1d5db;
  border-radius: 0.75rem;
  overflow: hidden;
  margin-top: 1rem;
  background-size: 100% 100%;
  background-position: center;
  background-repeat: no-repeat;
}

#status {
  min-height: 16px;
  margin: 8px 0;
}

video {
  width: 100%;
  height: 100%;
}

input[type="text"] {
  padding: 0.25rem 0.5rem;
  border: 1px solid #d1d5db;
  border-radius: 0.25rem;
  margin-top: 2px;
}

input[type="range"] {
  margin-top: 6px;
}

#overlay {
  position: absolute;
  top: 0;
  left: 0;
  background-color: rgba(255, 255, 255, 0.9);
  font-size: 1.25rem;
  border-radius: 2px;
}

#overlay:not(:empty) {
  padding: 0.5rem;
}