import { AutoProcessor, CLIPVisionModelWithProjection, RawImage, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.6.0';
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Reference the elements that we will need.
// These bindings are shared by all the handler functions defined below,
// so their names must stay stable.
const statusText = document.getElementById('status-text');
const fileUpload = document.getElementById('file-upload');
const dropZone = document.getElementById('drop-zone');
const imagePreview1 = document.getElementById('image-preview-1');
const imagePreview2 = document.getElementById('image-preview-2');
const meterContainer = document.getElementById('meter-container');
const spinner = document.querySelector('.spinner');
// Modal controls for the embedding-graph view.
const showGraphBtn = document.getElementById('show-graph-btn');
const graphModal = document.getElementById('graph-modal');
const closeModalBtn = document.querySelector('.close-button');
const resetZoomBtn = document.getElementById('reset-zoom-btn');
const graphContainerModal = document.getElementById('graph-container-modal');
// Load processor and vision model for more direct embedding control
statusText.textContent = 'Loading model...';
spinner.style.display = 'block';
// Top-level await: module evaluation pauses here until both downloads finish,
// so all of the event wiring below only runs once the model is ready.
const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');
statusText.textContent = 'Ready';
spinner.style.display = 'none';
// Data URLs for the two uploaded images (null until the user provides them).
let imageSrc1 = null;
let imageSrc2 = null;
// Embeddings from the most recent comparison; consumed by the graph modal.
let lastEmbeds = null;
// Initial setup of upload placeholders
clearUploads();
// Disable the browser's default drag-and-drop handling (which would navigate
// to the dropped file) on both the drop zone and the page body.
for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
    dropZone.addEventListener(eventName, preventDefaults, false);
    document.body.addEventListener(eventName, preventDefaults, false);
}
// Toggle the visual highlight while a file is dragged over the drop zone.
for (const eventName of ['dragenter', 'dragover']) {
    dropZone.addEventListener(eventName, () => dropZone.classList.add('highlight'), false);
}
for (const eventName of ['dragleave', 'drop']) {
    dropZone.addEventListener(eventName, () => dropZone.classList.remove('highlight'), false);
}
// Accept files dropped onto the zone.
dropZone.addEventListener('drop', handleDrop, false);
// Reset both upload slots when the clear button is pressed.
const clearBtn = document.getElementById('clear-btn');
clearBtn.addEventListener('click', clearUploads);
// Accept files chosen through the file input.
fileUpload.addEventListener('change', handleFileSelect);
// Modal event listeners.
// Open the embedding graph only after a comparison has produced embeddings.
showGraphBtn.addEventListener('click', () => {
    if (lastEmbeds) {
        graphModal.style.display = 'block';
        renderEmbeddingGraph(lastEmbeds.embeds1, lastEmbeds.embeds2);
    }
});
closeModalBtn.addEventListener('click', () => {
    graphModal.style.display = 'none';
});
// Clicking the modal backdrop (the modal element itself, not its contents)
// also closes it. Use strict equality per house style; for object references
// `==` and `===` behave identically, so this is a pure idiom fix.
window.addEventListener('click', (event) => {
    if (event.target === graphModal) {
        graphModal.style.display = 'none';
    }
});
/**
 * Suppress the browser's default handling of a drag-related event and stop
 * it from bubbling further up the DOM.
 * @param {Event} event - the event to suppress
 */
function preventDefaults(event) {
    event.preventDefault();
    event.stopPropagation();
}
/**
 * Pull the dropped files out of a drop event and route them to the
 * shared file handler.
 * @param {DragEvent} e - drop event carrying a DataTransfer with files
 */
function handleDrop(e) {
    const { files } = e.dataTransfer;
    handleFiles(files);
}
/**
 * Forward files picked via the file input's change event to the shared
 * file handler.
 * @param {Event} e - change event fired by the file input
 */
function handleFileSelect(e) {
    const { files } = e.target;
    handleFiles(files);
}
/**
 * Distribute selected/dropped files into the two preview slots.
 * Slot 1 is filled first (and slot 2 too if a second file was provided in the
 * same batch); once slot 1 is occupied, a new file goes into slot 2.
 * @param {FileList|File[]} files - files from a drop or input change event
 */
function handleFiles(files) {
    const [first, second] = Array.from(files);
    if (first === undefined) {
        return;
    }
    if (imageSrc1) {
        // Slot 1 already taken: the new file replaces slot 2.
        handleIndividualFile(first, '2');
        return;
    }
    handleIndividualFile(first, '1');
    if (second !== undefined) {
        handleIndividualFile(second, '2');
    }
}
/**
 * Read a File into a data URL and place it into the given preview slot,
 * then trigger a comparison if both slots are now populated.
 *
 * NOTE(review): the original preview markup was lost — `innerHTML` was being
 * assigned an empty template literal, so the preview never showed anything.
 * Restored here as a plain <img> element; confirm against the page's intended
 * preview markup/classes.
 *
 * @param {File} file - image file to preview (ignored if falsy)
 * @param {'1'|'2'} target - which preview slot to fill
 */
function handleIndividualFile(file, target) {
    if (!file) {
        return;
    }
    const reader = new FileReader();
    reader.onload = function (e2) {
        const imageSrc = e2.target.result;
        if (target === '1') {
            imageSrc1 = imageSrc;
            imagePreview1.innerHTML = `<img src="${imageSrc}" alt="Image preview 1" />`;
        } else if (target === '2') {
            imageSrc2 = imageSrc;
            imagePreview2.innerHTML = `<img src="${imageSrc}" alt="Image preview 2" />`;
        }
        // Runs asynchronously once the file is read; starts the comparison
        // as soon as both slots hold an image.
        checkAndCompare();
    };
    reader.readAsDataURL(file);
}
/**
 * Start a comparison once both image slots are populated; a no-op until then.
 */
function checkAndCompare() {
    if (!imageSrc1 || !imageSrc2) {
        return;
    }
    compareImages(imageSrc1, imageSrc2);
}
function clearUploads() {
const placeholder = `
Image preview
Failed to compare images: ${error.message}