Fahd-B committed on
Commit
5928f66
·
verified ·
1 Parent(s): 8de3a0a

Update index.js

Browse files
Files changed (1) hide show
  1. index.js +111 -60
index.js CHANGED
@@ -1,76 +1,127 @@
1
- import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.6';
 
 
 
2
 
3
  // Reference the elements that we will need
4
- const status = document.getElementById('status');
5
- const fileUpload = document.getElementById('upload');
6
- const imageContainer = document.getElementById('container');
7
- const example = document.getElementById('example');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
10
 
11
- // Create a new object detection pipeline
12
- status.textContent = 'Loading model...';
13
- const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
14
- status.textContent = 'Ready';
 
 
 
 
 
 
 
15
 
16
- example.addEventListener('click', (e) => {
17
- e.preventDefault();
18
- detect(EXAMPLE_URL);
19
  });
20
 
21
- fileUpload.addEventListener('change', function (e) {
22
- const file = e.target.files[0];
23
- if (!file) {
24
- return;
25
- }
26
 
27
- const reader = new FileReader();
 
 
 
28
 
29
- // Set up a callback when the file is loaded
30
- reader.onload = e2 => detect(e2.target.result);
 
 
31
 
32
- reader.readAsDataURL(file);
 
 
 
33
  });
34
 
 
 
 
 
 
35
 
36
- // Detect objects in the image
37
- async function detect(img) {
38
- imageContainer.innerHTML = '';
39
- imageContainer.style.backgroundImage = `url(${img})`;
 
 
 
40
 
41
- status.textContent = 'Analysing...';
42
- const output = await detector(img, {
43
- threshold: 0.5,
44
- percentage: true,
45
- });
46
- status.textContent = '';
47
- output.forEach(renderBox);
48
- }
 
 
 
 
49
 
50
- // Render a bounding box and label on the image
51
- function renderBox({ box, label }) {
52
- const { xmax, xmin, ymax, ymin } = box;
53
-
54
- // Generate a random color for the box
55
- const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
56
-
57
- // Draw the box
58
- const boxElement = document.createElement('div');
59
- boxElement.className = 'bounding-box';
60
- Object.assign(boxElement.style, {
61
- borderColor: color,
62
- left: 100 * xmin + '%',
63
- top: 100 * ymin + '%',
64
- width: 100 * (xmax - xmin) + '%',
65
- height: 100 * (ymax - ymin) + '%',
66
- })
67
-
68
- // Draw label
69
- const labelElement = document.createElement('span');
70
- labelElement.textContent = label;
71
- labelElement.className = 'bounding-box-label';
72
- labelElement.style.backgroundColor = color;
73
-
74
- boxElement.appendChild(labelElement);
75
- imageContainer.appendChild(boxElement);
76
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Load the pipeline factory and environment config from the transformers.js CDN bundle.
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.6.0';

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
5
 
6
// Reference the elements that we will need
const statusText = document.getElementById('status-text');
const fileUpload = document.getElementById('file-upload');
const uploadLabel = document.querySelector('.upload-label');
const imagePreview = document.getElementById('image-preview');
const labelsInput = document.getElementById('labels-input');
const classifyBtn = document.getElementById('classify-btn');
const resultsContainer = document.getElementById('results-container');
const spinner = document.querySelector('.spinner');

// Create a new zero-shot image classification pipeline.
// The download is wrapped in try/catch/finally so a network or CDN failure
// surfaces in the UI instead of leaving the spinner running forever on an
// unhandled top-level rejection.
statusText.textContent = 'Loading model...';
spinner.style.display = 'block';
let classifier;
try {
  classifier = await pipeline('zero-shot-image-classification', 'Xenova/clip-vit-base-patch16');
  statusText.textContent = 'Ready';
} catch (err) {
  statusText.textContent = 'Failed to load model. Please refresh and try again.';
  throw err; // re-throw so the failure is still visible in the console
} finally {
  spinner.style.display = 'none';
}

// Data URL of the most recently uploaded image (null until a file is chosen).
let imageSrc = null;
25
+
26
// Read an uploaded image file as a data URL, show it in the preview pane,
// and reset the results pane to its placeholder state.
function handleFile(file) {
  if (!file) return;

  const fileReader = new FileReader();
  fileReader.onload = (loadEvent) => {
    imageSrc = loadEvent.target.result;
    imagePreview.innerHTML = `<img src="${imageSrc}" alt="uploaded image">`;
    resultsContainer.innerHTML = `<div class="placeholder">
      <i class="fas fa-chart-pie"></i>
      <h2>Results will be displayed here</h2>
      <p>Get instant classification results</p>
    </div>`;
  };
  fileReader.readAsDataURL(file);
}
44
 
45
// Forward the first selected file from the file input to handleFile.
fileUpload.addEventListener('change', (changeEvent) => {
  const [firstFile] = changeEvent.target.files;
  handleFile(firstFile);
});
48
 
49
// Drag and drop functionality

// Stop the browser's default handling (e.g. navigating to a dropped file)
// and keep the event from bubbling further up the DOM.
function preventDefaults(e) {
  e.stopPropagation();
  e.preventDefault();
}
55
 
56
// Suppress default drag-and-drop behaviour on the drop target and the page body.
for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
  uploadLabel.addEventListener(eventName, preventDefaults, false);
  document.body.addEventListener(eventName, preventDefaults, false);
}
60
 
61
// Visually highlight the drop target while a drag is hovering over it.
for (const eventName of ['dragenter', 'dragover']) {
  uploadLabel.addEventListener(eventName, () => uploadLabel.classList.add('highlight'), false);
}
66
 
67
// Clear the highlight when the drag leaves the target or the file is dropped.
for (const eventName of ['dragleave', 'drop']) {
  uploadLabel.addEventListener(eventName, () => uploadLabel.classList.remove('highlight'), false);
}
72
 
73
// Handle a file dropped onto the upload label.
uploadLabel.addEventListener('drop', (dropEvent) => {
  const { files } = dropEvent.dataTransfer;
  if (files.length > 0) {
    handleFile(files[0]);
  }
});
80
 
81
// Kick off classification when the button is clicked, after validating that
// an image and at least one non-empty label have been provided.
classifyBtn.addEventListener('click', () => {
  if (!imageSrc) {
    alert('Please upload an image first.');
    return;
  }

  const labels = labelsInput.value
    .split(',')
    .map((label) => label.trim())
    .filter((label) => label.length > 0);

  if (labels.length === 0) {
    alert('Please enter at least one label.');
    return;
  }

  classify(imageSrc, labels);
});
93
 
94
// Classify the image against the candidate labels and render the scores.
// Model errors are caught and surfaced in the status line — and the spinner
// is hidden in `finally` — so a failed inference never leaves the UI stuck
// in the "Analysing..." state.
async function classify(img, labels) {
  statusText.textContent = 'Analysing...';
  spinner.style.display = 'block';
  resultsContainer.innerHTML = '';
  try {
    const output = await classifier(img, labels);
    statusText.textContent = '';
    renderResults(output);
  } catch (err) {
    console.error(err);
    statusText.textContent = 'Classification failed. Please try again.';
  } finally {
    spinner.style.display = 'none';
  }
}
104
+
105
// Render the classification results, highest score first.
// The pipeline's output array is copied before sorting so the caller's
// data is not mutated (Array.prototype.sort sorts in place).
function renderResults(output) {
  resultsContainer.innerHTML = '';
  if (output.length === 0) {
    resultsContainer.innerHTML = '<p>No results found.</p>';
    return;
  }
  const ranked = [...output].sort((a, b) => b.score - a.score);
  ranked.forEach(({ label, score }) => {
    const resultElement = document.createElement('div');
    resultElement.className = 'result';

    // Label on the left, percentage score on the right.
    const labelElement = document.createElement('span');
    labelElement.textContent = label;

    const scoreElement = document.createElement('span');
    scoreElement.textContent = `${(score * 100).toFixed(2)}%`;

    resultElement.appendChild(labelElement);
    resultElement.appendChild(scoreElement);
    resultsContainer.appendChild(resultElement);
  });
}