kalpitsharma commited on
Commit
141681a
·
verified ·
1 Parent(s): 81fda92

Upload 11 files

Browse files
Files changed (11) hide show
  1. classification_reports.html +46 -0
  2. cnn.html +71 -0
  3. cnn_lstm.html +55 -0
  4. cnn_resnet.html +71 -0
  5. cnnkeras.html +55 -0
  6. index.html +69 -28
  7. knn.html +71 -0
  8. lr.html +71 -0
  9. rf.html +71 -0
  10. svm.html +71 -0
  11. vit.html +72 -0
classification_reports.html ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Emotion Recognition Model Reports</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            background-color: #f4f4f4;
            margin: 0;
            padding: 2rem;
        }
        h1 {
            text-align: center;
            color: #333;
        }
        h2 {
            margin-top: 2rem;
            color: #2c3e50;
        }
        /* Report card: white panel with blue accent; pre-wrap preserves the
           classifier report's column layout while still allowing wrapping. */
        pre {
            background-color: #fff;
            border-left: 5px solid #007BFF;
            padding: 1rem;
            overflow-x: auto;
            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
            white-space: pre-wrap;
        }
    </style>
</head>
<body>
    <h1>Classification Reports for Emotion Recognition Models</h1>

    <!-- {{ ... }} are server-side template placeholders (Jinja-style),
         filled in by the backend before this page is served. -->
    <h2>Support Vector Machine (SVM)</h2>
    <pre>{{ svm_report }}</pre>

    <h2>Random Forest</h2>
    <pre>{{ rf_report }}</pre>

    <h2>k-Nearest Neighbors (k-NN)</h2>
    <pre>{{ knn_report }}</pre>

    <h2>Logistic Regression</h2>
    <pre>{{ lr_report }}</pre>
</body>
</html>
cnn.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via CNN</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): the original comment claimed "every 3 seconds"
        // (3000 ms) — confirm whether 300 ms is really intended; it is a
        // heavy request rate for the backend.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the CNN endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet (e.g. no frame drawn)
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/cnn", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
cnn_lstm.html ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <!-- Title fixed: was copy-pasted "via ViT" from another page. -->
    <title>Emotion Detection via CNN + LSTM</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam CNN LSTM Emotion Detector</h2>
    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>
    <p id="emotion">Waiting for detection...</p>
    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const emotionText = document.getElementById("emotion");

        // Start the webcam stream; report a denial instead of silently failing.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { emotionText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            context.drawImage(video, 0, 0, canvas.width, canvas.height);
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/cnn_lstm_video_feed", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    emotionText.textContent = "Detected Emotion: " + data.emotion;
                })
                .catch((err) => {
                    // Previously missing: network failures left the UI stuck
                    // on the last message with an unhandled rejection.
                    emotionText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
    <button onclick="window.location.href='http://127.0.0.1:7860/'">⬅ Back to Model Selection</button>
</body>
</html>
cnn_resnet.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via CNN + RNN</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the CNN+ResNet endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/cnn_resnet", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
cnnkeras.html ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <!-- Title fixed: was copy-pasted "via ViT" from another page. -->
    <title>Emotion Detection via CNN (Keras)</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam CNN Keras Emotion Detector</h2>
    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>
    <p id="emotion">Waiting for detection...</p>
    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const emotionText = document.getElementById("emotion");

        // Start the webcam stream; report a denial instead of silently failing.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { emotionText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            context.drawImage(video, 0, 0, canvas.width, canvas.height);
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/video_feed", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    emotionText.textContent = "Detected Emotion: " + data.emotion;
                })
                .catch((err) => {
                    // Previously missing: network failures produced an
                    // unhandled promise rejection with no user feedback.
                    emotionText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
    <button onclick="window.location.href='http://127.0.0.1:7860/'">⬅ Back to Model Selection</button>
</body>
</html>
index.html CHANGED
@@ -1,39 +1,80 @@
1
  <!DOCTYPE html>
2
  <html>
3
  <head>
4
- <title>Emotion Detection via ViT</title>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  </head>
6
  <body>
7
- <h2>Webcam Emotion Detector</h2>
8
- <video id="video" width="320" height="240" autoplay></video>
9
- <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>
10
- <p id="emotion"></p>
11
- <script>
12
- const video = document.getElementById("video");
13
- const canvas = document.getElementById("canvas");
14
- const context = canvas.getContext("2d");
15
- const emotionText = document.getElementById("emotion");
16
 
17
- navigator.mediaDevices.getUserMedia({ video: true }).then(stream => {
18
- video.srcObject = stream;
19
- });
 
 
 
 
 
 
 
 
 
 
20
 
21
- setInterval(() => {
22
- context.drawImage(video, 0, 0, canvas.width, canvas.height);
23
- canvas.toBlob(blob => {
24
- const formData = new FormData();
25
- formData.append("frame", blob, "frame.jpg");
26
 
27
- fetch("http://127.0.0.1:7860/analyze", {
28
- method: "POST",
29
- body: formData,
30
- })
31
- .then(response => response.json())
32
- .then(data => {
33
- emotionText.textContent = "Detected Emotion: " + data.emotion;
34
- });
35
- }, "image/jpeg");
36
- }, 3000); // every 3 seconds
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  </script>
38
  </body>
39
  </html>
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Select Emotion Detection Model</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
        #reportButton {
            margin-top: 20px;
            padding: 10px 20px;
            font-size: 16px;
            cursor: pointer;
        }
    </style>
</head>
<body>
    <h2>Select Emotion Detection Model</h2>

    <label for="model">Choose Model:</label>
    <select id="model">
        <option value="" selected disabled>-- Select Model --</option>
        <option value="knn">KNN </option>
        <option value="svm">SVM </option>
        <option value="rf">Random Forest </option>
        <option value="lr">Logistic Regression </option>
        <option value="vit">Vision Transformer (ViT)</option>
        <option value="cnnkeras">CNN + Keras Tensorflow</option>
        <option value="cnnonly">CNN</option>
        <option value="cnnrnn">CNN + RNN </option>
        <option value="cnnlstm">CNN + LSTM </option>
    </select>

    <br>

    <!-- Button to call reports -->
    <button id="reportButton">Show Evaluation Report</button>

    <script>
        // Map each <option> value to its backend route. Replaces the long
        // if/else chain so the routing table lives in one place and new
        // models only need one new entry.
        const MODEL_ROUTES = {
            knn: "/knn",
            svm: "/svm",
            rf: "/randomforest",
            lr: "/logistic_regression",
            vit: "/vit",
            cnnkeras: "/cnnkeras",
            cnnonly: "/cnn",
            cnnrnn: "/cnn_resnet",
            cnnlstm: "/cnnlstm",
        };

        // Redirect to the chosen model's page on selection; unknown or empty
        // values (the disabled placeholder) are ignored.
        document.getElementById("model").addEventListener("change", function () {
            const route = MODEL_ROUTES[this.value];
            if (route) {
                window.location.href = route;
            }
        });

        // Open the aggregated evaluation-reports page.
        document.getElementById("reportButton").addEventListener("click", function () {
            window.location.href = "/reports";
        });
    </script>
</body>
</html>
knn.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via KNN</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the KNN endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/knn", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
lr.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via Logistic Regression</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the LR endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/logistic_regression", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
rf.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via Random Forest</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the Random Forest endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/randomforest", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
svm.html ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion Detection via SVM</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the SVM endpoint.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/svm", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>
vit.html ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html>
<head>
    <title>Emotion &amp; Age Detection via ViT</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            text-align: center;
        }
        #video, #canvas {
            border: 2px solid #444;
            border-radius: 8px;
            margin: 10px;
        }
        #result {
            font-size: 18px;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h2>Webcam Emotion &amp; Age Detector</h2>

    <video id="video" width="320" height="240" autoplay></video>
    <!-- Hidden canvas used only as a frame-capture scratch surface. -->
    <canvas id="canvas" width="320" height="240" style="display:none;"></canvas>

    <p id="result">Waiting for detection...</p>

    <button onclick="window.location.href='http://127.0.0.1:7860/'">
        ⬅ Back to Model Selection
    </button>

    <script>
        const video = document.getElementById("video");
        const canvas = document.getElementById("canvas");
        const context = canvas.getContext("2d");
        const resultText = document.getElementById("result");

        // Start the webcam stream; surface a readable message if the user
        // denies camera access instead of leaving an unhandled rejection.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => { video.srcObject = stream; })
            .catch((err) => { resultText.textContent = "Camera error: " + err.message; });

        // Capture and classify a frame every 300 ms.
        // NOTE(review): original comment said "Every 3 seconds" (3000 ms) —
        // confirm which interval is intended.
        setInterval(() => {
            // Draw the current video frame onto the hidden canvas.
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode the frame as JPEG and POST it to the ViT analyze endpoint,
            // which returns both an emotion label and an age-group estimate.
            canvas.toBlob((blob) => {
                if (!blob) return; // canvas not ready yet
                const formData = new FormData();
                formData.append("frame", blob, "frame.jpg");

                fetch("http://127.0.0.1:7860/analyze", {
                    method: "POST",
                    body: formData,
                })
                .then((response) => response.json())
                .then((data) => {
                    resultText.innerHTML = `
                    Detected Emotion: <strong>${data.emotion}</strong><br>
                    Estimated Age Group: <strong>${data.age}</strong>
                    `;
                })
                .catch((err) => {
                    resultText.textContent = "Error: " + err.message;
                });
            }, "image/jpeg");
        }, 300);
    </script>
</body>
</html>