tannuiscoding committed on
Commit
09000c8
·
1 Parent(s): 47f3f40

application file added

Browse files
TrueVoxAI/.gitignore → .gitignore RENAMED
@@ -1,2 +1,2 @@
1
- ./env/
2
  env/
 
1
+ ./env/
2
  env/
TrueVoxAI/Dockerfile → Dockerfile RENAMED
@@ -1,35 +1,35 @@
1
- # Base image with Python
2
- FROM python:3.10-slim
3
-
4
- # Set environment variables
5
- ENV PYTHONDONTWRITEBYTECODE=1
6
- ENV PYTHONUNBUFFERED=1
7
-
8
- # Set work directory
9
- WORKDIR /app
10
-
11
- # Install system dependencies
12
- RUN apt-get update && apt-get install -y \
13
- ffmpeg \
14
- libsndfile1 \
15
- libglib2.0-0 \
16
- libsm6 \
17
- libxrender1 \
18
- libxext6 \
19
- && rm -rf /var/lib/apt/lists/*
20
-
21
- # Install Python dependencies
22
- COPY requirements.txt .
23
- RUN pip install --no-cache-dir -r requirements.txt
24
-
25
- # Copy app files
26
- COPY . .
27
-
28
- # Create uploads directory
29
- RUN mkdir -p uploads
30
-
31
- # Expose the port Hugging Face expects
32
- EXPOSE 7860
33
-
34
- # Run the app
35
- CMD ["python", "app.py"]
 
1
# Base image with Python
FROM python:3.10-slim

# Keep Python from writing .pyc files and force unbuffered stdout/stderr
# (combined into one ENV instruction to save an image layer)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set work directory
WORKDIR /app

# System libraries for audio decoding (ffmpeg) and soundfile/librosa;
# --no-install-recommends keeps the slim image small
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg \
    libsndfile1 \
    libglib2.0-0 \
    libsm6 \
    libxrender1 \
    libxext6 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer is cached across code edits
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy app files
COPY . .

# Create uploads directory used by app.py
RUN mkdir -p uploads

# Expose the port Hugging Face expects
EXPOSE 7860

# Run the app
CMD ["python", "app.py"]
{TrueVoxAI/Notebooks → Notebooks}/TrueVoxAI.ipynb RENAMED
The diff for this file is too large to render. See raw diff
 
TrueVoxAI/TrueVox/.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
TrueVoxAI/TrueVox/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: TrueVox
3
- emoji: πŸ¦€
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- short_description: It is a speech emotion recognition system
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
TrueVoxAI/app.py → app.py RENAMED
@@ -1,132 +1,132 @@
1
- from flask import Flask, request, jsonify, render_template
2
- import os
3
- import numpy as np
4
- import librosa
5
- import joblib
6
- import speech_recognition as sr
7
- from werkzeug.utils import secure_filename
8
-
9
- app = Flask(__name__)
10
-
11
- UPLOAD_FOLDER = 'uploads'
12
- ALLOWED_EXTENSIONS = {'wav', 'mp3', 'ogg'}
13
- app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
14
- app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max upload
15
-
16
- os.makedirs(UPLOAD_FOLDER, exist_ok=True)
17
-
18
- model = joblib.load("mlp_emotion_model.pkl")
19
- scaler = joblib.load("scaler.pkl")
20
- label_encoder = joblib.load("label_encoder.pkl")
21
-
22
- def allowed_file(filename):
23
- return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
24
-
25
- def extract_features(file_path):
26
- """Extract audio features for emotion recognition"""
27
- try:
28
- y_data, sr = librosa.load(file_path, sr=None, mono=True)
29
-
30
- if len(y_data) == 0:
31
- return None
32
-
33
- features = np.hstack([
34
- np.mean(librosa.feature.zero_crossing_rate(y_data).T, axis=0),
35
- np.mean(librosa.feature.chroma_stft(y=y_data, sr=sr).T, axis=0),
36
- np.mean(librosa.feature.mfcc(y=y_data, sr=sr, n_mfcc=40).T, axis=0),
37
- np.mean(librosa.feature.melspectrogram(y=y_data, sr=sr).T, axis=0)
38
- ])
39
- return features
40
- except Exception as e:
41
- print(f"Error extracting features: {e}")
42
- return None
43
-
44
- def recognize_emotion(file_path):
45
- """Recognize emotion in audio file"""
46
- features = extract_features(file_path)
47
- if features is None:
48
- return {"error": "Failed to extract audio features"}
49
-
50
- scaled_features = scaler.transform(features.reshape(1, -1))
51
-
52
- prediction = model.predict(scaled_features)
53
- emotion = label_encoder.inverse_transform(prediction)[0]
54
-
55
- probs = model.predict_proba(scaled_features)[0]
56
- emotion_probs = {emotion: float(prob) for emotion, prob in zip(label_encoder.classes_, probs)}
57
-
58
- emotion_map = {
59
- 'ANG': 'Angry',
60
- 'DIS': 'Disgusted',
61
- 'FEA': 'Fearful',
62
- 'HAP': 'Happy',
63
- 'NEU': 'Neutral',
64
- 'SAD': 'Sad'
65
- }
66
-
67
- full_emotion = emotion_map.get(emotion, emotion)
68
-
69
- return {
70
- "emotion": emotion,
71
- "emotion_name": full_emotion,
72
- "confidence": float(max(probs)),
73
- "probabilities": emotion_probs
74
- }
75
-
76
- def transcribe_audio(file_path):
77
- """Transcribe speech to text from audio file"""
78
- recognizer = sr.Recognizer()
79
-
80
- try:
81
-
82
- with sr.AudioFile(file_path) as source:
83
-
84
- audio_data = recognizer.record(source)
85
- text = recognizer.recognize_google(audio_data)
86
- return {"text": text}
87
- except sr.UnknownValueError:
88
- return {"text": "Speech unclear", "error": "Could not understand audio"}
89
- except sr.RequestError as e:
90
- return {"text": "", "error": f"Speech service error: {e}"}
91
- except Exception as e:
92
- return {"text": "", "error": f"Error transcribing audio: {e}"}
93
-
94
- @app.route('/')
95
- def index():
96
- return render_template('index.html')
97
-
98
- @app.route('/analyze', methods=['POST'])
99
- def analyze_audio():
100
- if 'file' not in request.files:
101
- return jsonify({"error": "No file uploaded"}), 400
102
-
103
- file = request.files['file']
104
-
105
- if file.filename == '':
106
- return jsonify({"error": "No file selected"}), 400
107
-
108
- if not allowed_file(file.filename):
109
- return jsonify({"error": f"File type not allowed. Supported types: {', '.join(ALLOWED_EXTENSIONS)}"}), 400
110
-
111
- try:
112
- filename = secure_filename(file.filename)
113
- file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
114
- file.save(file_path)
115
-
116
- emotion_result = recognize_emotion(file_path)
117
- transcription_result = transcribe_audio(file_path)
118
-
119
- result = {
120
- "filename": filename,
121
- "emotion": emotion_result,
122
- "transcription": transcription_result
123
- }
124
-
125
- return jsonify(result)
126
- except Exception as e:
127
- return jsonify({"error": f"Error processing audio: {str(e)}"}), 500
128
- finally:
129
- pass
130
-
131
- if __name__ == '__main__':
132
- app.run(debug=True)
 
1
+ from flask import Flask, request, jsonify, render_template
2
+ import os
3
+ import numpy as np
4
+ import librosa
5
+ import joblib
6
+ import speech_recognition as sr
7
+ from werkzeug.utils import secure_filename
8
+
9
# Flask application bootstrap: upload-handling configuration plus the
# pre-trained emotion-recognition artifacts, loaded once at import time.
app = Flask(__name__)

UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'wav', 'mp3', 'ogg'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # reject uploads over 16MB

# Make sure the upload target exists before the first request arrives.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Model artifacts are loaded eagerly so a missing .pkl fails fast at startup.
model = joblib.load("mlp_emotion_model.pkl")
scaler = joblib.load("scaler.pkl")
label_encoder = joblib.load("label_encoder.pkl")
21
+
22
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
24
+
25
def extract_features(file_path):
    """Build a single 1-D feature vector for emotion classification.

    Concatenates the per-frame means of zero-crossing rate, chroma,
    40 MFCCs, and the mel spectrogram computed from the audio at
    *file_path*.  Returns a numpy array, or None when the file decodes
    to zero samples or any extraction step raises.
    """
    try:
        # NOTE(review): the sample-rate name shadows the module-level
        # `speech_recognition as sr` import inside this function only.
        samples, sr = librosa.load(file_path, sr=None, mono=True)

        if len(samples) == 0:
            return None

        feature_blocks = [
            librosa.feature.zero_crossing_rate(samples),
            librosa.feature.chroma_stft(y=samples, sr=sr),
            librosa.feature.mfcc(y=samples, sr=sr, n_mfcc=40),
            librosa.feature.melspectrogram(y=samples, sr=sr),
        ]
        # Average each feature across time frames, then concatenate.
        return np.hstack([np.mean(block.T, axis=0) for block in feature_blocks])
    except Exception as e:
        print(f"Error extracting features: {e}")
        return None
43
+
44
def recognize_emotion(file_path):
    """Classify the emotion of the audio at *file_path*.

    Returns a dict with the raw model label, a human-readable name, the
    top-class probability, and the full per-class probability map.  When
    feature extraction fails, returns {"error": ...} instead.
    """
    features = extract_features(file_path)
    if features is None:
        return {"error": "Failed to extract audio features"}

    scaled = scaler.transform(features.reshape(1, -1))

    predicted = model.predict(scaled)
    emotion = label_encoder.inverse_transform(predicted)[0]

    probs = model.predict_proba(scaled)[0]
    # One probability per known class, in label_encoder order.
    emotion_probs = {
        label: float(p) for label, p in zip(label_encoder.classes_, probs)
    }

    # Abbreviated dataset labels -> display names; unknown labels pass through.
    emotion_map = {
        'ANG': 'Angry',
        'DIS': 'Disgusted',
        'FEA': 'Fearful',
        'HAP': 'Happy',
        'NEU': 'Neutral',
        'SAD': 'Sad'
    }

    return {
        "emotion": emotion,
        "emotion_name": emotion_map.get(emotion, emotion),
        "confidence": float(max(probs)),
        "probabilities": emotion_probs
    }
75
+
76
def transcribe_audio(file_path):
    """Transcribe speech in *file_path* via the Google Web Speech API.

    Returns {"text": ...} on success; on failure the dict also carries an
    "error" key.  NOTE(review): sr.AudioFile only reads WAV/AIFF/FLAC,
    yet the app accepts .mp3/.ogg uploads — confirm those are converted
    upstream or expect the generic error path for them.
    """
    recognizer = sr.Recognizer()

    try:
        with sr.AudioFile(file_path) as source:
            audio_data = recognizer.record(source)
        # Network call to Google's free recognizer endpoint.
        text = recognizer.recognize_google(audio_data)
        return {"text": text}
    except sr.UnknownValueError:
        return {"text": "Speech unclear", "error": "Could not understand audio"}
    except sr.RequestError as e:
        return {"text": "", "error": f"Speech service error: {e}"}
    except Exception as e:
        return {"text": "", "error": f"Error transcribing audio: {e}"}
93
+
94
@app.route('/')
def index():
    """Serve the single-page UI (templates/index.html)."""
    return render_template('index.html')
97
+
98
@app.route('/analyze', methods=['POST'])
def analyze_audio():
    """Handle an audio upload: save it, run emotion recognition and
    speech transcription, and return both results as JSON.

    Responses:
      200 -- {"filename", "emotion", "transcription"}
      400 -- missing / empty / unsupported file
      500 -- unexpected processing failure
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']

    if file.filename == '':
        return jsonify({"error": "No file selected"}), 400

    if not allowed_file(file.filename):
        return jsonify({"error": f"File type not allowed. Supported types: {', '.join(ALLOWED_EXTENSIONS)}"}), 400

    file_path = None
    try:
        filename = secure_filename(file.filename)
        if not filename:
            # secure_filename() can reduce a hostile name (e.g. "../..")
            # to an empty string; treat that the same as no selection.
            return jsonify({"error": "No file selected"}), 400
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)

        emotion_result = recognize_emotion(file_path)
        transcription_result = transcribe_audio(file_path)

        result = {
            "filename": filename,
            "emotion": emotion_result,
            "transcription": transcription_result
        }

        return jsonify(result)
    except Exception as e:
        return jsonify({"error": f"Error processing audio: {str(e)}"}), 500
    finally:
        # The upload is only needed for this one request.  Previously this
        # block was `pass`, so files accumulated in uploads/ forever
        # (a disk leak inside the container).  Best-effort cleanup only;
        # never let it mask the response being returned.
        if file_path and os.path.exists(file_path):
            try:
                os.remove(file_path)
            except OSError:
                pass
130
+
131
if __name__ == '__main__':
    # Bind to all interfaces on the port the Dockerfile EXPOSEs (7860).
    # The previous app.run(debug=True) listened on 127.0.0.1:5000 — the
    # container's published port 7860 was never served, so the Space was
    # unreachable — and it left the Werkzeug debugger enabled in production.
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port)
TrueVoxAI/label_encoder.pkl → label_encoder.pkl RENAMED
File without changes
TrueVoxAI/mlp_emotion_model.pkl → mlp_emotion_model.pkl RENAMED
File without changes
TrueVoxAI/requirements.txt → requirements.txt RENAMED
@@ -1,6 +1,6 @@
1
- Flask
2
- numpy
3
- librosa
4
- joblib
5
- speechrecognition
6
  werkzeug
 
1
+ Flask
2
+ numpy
3
+ librosa
4
+ joblib
5
+ speechrecognition
6
  werkzeug
TrueVoxAI/scaler.pkl → scaler.pkl RENAMED
File without changes
{TrueVoxAI/static → static}/style.css RENAMED
File without changes
{TrueVoxAI/templates → templates}/index.html RENAMED
@@ -1,718 +1,718 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
- <head>
4
- <meta charset="UTF-8">
5
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
- <title>Speech Emotion Detector</title>
7
- <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600;700&family=Open+Sans:wght@400;500;600&display=swap" rel="stylesheet">
8
- <script src="https://cdn.tailwindcss.com"></script>
9
- <script src="https://unpkg.com/lucide@latest"></script>
10
- <script src="https://unpkg.com/@lucide/web@latest"></script>
11
- <script>
12
- tailwind.config = {
13
- darkMode: 'class',
14
- theme: {
15
- extend: {
16
- colors: {
17
- primary: {
18
- DEFAULT: '#ed1b76',
19
- dark: '#d01868',
20
- light: '#ff3d8e'
21
- },
22
- accent: {
23
- DEFAULT: '#249f9c',
24
- dark: '#037a76',
25
- light: '#3cbfbc'
26
- }
27
- },
28
- fontFamily: {
29
- poppins: ['Poppins', 'sans-serif'],
30
- opensans: ['Open Sans', 'sans-serif']
31
- }
32
- }
33
- }
34
- }
35
- </script>
36
- <style type="text/tailwindcss">
37
- @layer utilities {
38
- .text-gradient {
39
- @apply bg-gradient-to-r from-primary to-accent bg-clip-text text-transparent;
40
- }
41
- .progress-ring-circle {
42
- transition: stroke-dashoffset 0.35s;
43
- transform: rotate(-90deg);
44
- transform-origin: 50% 50%;
45
- }
46
- }
47
- </style>
48
- </head>
49
- <body class="font-opensans bg-gray-50 text-gray-800 dark:bg-gray-900 dark:text-gray-100 transition-colors duration-200">
50
- <!-- Toast Notifications -->
51
- <div id="toast-container" class="fixed top-4 right-4 z-50 flex flex-col gap-2"></div>
52
-
53
- <!-- Dark Mode Toggle -->
54
- <div class="absolute top-4 right-4 z-40">
55
- <button id="theme-toggle" class="p-2 rounded-full bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 transition-colors">
56
- <i data-lucide="sun" class="hidden dark:block w-5 h-5 text-yellow-400"></i>
57
- <i data-lucide="moon" class="block dark:hidden w-5 h-5 text-gray-700"></i>
58
- </button>
59
- </div>
60
-
61
- <!-- Hero Section -->
62
- <header class="relative bg-gradient-to-b from-white to-gray-100 dark:from-gray-800 dark:to-gray-900 pt-16 pb-12 px-4 sm:px-6 lg:px-8 text-center">
63
- <div class="max-w-4xl mx-auto">
64
- <h1 class="font-poppins font-bold text-4xl md:text-5xl lg:text-6xl mb-4 text-gradient">
65
- Speech Emotion Detector
66
- </h1>
67
- <p class="text-lg md:text-xl text-gray-600 dark:text-gray-300 mb-8">
68
- Detect emotion and transcribe speech with a single upload
69
- </p>
70
- <div class="flex justify-center">
71
- <button id="about-btn" class="flex items-center gap-2 px-4 py-2 rounded-full bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 transition-colors">
72
- <i data-lucide="info" class="w-4 h-4"></i>
73
- <span>How it works</span>
74
- </button>
75
- </div>
76
- </div>
77
- </header>
78
-
79
- <main class="max-w-4xl mx-auto px-4 sm:px-6 lg:px-8 py-8">
80
- <!-- Upload Section -->
81
- <section class="mb-12">
82
- <div id="upload-container" class="border-2 border-dashed border-gray-300 dark:border-gray-700 rounded-lg p-8 text-center transition-all hover:border-primary dark:hover:border-primary cursor-pointer">
83
- <div class="flex flex-col items-center justify-center gap-4">
84
- <div class="w-16 h-16 rounded-full bg-primary/10 flex items-center justify-center">
85
- <i data-lucide="upload-cloud" class="w-8 h-8 text-primary"></i>
86
- </div>
87
- <div>
88
- <h2 class="font-poppins font-semibold text-xl mb-2">Upload Audio File</h2>
89
- <p class="text-gray-500 dark:text-gray-400 mb-4">Drag & drop your audio file here or click to browse</p>
90
- <p class="text-sm text-gray-400 dark:text-gray-500">Supported formats: .wav, .mp3, .ogg</p>
91
- </div>
92
- <input type="file" id="file-input" class="hidden" accept=".wav,.mp3,.ogg">
93
- </div>
94
- </div>
95
-
96
- <div id="file-info" class="hidden mt-4 p-4 bg-gray-100 dark:bg-gray-800 rounded-lg">
97
- <div class="flex items-center justify-between">
98
- <div class="flex items-center gap-3">
99
- <i data-lucide="file-audio" class="w-6 h-6 text-primary"></i>
100
- <div>
101
- <p id="file-name" class="font-medium"></p>
102
- <p id="file-size" class="text-sm text-gray-500 dark:text-gray-400"></p>
103
- </div>
104
- </div>
105
- <button id="remove-file" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
106
- <i data-lucide="x" class="w-5 h-5 text-gray-500 dark:text-gray-400"></i>
107
- </button>
108
- </div>
109
- </div>
110
-
111
- <div class="flex flex-wrap gap-4 mt-6">
112
- <button id="analyze-btn" class="flex-1 bg-primary hover:bg-primary-dark text-white font-medium py-3 px-6 rounded-lg flex items-center justify-center gap-2 transition-colors disabled:opacity-50 disabled:cursor-not-allowed">
113
- <i data-lucide="activity" class="w-5 h-5"></i>
114
- <span>Analyze Audio</span>
115
- </button>
116
- <button id="record-btn" class="flex-1 bg-accent hover:bg-accent-dark text-white font-medium py-3 px-6 rounded-lg flex items-center justify-center gap-2 transition-colors">
117
- <i data-lucide="mic" class="w-5 h-5"></i>
118
- <span>Record Audio</span>
119
- </button>
120
- </div>
121
- </section>
122
-
123
- <!-- Loading Animation -->
124
- <div id="loading" class="hidden">
125
- <div class="flex flex-col items-center justify-center py-12">
126
- <div class="relative w-20 h-20">
127
- <div class="absolute top-0 left-0 w-full h-full border-4 border-gray-200 dark:border-gray-700 rounded-full"></div>
128
- <div class="absolute top-0 left-0 w-full h-full border-4 border-t-primary border-r-transparent border-b-transparent border-l-transparent rounded-full animate-spin"></div>
129
- </div>
130
- <p class="mt-4 text-lg font-medium">Analyzing audio...</p>
131
- <p class="text-gray-500 dark:text-gray-400">This may take a few moments</p>
132
- </div>
133
- </div>
134
-
135
- <!-- Results Section -->
136
- <section id="results" class="hidden">
137
- <h2 class="font-poppins font-semibold text-2xl mb-6">Analysis Results</h2>
138
-
139
- <div class="grid grid-cols-1 md:grid-cols-2 gap-6 mb-8">
140
- <!-- Emotion Card -->
141
- <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
142
- <div class="p-6">
143
- <h3 class="font-poppins font-medium text-lg mb-4">Detected Emotion</h3>
144
- <div class="flex items-center justify-between">
145
- <div class="flex items-center gap-4">
146
- <div id="emotion-emoji" class="text-4xl"></div>
147
- <div>
148
- <p id="emotion-label" class="font-poppins font-semibold text-2xl"></p>
149
- <p class="text-gray-500 dark:text-gray-400">Primary emotion</p>
150
- </div>
151
- </div>
152
- <div class="relative w-20 h-20">
153
- <svg class="w-full h-full" viewBox="0 0 36 36">
154
- <circle cx="18" cy="18" r="16" fill="none" stroke-width="2" class="stroke-gray-200 dark:stroke-gray-700"></circle>
155
- <circle id="confidence-circle" cx="18" cy="18" r="16" fill="none" stroke-width="2" class="stroke-primary progress-ring-circle" stroke-dasharray="100" stroke-dashoffset="0"></circle>
156
- <text id="confidence-text" x="18" y="18" text-anchor="middle" dominant-baseline="middle" class="font-medium text-sm"></text>
157
- </svg>
158
- </div>
159
- </div>
160
- </div>
161
- </div>
162
-
163
- <!-- All Emotions Card -->
164
- <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
165
- <div class="p-6">
166
- <h3 class="font-poppins font-medium text-lg mb-4">All Emotions</h3>
167
- <div class="space-y-3">
168
- <div class="emotion-bar" data-emotion="angry">
169
- <div class="flex justify-between mb-1">
170
- <span>Angry 😠</span>
171
- <span class="emotion-percentage">0%</span>
172
- </div>
173
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
174
- <div class="bg-red-500 h-2.5 rounded-full" style="width: 0%"></div>
175
- </div>
176
- </div>
177
- <div class="emotion-bar" data-emotion="disgusted">
178
- <div class="flex justify-between mb-1">
179
- <span>Disgusted 🀒</span>
180
- <span class="emotion-percentage">0%</span>
181
- </div>
182
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
183
- <div class="bg-green-500 h-2.5 rounded-full" style="width: 0%"></div>
184
- </div>
185
- </div>
186
- <div class="emotion-bar" data-emotion="fearful">
187
- <div class="flex justify-between mb-1">
188
- <span>Fearful 😨</span>
189
- <span class="emotion-percentage">0%</span>
190
- </div>
191
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
192
- <div class="bg-purple-500 h-2.5 rounded-full" style="width: 0%"></div>
193
- </div>
194
- </div>
195
- <div class="emotion-bar" data-emotion="happy">
196
- <div class="flex justify-between mb-1">
197
- <span>Happy 😊</span>
198
- <span class="emotion-percentage">0%</span>
199
- </div>
200
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
201
- <div class="bg-yellow-500 h-2.5 rounded-full" style="width: 0%"></div>
202
- </div>
203
- </div>
204
- <div class="emotion-bar" data-emotion="neutral">
205
- <div class="flex justify-between mb-1">
206
- <span>Neutral 😐</span>
207
- <span class="emotion-percentage">0%</span>
208
- </div>
209
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
210
- <div class="bg-blue-400 h-2.5 rounded-full" style="width: 0%"></div>
211
- </div>
212
- </div>
213
- <div class="emotion-bar" data-emotion="sad">
214
- <div class="flex justify-between mb-1">
215
- <span>Sad 😒</span>
216
- <span class="emotion-percentage">0%</span>
217
- </div>
218
- <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
219
- <div class="bg-blue-600 h-2.5 rounded-full" style="width: 0%"></div>
220
- </div>
221
- </div>
222
- </div>
223
- </div>
224
- </div>
225
- </div>
226
-
227
- <!-- Transcription Card -->
228
- <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
229
- <div class="p-6">
230
- <div class="flex justify-between items-center mb-4">
231
- <h3 class="font-poppins font-medium text-lg">Transcription</h3>
232
- <button id="copy-transcription" class="p-2 rounded-lg bg-gray-100 dark:bg-gray-700 hover:bg-gray-200 dark:hover:bg-gray-600 transition-colors">
233
- <i data-lucide="copy" class="w-5 h-5"></i>
234
- </button>
235
- </div>
236
- <div class="bg-gray-50 dark:bg-gray-900 rounded-lg p-4 min-h-[100px]">
237
- <p id="transcription-text" class="whitespace-pre-wrap"></p>
238
- </div>
239
- </div>
240
- </div>
241
- </section>
242
-
243
- <!-- Error Alert -->
244
- <div id="error-alert" class="hidden bg-red-100 border-l-4 border-red-500 text-red-700 p-4 rounded-lg mt-6">
245
- <div class="flex items-start">
246
- <div class="flex-shrink-0">
247
- <i data-lucide="alert-circle" class="w-5 h-5"></i>
248
- </div>
249
- <div class="ml-3">
250
- <p id="error-message" class="text-sm font-medium"></p>
251
- </div>
252
- <button id="close-error" class="ml-auto -mx-1.5 -my-1.5 bg-red-100 text-red-500 rounded-lg p-1.5 hover:bg-red-200 inline-flex h-8 w-8 items-center justify-center">
253
- <i data-lucide="x" class="w-5 h-5"></i>
254
- </button>
255
- </div>
256
- </div>
257
- </main>
258
-
259
- <!-- About Modal -->
260
- <div id="about-modal" class="fixed inset-0 z-50 hidden">
261
- <div class="absolute inset-0 bg-black bg-opacity-50"></div>
262
- <div class="relative top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 bg-white dark:bg-gray-800 rounded-xl shadow-xl max-w-lg w-full max-h-[90vh] overflow-y-auto">
263
- <div class="p-6">
264
- <div class="flex justify-between items-center mb-4">
265
- <h2 class="font-poppins font-semibold text-2xl">How It Works</h2>
266
- <button id="close-modal" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
267
- <i data-lucide="x" class="w-6 h-6"></i>
268
- </button>
269
- </div>
270
- <div class="space-y-4">
271
- <p>The Speech Emotion Detector uses advanced machine learning algorithms to analyze audio and detect emotions in speech.</p>
272
-
273
- <h3 class="font-poppins font-medium text-lg">How to use:</h3>
274
- <ol class="list-decimal list-inside space-y-2">
275
- <li>Upload an audio file (.wav, .mp3, or .ogg format)</li>
276
- <li>Click "Analyze Audio" to process the file</li>
277
- <li>View the detected emotion, confidence score, and transcription</li>
278
- </ol>
279
-
280
- <h3 class="font-poppins font-medium text-lg">Technology:</h3>
281
- <p>This application uses:</p>
282
- <ul class="list-disc list-inside space-y-1">
283
- <li>Speech recognition for transcription</li>
284
- <li>Audio feature extraction (MFCC, chroma, mel spectrogram)</li>
285
- <li>Machine learning models trained on emotional speech datasets</li>
286
- </ul>
287
-
288
- <p>The system can detect six basic emotions: Angry, Disgusted, Fearful, Happy, Neutral, and Sad.</p>
289
- </div>
290
- </div>
291
- </div>
292
- </div>
293
-
294
- <!-- Record Modal -->
295
- <div id="record-modal" class="fixed inset-0 z-50 hidden">
296
- <div class="absolute inset-0 bg-black bg-opacity-50"></div>
297
- <div class="relative top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 bg-white dark:bg-gray-800 rounded-xl shadow-xl max-w-md w-full">
298
- <div class="p-6">
299
- <div class="flex justify-between items-center mb-4">
300
- <h2 class="font-poppins font-semibold text-2xl">Record Audio</h2>
301
- <button id="close-record-modal" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
302
- <i data-lucide="x" class="w-6 h-6"></i>
303
- </button>
304
- </div>
305
- <div class="flex flex-col items-center justify-center py-8">
306
- <div id="record-button" class="w-24 h-24 rounded-full bg-red-500 flex items-center justify-center cursor-pointer hover:bg-red-600 transition-colors mb-4">
307
- <i data-lucide="mic" class="w-12 h-12 text-white"></i>
308
- </div>
309
- <p id="record-status" class="text-lg font-medium">Click to start recording</p>
310
- <p id="record-timer" class="text-gray-500 dark:text-gray-400 mt-2">00:00</p>
311
- </div>
312
- <div class="flex justify-center gap-4">
313
- <button id="cancel-recording" class="bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 text-gray-800 dark:text-gray-200 font-medium py-2 px-4 rounded-lg transition-colors">
314
- Cancel
315
- </button>
316
- <button id="save-recording" class="bg-primary hover:bg-primary-dark text-white font-medium py-2 px-4 rounded-lg transition-colors disabled:opacity-50 disabled:cursor-not-allowed" disabled>
317
- Use Recording
318
- </button>
319
- </div>
320
- </div>
321
- </div>
322
- </div>
323
-
324
- <script>
325
- // Initialize Lucide icons
326
- lucide.createIcons();
327
-
328
- // DOM Elements
329
- const themeToggle = document.getElementById('theme-toggle');
330
- const uploadContainer = document.getElementById('upload-container');
331
- const fileInput = document.getElementById('file-input');
332
- const fileInfo = document.getElementById('file-info');
333
- const fileName = document.getElementById('file-name');
334
- const fileSize = document.getElementById('file-size');
335
- const removeFile = document.getElementById('remove-file');
336
- const analyzeBtn = document.getElementById('analyze-btn');
337
- const recordBtn = document.getElementById('record-btn');
338
- const loading = document.getElementById('loading');
339
- const results = document.getElementById('results');
340
- const emotionEmoji = document.getElementById('emotion-emoji');
341
- const emotionLabel = document.getElementById('emotion-label');
342
- const confidenceCircle = document.getElementById('confidence-circle');
343
- const confidenceText = document.getElementById('confidence-text');
344
- const transcriptionText = document.getElementById('transcription-text');
345
- const copyTranscription = document.getElementById('copy-transcription');
346
- const errorAlert = document.getElementById('error-alert');
347
- const errorMessage = document.getElementById('error-message');
348
- const closeError = document.getElementById('close-error');
349
- const aboutBtn = document.getElementById('about-btn');
350
- const aboutModal = document.getElementById('about-modal');
351
- const closeModal = document.getElementById('close-modal');
352
- const recordModal = document.getElementById('record-modal');
353
- const closeRecordModal = document.getElementById('close-record-modal');
354
- const recordButton = document.getElementById('record-button');
355
- const recordStatus = document.getElementById('record-status');
356
- const recordTimer = document.getElementById('record-timer');
357
- const cancelRecording = document.getElementById('cancel-recording');
358
- const saveRecording = document.getElementById('save-recording');
359
- const toastContainer = document.getElementById('toast-container');
360
-
361
- // Check for dark mode preference
362
- if (localStorage.getItem('theme') === 'dark' ||
363
- (!localStorage.getItem('theme') && window.matchMedia('(prefers-color-scheme: dark)').matches)) {
364
- document.documentElement.classList.add('dark');
365
- } else {
366
- document.documentElement.classList.remove('dark');
367
- }
368
-
369
- // Theme toggle
370
- themeToggle.addEventListener('click', () => {
371
- if (document.documentElement.classList.contains('dark')) {
372
- document.documentElement.classList.remove('dark');
373
- localStorage.setItem('theme', 'light');
374
- } else {
375
- document.documentElement.classList.add('dark');
376
- localStorage.setItem('theme', 'dark');
377
- }
378
- });
379
-
380
- // File upload handling
381
- uploadContainer.addEventListener('click', () => {
382
- fileInput.click();
383
- });
384
-
385
- uploadContainer.addEventListener('dragover', (e) => {
386
- e.preventDefault();
387
- uploadContainer.classList.add('border-primary');
388
- });
389
-
390
- uploadContainer.addEventListener('dragleave', () => {
391
- uploadContainer.classList.remove('border-primary');
392
- });
393
-
394
- uploadContainer.addEventListener('drop', (e) => {
395
- e.preventDefault();
396
- uploadContainer.classList.remove('border-primary');
397
-
398
- if (e.dataTransfer.files.length) {
399
- handleFile(e.dataTransfer.files[0]);
400
- }
401
- });
402
-
403
- fileInput.addEventListener('change', () => {
404
- if (fileInput.files.length) {
405
- handleFile(fileInput.files[0]);
406
- }
407
- });
408
-
409
- function handleFile(file) {
410
- // Check if file type is supported
411
- const fileType = file.name.split('.').pop().toLowerCase();
412
- if (!['wav', 'mp3', 'ogg'].includes(fileType)) {
413
- showError('Unsupported file format. Please upload a .wav, .mp3, or .ogg file.');
414
- return;
415
- }
416
-
417
- // Display file info
418
- fileName.textContent = file.name;
419
- fileSize.textContent = formatFileSize(file.size);
420
- fileInfo.classList.remove('hidden');
421
- analyzeBtn.disabled = false;
422
- }
423
-
424
- function formatFileSize(bytes) {
425
- if (bytes < 1024) return bytes + ' bytes';
426
- else if (bytes < 1048576) return (bytes / 1024).toFixed(1) + ' KB';
427
- else return (bytes / 1048576).toFixed(1) + ' MB';
428
- }
429
-
430
- removeFile.addEventListener('click', () => {
431
- fileInput.value = '';
432
- fileInfo.classList.add('hidden');
433
- analyzeBtn.disabled = true;
434
- });
435
-
436
- // Analyze button
437
- analyzeBtn.addEventListener('click', () => {
438
- if (!fileInput.files.length) return;
439
-
440
- // Hide upload section and show loading
441
- loading.classList.remove('hidden');
442
- results.classList.add('hidden');
443
- errorAlert.classList.add('hidden');
444
-
445
- // Simulate API call with timeout
446
- setTimeout(() => {
447
- loading.classList.add('hidden');
448
-
449
- // Simulate random results
450
- const emotions = ['angry', 'disgusted', 'fearful', 'happy', 'neutral', 'sad'];
451
- const randomEmotion = emotions[Math.floor(Math.random() * emotions.length)];
452
- const confidence = Math.random() * 0.5 + 0.5; // Random between 50-100%
453
-
454
- // Generate random probabilities for all emotions
455
- const probabilities = {};
456
- let total = 0;
457
-
458
- emotions.forEach(emotion => {
459
- if (emotion === randomEmotion) {
460
- probabilities[emotion] = confidence;
461
- } else {
462
- probabilities[emotion] = Math.random() * (1 - confidence);
463
- total += probabilities[emotion];
464
- }
465
- });
466
-
467
- // Normalize other probabilities
468
- if (total > 0) {
469
- const scale = (1 - confidence) / total;
470
- emotions.forEach(emotion => {
471
- if (emotion !== randomEmotion) {
472
- probabilities[emotion] *= scale;
473
- }
474
- });
475
- }
476
-
477
- // Update UI with results
478
- displayResults(randomEmotion, confidence, probabilities);
479
-
480
- // Show toast notification
481
- showToast('Audio analysis complete!', 'success');
482
- }, 2000);
483
- });
484
-
485
- function displayResults(emotion, confidence, probabilities) {
486
- // Set emoji and label
487
- const emojis = {
488
- 'angry': '😠',
489
- 'disgusted': '🀒',
490
- 'fearful': '😨',
491
- 'happy': '😊',
492
- 'neutral': '😐',
493
- 'sad': '😒'
494
- };
495
-
496
- emotionEmoji.textContent = emojis[emotion];
497
- emotionLabel.textContent = emotion.charAt(0).toUpperCase() + emotion.slice(1);
498
-
499
- // Set confidence circle
500
- const circumference = 2 * Math.PI * 16;
501
- const offset = circumference - (confidence * circumference);
502
- confidenceCircle.style.strokeDasharray = `${circumference} ${circumference}`;
503
- confidenceCircle.style.strokeDashoffset = offset;
504
- confidenceText.textContent = `${Math.round(confidence * 100)}%`;
505
-
506
- // Update emotion bars
507
- Object.keys(probabilities).forEach(emotion => {
508
- const percentage = Math.round(probabilities[emotion] * 100);
509
- const bar = document.querySelector(`.emotion-bar[data-emotion="${emotion}"] .emotion-percentage`);
510
- const progress = document.querySelector(`.emotion-bar[data-emotion="${emotion}"] div div`);
511
-
512
- bar.textContent = `${percentage}%`;
513
- progress.style.width = `${percentage}%`;
514
- });
515
-
516
- // Set transcription (simulated)
517
- const transcriptions = [
518
- "Hello, this is a sample transcription of what I'm saying in this audio file.",
519
- "I'm really excited about this new project we're working on together.",
520
- "I don't think that's a good idea. We should reconsider our approach.",
521
- "The weather today is absolutely beautiful, perfect for a walk in the park.",
522
- "I'm feeling a bit under the weather today, might need to take it easy."
523
- ];
524
-
525
- transcriptionText.textContent = transcriptions[Math.floor(Math.random() * transcriptions.length)];
526
-
527
- // Show results section
528
- results.classList.remove('hidden');
529
- }
530
-
531
- // Copy transcription
532
- copyTranscription.addEventListener('click', () => {
533
- navigator.clipboard.writeText(transcriptionText.textContent)
534
- .then(() => {
535
- showToast('Transcription copied to clipboard!', 'success');
536
- })
537
- .catch(err => {
538
- showToast('Failed to copy text', 'error');
539
- });
540
- });
541
-
542
- // Error handling
543
- function showError(message) {
544
- errorMessage.textContent = message;
545
- errorAlert.classList.remove('hidden');
546
- }
547
-
548
- closeError.addEventListener('click', () => {
549
- errorAlert.classList.add('hidden');
550
- });
551
-
552
- // About modal
553
- aboutBtn.addEventListener('click', () => {
554
- aboutModal.classList.remove('hidden');
555
- });
556
-
557
- closeModal.addEventListener('click', () => {
558
- aboutModal.classList.add('hidden');
559
- });
560
-
561
- // Close modal when clicking outside
562
- aboutModal.addEventListener('click', (e) => {
563
- if (e.target === aboutModal) {
564
- aboutModal.classList.add('hidden');
565
- }
566
- });
567
-
568
- // Record modal
569
- recordBtn.addEventListener('click', () => {
570
- recordModal.classList.remove('hidden');
571
- });
572
-
573
- closeRecordModal.addEventListener('click', () => {
574
- recordModal.classList.add('hidden');
575
- });
576
-
577
- // Close record modal when clicking outside
578
- recordModal.addEventListener('click', (e) => {
579
- if (e.target === recordModal) {
580
- recordModal.classList.add('hidden');
581
- }
582
- });
583
-
584
- // Record functionality (simulated)
585
- let isRecording = false;
586
- let recordingTimer;
587
- let recordingSeconds = 0;
588
-
589
- recordButton.addEventListener('click', () => {
590
- if (!isRecording) {
591
- startRecording();
592
- } else {
593
- stopRecording();
594
- }
595
- });
596
-
597
- function startRecording() {
598
- isRecording = true;
599
- recordingSeconds = 0;
600
- recordButton.classList.remove('bg-red-500', 'hover:bg-red-600');
601
- recordButton.classList.add('bg-gray-500', 'hover:bg-gray-600', 'animate-pulse');
602
- recordStatus.textContent = 'Recording...';
603
- saveRecording.disabled = true;
604
-
605
- // Start timer
606
- recordingTimer = setInterval(() => {
607
- recordingSeconds++;
608
- const minutes = Math.floor(recordingSeconds / 60).toString().padStart(2, '0');
609
- const seconds = (recordingSeconds % 60).toString().padStart(2, '0');
610
- recordTimer.textContent = `${minutes}:${seconds}`;
611
-
612
- // Enable save button after 1 second
613
- if (recordingSeconds >= 1) {
614
- saveRecording.disabled = false;
615
- }
616
- }, 1000);
617
- }
618
-
619
- function stopRecording() {
620
- isRecording = false;
621
- clearInterval(recordingTimer);
622
- recordButton.classList.remove('bg-gray-500', 'hover:bg-gray-600', 'animate-pulse');
623
- recordButton.classList.add('bg-red-500', 'hover:bg-red-600');
624
- recordStatus.textContent = 'Recording stopped';
625
- }
626
-
627
- cancelRecording.addEventListener('click', () => {
628
- if (isRecording) {
629
- stopRecording();
630
- }
631
- recordModal.classList.add('hidden');
632
- recordStatus.textContent = 'Click to start recording';
633
- recordTimer.textContent = '00:00';
634
- saveRecording.disabled = true;
635
- });
636
-
637
- saveRecording.addEventListener('click', () => {
638
- if (isRecording) {
639
- stopRecording();
640
- }
641
- recordModal.classList.add('hidden');
642
-
643
- // Simulate file creation
644
- fileInfo.classList.remove('hidden');
645
- fileName.textContent = 'recording_' + new Date().toISOString().slice(0, 19).replace(/[-:T]/g, '') + '.wav';
646
- fileSize.textContent = '256 KB';
647
- analyzeBtn.disabled = false;
648
-
649
- showToast('Recording saved successfully!', 'success');
650
- });
651
-
652
- // Toast notifications
653
- function showToast(message, type = 'info') {
654
- const toast = document.createElement('div');
655
- toast.className = `flex items-center p-4 mb-3 rounded-lg shadow-md transition-all transform translate-x-full animate-toast-in ${
656
- type === 'success' ? 'bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100' :
657
- type === 'error' ? 'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100' :
658
- 'bg-blue-100 text-blue-800 dark:bg-blue-800 dark:text-blue-100'
659
- }`;
660
-
661
- const icon = document.createElement('div');
662
- icon.className = 'flex-shrink-0 mr-3';
663
- icon.innerHTML = `<i data-lucide="${
664
- type === 'success' ? 'check-circle' :
665
- type === 'error' ? 'alert-circle' : 'info'
666
- }" class="w-5 h-5"></i>`;
667
-
668
- const content = document.createElement('div');
669
- content.textContent = message;
670
-
671
- const closeBtn = document.createElement('button');
672
- closeBtn.className = 'ml-auto -mx-1.5 -my-1.5 rounded-lg p-1.5 inline-flex h-8 w-8 items-center justify-center hover:bg-opacity-25 hover:bg-gray-500';
673
- closeBtn.innerHTML = '<i data-lucide="x" class="w-4 h-4"></i>';
674
-
675
- toast.appendChild(icon);
676
- toast.appendChild(content);
677
- toast.appendChild(closeBtn);
678
-
679
- toastContainer.appendChild(toast);
680
- lucide.createIcons();
681
-
682
- closeBtn.addEventListener('click', () => {
683
- toast.classList.replace('animate-toast-in', 'animate-toast-out');
684
- setTimeout(() => {
685
- toast.remove();
686
- }, 300);
687
- });
688
-
689
- setTimeout(() => {
690
- toast.classList.replace('animate-toast-in', 'animate-toast-out');
691
- setTimeout(() => {
692
- toast.remove();
693
- }, 300);
694
- }, 5000);
695
- }
696
-
697
- // Add animation for toast
698
- const style = document.createElement('style');
699
- style.textContent = `
700
- @keyframes toastIn {
701
- from { transform: translateX(100%); opacity: 0; }
702
- to { transform: translateX(0); opacity: 1; }
703
- }
704
- @keyframes toastOut {
705
- from { transform: translateX(0); opacity: 1; }
706
- to { transform: translateX(100%); opacity: 0; }
707
- }
708
- .animate-toast-in {
709
- animation: toastIn 0.3s ease forwards;
710
- }
711
- .animate-toast-out {
712
- animation: toastOut 0.3s ease forwards;
713
- }
714
- `;
715
- document.head.appendChild(style);
716
- </script>
717
- </body>
718
- </html>
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Speech Emotion Detector</title>
7
+ <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600;700&family=Open+Sans:wght@400;500;600&display=swap" rel="stylesheet">
8
+ <script src="https://cdn.tailwindcss.com"></script>
9
+ <script src="https://unpkg.com/lucide@latest"></script>
10
+ <script src="https://unpkg.com/@lucide/web@latest"></script>
11
+ <script>
12
+ tailwind.config = {
13
+ darkMode: 'class',
14
+ theme: {
15
+ extend: {
16
+ colors: {
17
+ primary: {
18
+ DEFAULT: '#ed1b76',
19
+ dark: '#d01868',
20
+ light: '#ff3d8e'
21
+ },
22
+ accent: {
23
+ DEFAULT: '#249f9c',
24
+ dark: '#037a76',
25
+ light: '#3cbfbc'
26
+ }
27
+ },
28
+ fontFamily: {
29
+ poppins: ['Poppins', 'sans-serif'],
30
+ opensans: ['Open Sans', 'sans-serif']
31
+ }
32
+ }
33
+ }
34
+ }
35
+ </script>
36
+ <style type="text/tailwindcss">
37
+ @layer utilities {
38
+ .text-gradient {
39
+ @apply bg-gradient-to-r from-primary to-accent bg-clip-text text-transparent;
40
+ }
41
+ .progress-ring-circle {
42
+ transition: stroke-dashoffset 0.35s;
43
+ transform: rotate(-90deg);
44
+ transform-origin: 50% 50%;
45
+ }
46
+ }
47
+ </style>
48
+ </head>
49
+ <body class="font-opensans bg-gray-50 text-gray-800 dark:bg-gray-900 dark:text-gray-100 transition-colors duration-200">
50
+ <!-- Toast Notifications -->
51
+ <div id="toast-container" class="fixed top-4 right-4 z-50 flex flex-col gap-2"></div>
52
+
53
+ <!-- Dark Mode Toggle -->
54
+ <div class="absolute top-4 right-4 z-40">
55
+ <button id="theme-toggle" class="p-2 rounded-full bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 transition-colors">
56
+ <i data-lucide="sun" class="hidden dark:block w-5 h-5 text-yellow-400"></i>
57
+ <i data-lucide="moon" class="block dark:hidden w-5 h-5 text-gray-700"></i>
58
+ </button>
59
+ </div>
60
+
61
+ <!-- Hero Section -->
62
+ <header class="relative bg-gradient-to-b from-white to-gray-100 dark:from-gray-800 dark:to-gray-900 pt-16 pb-12 px-4 sm:px-6 lg:px-8 text-center">
63
+ <div class="max-w-4xl mx-auto">
64
+ <h1 class="font-poppins font-bold text-4xl md:text-5xl lg:text-6xl mb-4 text-gradient">
65
+ Speech Emotion Detector
66
+ </h1>
67
+ <p class="text-lg md:text-xl text-gray-600 dark:text-gray-300 mb-8">
68
+ Detect emotion and transcribe speech with a single upload
69
+ </p>
70
+ <div class="flex justify-center">
71
+ <button id="about-btn" class="flex items-center gap-2 px-4 py-2 rounded-full bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 transition-colors">
72
+ <i data-lucide="info" class="w-4 h-4"></i>
73
+ <span>How it works</span>
74
+ </button>
75
+ </div>
76
+ </div>
77
+ </header>
78
+
79
+ <main class="max-w-4xl mx-auto px-4 sm:px-6 lg:px-8 py-8">
80
+ <!-- Upload Section -->
81
+ <section class="mb-12">
82
+ <div id="upload-container" class="border-2 border-dashed border-gray-300 dark:border-gray-700 rounded-lg p-8 text-center transition-all hover:border-primary dark:hover:border-primary cursor-pointer">
83
+ <div class="flex flex-col items-center justify-center gap-4">
84
+ <div class="w-16 h-16 rounded-full bg-primary/10 flex items-center justify-center">
85
+ <i data-lucide="upload-cloud" class="w-8 h-8 text-primary"></i>
86
+ </div>
87
+ <div>
88
+ <h2 class="font-poppins font-semibold text-xl mb-2">Upload Audio File</h2>
89
+ <p class="text-gray-500 dark:text-gray-400 mb-4">Drag & drop your audio file here or click to browse</p>
90
+ <p class="text-sm text-gray-400 dark:text-gray-500">Supported formats: .wav, .mp3, .ogg</p>
91
+ </div>
92
+ <input type="file" id="file-input" class="hidden" accept=".wav,.mp3,.ogg">
93
+ </div>
94
+ </div>
95
+
96
+ <div id="file-info" class="hidden mt-4 p-4 bg-gray-100 dark:bg-gray-800 rounded-lg">
97
+ <div class="flex items-center justify-between">
98
+ <div class="flex items-center gap-3">
99
+ <i data-lucide="file-audio" class="w-6 h-6 text-primary"></i>
100
+ <div>
101
+ <p id="file-name" class="font-medium"></p>
102
+ <p id="file-size" class="text-sm text-gray-500 dark:text-gray-400"></p>
103
+ </div>
104
+ </div>
105
+ <button id="remove-file" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
106
+ <i data-lucide="x" class="w-5 h-5 text-gray-500 dark:text-gray-400"></i>
107
+ </button>
108
+ </div>
109
+ </div>
110
+
111
+ <div class="flex flex-wrap gap-4 mt-6">
112
+ <button id="analyze-btn" class="flex-1 bg-primary hover:bg-primary-dark text-white font-medium py-3 px-6 rounded-lg flex items-center justify-center gap-2 transition-colors disabled:opacity-50 disabled:cursor-not-allowed">
113
+ <i data-lucide="activity" class="w-5 h-5"></i>
114
+ <span>Analyze Audio</span>
115
+ </button>
116
+ <button id="record-btn" class="flex-1 bg-accent hover:bg-accent-dark text-white font-medium py-3 px-6 rounded-lg flex items-center justify-center gap-2 transition-colors">
117
+ <i data-lucide="mic" class="w-5 h-5"></i>
118
+ <span>Record Audio</span>
119
+ </button>
120
+ </div>
121
+ </section>
122
+
123
+ <!-- Loading Animation -->
124
+ <div id="loading" class="hidden">
125
+ <div class="flex flex-col items-center justify-center py-12">
126
+ <div class="relative w-20 h-20">
127
+ <div class="absolute top-0 left-0 w-full h-full border-4 border-gray-200 dark:border-gray-700 rounded-full"></div>
128
+ <div class="absolute top-0 left-0 w-full h-full border-4 border-t-primary border-r-transparent border-b-transparent border-l-transparent rounded-full animate-spin"></div>
129
+ </div>
130
+ <p class="mt-4 text-lg font-medium">Analyzing audio...</p>
131
+ <p class="text-gray-500 dark:text-gray-400">This may take a few moments</p>
132
+ </div>
133
+ </div>
134
+
135
+ <!-- Results Section -->
136
+ <section id="results" class="hidden">
137
+ <h2 class="font-poppins font-semibold text-2xl mb-6">Analysis Results</h2>
138
+
139
+ <div class="grid grid-cols-1 md:grid-cols-2 gap-6 mb-8">
140
+ <!-- Emotion Card -->
141
+ <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
142
+ <div class="p-6">
143
+ <h3 class="font-poppins font-medium text-lg mb-4">Detected Emotion</h3>
144
+ <div class="flex items-center justify-between">
145
+ <div class="flex items-center gap-4">
146
+ <div id="emotion-emoji" class="text-4xl"></div>
147
+ <div>
148
+ <p id="emotion-label" class="font-poppins font-semibold text-2xl"></p>
149
+ <p class="text-gray-500 dark:text-gray-400">Primary emotion</p>
150
+ </div>
151
+ </div>
152
+ <div class="relative w-20 h-20">
153
+ <svg class="w-full h-full" viewBox="0 0 36 36">
154
+ <circle cx="18" cy="18" r="16" fill="none" stroke-width="2" class="stroke-gray-200 dark:stroke-gray-700"></circle>
155
+ <circle id="confidence-circle" cx="18" cy="18" r="16" fill="none" stroke-width="2" class="stroke-primary progress-ring-circle" stroke-dasharray="100" stroke-dashoffset="0"></circle>
156
+ <text id="confidence-text" x="18" y="18" text-anchor="middle" dominant-baseline="middle" class="font-medium text-sm"></text>
157
+ </svg>
158
+ </div>
159
+ </div>
160
+ </div>
161
+ </div>
162
+
163
+ <!-- All Emotions Card -->
164
+ <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
165
+ <div class="p-6">
166
+ <h3 class="font-poppins font-medium text-lg mb-4">All Emotions</h3>
167
+ <div class="space-y-3">
168
+ <div class="emotion-bar" data-emotion="angry">
169
+ <div class="flex justify-between mb-1">
170
+ <span>Angry 😠</span>
171
+ <span class="emotion-percentage">0%</span>
172
+ </div>
173
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
174
+ <div class="bg-red-500 h-2.5 rounded-full" style="width: 0%"></div>
175
+ </div>
176
+ </div>
177
+ <div class="emotion-bar" data-emotion="disgusted">
178
+ <div class="flex justify-between mb-1">
179
+ <span>Disgusted 🀒</span>
180
+ <span class="emotion-percentage">0%</span>
181
+ </div>
182
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
183
+ <div class="bg-green-500 h-2.5 rounded-full" style="width: 0%"></div>
184
+ </div>
185
+ </div>
186
+ <div class="emotion-bar" data-emotion="fearful">
187
+ <div class="flex justify-between mb-1">
188
+ <span>Fearful 😨</span>
189
+ <span class="emotion-percentage">0%</span>
190
+ </div>
191
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
192
+ <div class="bg-purple-500 h-2.5 rounded-full" style="width: 0%"></div>
193
+ </div>
194
+ </div>
195
+ <div class="emotion-bar" data-emotion="happy">
196
+ <div class="flex justify-between mb-1">
197
+ <span>Happy 😊</span>
198
+ <span class="emotion-percentage">0%</span>
199
+ </div>
200
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
201
+ <div class="bg-yellow-500 h-2.5 rounded-full" style="width: 0%"></div>
202
+ </div>
203
+ </div>
204
+ <div class="emotion-bar" data-emotion="neutral">
205
+ <div class="flex justify-between mb-1">
206
+ <span>Neutral 😐</span>
207
+ <span class="emotion-percentage">0%</span>
208
+ </div>
209
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
210
+ <div class="bg-blue-400 h-2.5 rounded-full" style="width: 0%"></div>
211
+ </div>
212
+ </div>
213
+ <div class="emotion-bar" data-emotion="sad">
214
+ <div class="flex justify-between mb-1">
215
+ <span>Sad 😒</span>
216
+ <span class="emotion-percentage">0%</span>
217
+ </div>
218
+ <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
219
+ <div class="bg-blue-600 h-2.5 rounded-full" style="width: 0%"></div>
220
+ </div>
221
+ </div>
222
+ </div>
223
+ </div>
224
+ </div>
225
+ </div>
226
+
227
+ <!-- Transcription Card -->
228
+ <div class="bg-white dark:bg-gray-800 rounded-xl shadow-md overflow-hidden">
229
+ <div class="p-6">
230
+ <div class="flex justify-between items-center mb-4">
231
+ <h3 class="font-poppins font-medium text-lg">Transcription</h3>
232
+ <button id="copy-transcription" class="p-2 rounded-lg bg-gray-100 dark:bg-gray-700 hover:bg-gray-200 dark:hover:bg-gray-600 transition-colors">
233
+ <i data-lucide="copy" class="w-5 h-5"></i>
234
+ </button>
235
+ </div>
236
+ <div class="bg-gray-50 dark:bg-gray-900 rounded-lg p-4 min-h-[100px]">
237
+ <p id="transcription-text" class="whitespace-pre-wrap"></p>
238
+ </div>
239
+ </div>
240
+ </div>
241
+ </section>
242
+
243
+ <!-- Error Alert -->
244
+ <div id="error-alert" class="hidden bg-red-100 border-l-4 border-red-500 text-red-700 p-4 rounded-lg mt-6">
245
+ <div class="flex items-start">
246
+ <div class="flex-shrink-0">
247
+ <i data-lucide="alert-circle" class="w-5 h-5"></i>
248
+ </div>
249
+ <div class="ml-3">
250
+ <p id="error-message" class="text-sm font-medium"></p>
251
+ </div>
252
+ <button id="close-error" class="ml-auto -mx-1.5 -my-1.5 bg-red-100 text-red-500 rounded-lg p-1.5 hover:bg-red-200 inline-flex h-8 w-8 items-center justify-center">
253
+ <i data-lucide="x" class="w-5 h-5"></i>
254
+ </button>
255
+ </div>
256
+ </div>
257
+ </main>
258
+
259
+ <!-- About Modal -->
260
+ <div id="about-modal" class="fixed inset-0 z-50 hidden">
261
+ <div class="absolute inset-0 bg-black bg-opacity-50"></div>
262
+ <div class="relative top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 bg-white dark:bg-gray-800 rounded-xl shadow-xl max-w-lg w-full max-h-[90vh] overflow-y-auto">
263
+ <div class="p-6">
264
+ <div class="flex justify-between items-center mb-4">
265
+ <h2 class="font-poppins font-semibold text-2xl">How It Works</h2>
266
+ <button id="close-modal" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
267
+ <i data-lucide="x" class="w-6 h-6"></i>
268
+ </button>
269
+ </div>
270
+ <div class="space-y-4">
271
+ <p>The Speech Emotion Detector uses advanced machine learning algorithms to analyze audio and detect emotions in speech.</p>
272
+
273
+ <h3 class="font-poppins font-medium text-lg">How to use:</h3>
274
+ <ol class="list-decimal list-inside space-y-2">
275
+ <li>Upload an audio file (.wav, .mp3, or .ogg format)</li>
276
+ <li>Click "Analyze Audio" to process the file</li>
277
+ <li>View the detected emotion, confidence score, and transcription</li>
278
+ </ol>
279
+
280
+ <h3 class="font-poppins font-medium text-lg">Technology:</h3>
281
+ <p>This application uses:</p>
282
+ <ul class="list-disc list-inside space-y-1">
283
+ <li>Speech recognition for transcription</li>
284
+ <li>Audio feature extraction (MFCC, chroma, mel spectrogram)</li>
285
+ <li>Machine learning models trained on emotional speech datasets</li>
286
+ </ul>
287
+
288
+ <p>The system can detect six basic emotions: Angry, Disgusted, Fearful, Happy, Neutral, and Sad.</p>
289
+ </div>
290
+ </div>
291
+ </div>
292
+ </div>
293
+
294
+ <!-- Record Modal -->
295
+ <div id="record-modal" class="fixed inset-0 z-50 hidden">
296
+ <div class="absolute inset-0 bg-black bg-opacity-50"></div>
297
+ <div class="relative top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 bg-white dark:bg-gray-800 rounded-xl shadow-xl max-w-md w-full">
298
+ <div class="p-6">
299
+ <div class="flex justify-between items-center mb-4">
300
+ <h2 class="font-poppins font-semibold text-2xl">Record Audio</h2>
301
+ <button id="close-record-modal" class="p-1 rounded-full hover:bg-gray-200 dark:hover:bg-gray-700">
302
+ <i data-lucide="x" class="w-6 h-6"></i>
303
+ </button>
304
+ </div>
305
+ <div class="flex flex-col items-center justify-center py-8">
306
+ <div id="record-button" class="w-24 h-24 rounded-full bg-red-500 flex items-center justify-center cursor-pointer hover:bg-red-600 transition-colors mb-4">
307
+ <i data-lucide="mic" class="w-12 h-12 text-white"></i>
308
+ </div>
309
+ <p id="record-status" class="text-lg font-medium">Click to start recording</p>
310
+ <p id="record-timer" class="text-gray-500 dark:text-gray-400 mt-2">00:00</p>
311
+ </div>
312
+ <div class="flex justify-center gap-4">
313
+ <button id="cancel-recording" class="bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 text-gray-800 dark:text-gray-200 font-medium py-2 px-4 rounded-lg transition-colors">
314
+ Cancel
315
+ </button>
316
+ <button id="save-recording" class="bg-primary hover:bg-primary-dark text-white font-medium py-2 px-4 rounded-lg transition-colors disabled:opacity-50 disabled:cursor-not-allowed" disabled>
317
+ Use Recording
318
+ </button>
319
+ </div>
320
+ </div>
321
+ </div>
322
+ </div>
323
+
324
+ <script>
325
+ // Initialize Lucide icons
326
+ lucide.createIcons();
327
+
328
+ // DOM Elements
329
+ const themeToggle = document.getElementById('theme-toggle');
330
+ const uploadContainer = document.getElementById('upload-container');
331
+ const fileInput = document.getElementById('file-input');
332
+ const fileInfo = document.getElementById('file-info');
333
+ const fileName = document.getElementById('file-name');
334
+ const fileSize = document.getElementById('file-size');
335
+ const removeFile = document.getElementById('remove-file');
336
+ const analyzeBtn = document.getElementById('analyze-btn');
337
+ const recordBtn = document.getElementById('record-btn');
338
+ const loading = document.getElementById('loading');
339
+ const results = document.getElementById('results');
340
+ const emotionEmoji = document.getElementById('emotion-emoji');
341
+ const emotionLabel = document.getElementById('emotion-label');
342
+ const confidenceCircle = document.getElementById('confidence-circle');
343
+ const confidenceText = document.getElementById('confidence-text');
344
+ const transcriptionText = document.getElementById('transcription-text');
345
+ const copyTranscription = document.getElementById('copy-transcription');
346
+ const errorAlert = document.getElementById('error-alert');
347
+ const errorMessage = document.getElementById('error-message');
348
+ const closeError = document.getElementById('close-error');
349
+ const aboutBtn = document.getElementById('about-btn');
350
+ const aboutModal = document.getElementById('about-modal');
351
+ const closeModal = document.getElementById('close-modal');
352
+ const recordModal = document.getElementById('record-modal');
353
+ const closeRecordModal = document.getElementById('close-record-modal');
354
+ const recordButton = document.getElementById('record-button');
355
+ const recordStatus = document.getElementById('record-status');
356
+ const recordTimer = document.getElementById('record-timer');
357
+ const cancelRecording = document.getElementById('cancel-recording');
358
+ const saveRecording = document.getElementById('save-recording');
359
+ const toastContainer = document.getElementById('toast-container');
360
+
361
+ // Check for dark mode preference
362
+ if (localStorage.getItem('theme') === 'dark' ||
363
+ (!localStorage.getItem('theme') && window.matchMedia('(prefers-color-scheme: dark)').matches)) {
364
+ document.documentElement.classList.add('dark');
365
+ } else {
366
+ document.documentElement.classList.remove('dark');
367
+ }
368
+
369
+ // Theme toggle
370
+ themeToggle.addEventListener('click', () => {
371
+ if (document.documentElement.classList.contains('dark')) {
372
+ document.documentElement.classList.remove('dark');
373
+ localStorage.setItem('theme', 'light');
374
+ } else {
375
+ document.documentElement.classList.add('dark');
376
+ localStorage.setItem('theme', 'dark');
377
+ }
378
+ });
379
+
380
+ // File upload handling
381
+ uploadContainer.addEventListener('click', () => {
382
+ fileInput.click();
383
+ });
384
+
385
+ uploadContainer.addEventListener('dragover', (e) => {
386
+ e.preventDefault();
387
+ uploadContainer.classList.add('border-primary');
388
+ });
389
+
390
+ uploadContainer.addEventListener('dragleave', () => {
391
+ uploadContainer.classList.remove('border-primary');
392
+ });
393
+
394
+ uploadContainer.addEventListener('drop', (e) => {
395
+ e.preventDefault();
396
+ uploadContainer.classList.remove('border-primary');
397
+
398
+ if (e.dataTransfer.files.length) {
399
+ handleFile(e.dataTransfer.files[0]);
400
+ }
401
+ });
402
+
403
+ fileInput.addEventListener('change', () => {
404
+ if (fileInput.files.length) {
405
+ handleFile(fileInput.files[0]);
406
+ }
407
+ });
408
+
409
+ function handleFile(file) {
410
+ // Check if file type is supported
411
+ const fileType = file.name.split('.').pop().toLowerCase();
412
+ if (!['wav', 'mp3', 'ogg'].includes(fileType)) {
413
+ showError('Unsupported file format. Please upload a .wav, .mp3, or .ogg file.');
414
+ return;
415
+ }
416
+
417
+ // Display file info
418
+ fileName.textContent = file.name;
419
+ fileSize.textContent = formatFileSize(file.size);
420
+ fileInfo.classList.remove('hidden');
421
+ analyzeBtn.disabled = false;
422
+ }
423
+
424
+ function formatFileSize(bytes) {
425
+ if (bytes < 1024) return bytes + ' bytes';
426
+ else if (bytes < 1048576) return (bytes / 1024).toFixed(1) + ' KB';
427
+ else return (bytes / 1048576).toFixed(1) + ' MB';
428
+ }
429
+
430
+ removeFile.addEventListener('click', () => {
431
+ fileInput.value = '';
432
+ fileInfo.classList.add('hidden');
433
+ analyzeBtn.disabled = true;
434
+ });
435
+
436
+ // Analyze button
437
+ analyzeBtn.addEventListener('click', () => {
438
+ if (!fileInput.files.length) return;
439
+
440
+ // Hide upload section and show loading
441
+ loading.classList.remove('hidden');
442
+ results.classList.add('hidden');
443
+ errorAlert.classList.add('hidden');
444
+
445
+ // Simulate API call with timeout
446
+ setTimeout(() => {
447
+ loading.classList.add('hidden');
448
+
449
+ // Simulate random results
450
+ const emotions = ['angry', 'disgusted', 'fearful', 'happy', 'neutral', 'sad'];
451
+ const randomEmotion = emotions[Math.floor(Math.random() * emotions.length)];
452
+ const confidence = Math.random() * 0.5 + 0.5; // Random between 50-100%
453
+
454
+ // Generate random probabilities for all emotions
455
+ const probabilities = {};
456
+ let total = 0;
457
+
458
+ emotions.forEach(emotion => {
459
+ if (emotion === randomEmotion) {
460
+ probabilities[emotion] = confidence;
461
+ } else {
462
+ probabilities[emotion] = Math.random() * (1 - confidence);
463
+ total += probabilities[emotion];
464
+ }
465
+ });
466
+
467
+ // Normalize other probabilities
468
+ if (total > 0) {
469
+ const scale = (1 - confidence) / total;
470
+ emotions.forEach(emotion => {
471
+ if (emotion !== randomEmotion) {
472
+ probabilities[emotion] *= scale;
473
+ }
474
+ });
475
+ }
476
+
477
+ // Update UI with results
478
+ displayResults(randomEmotion, confidence, probabilities);
479
+
480
+ // Show toast notification
481
+ showToast('Audio analysis complete!', 'success');
482
+ }, 2000);
483
+ });
484
+
485
// Render the analysis results: headline emotion + emoji, the SVG
// confidence ring, per-emotion probability bars, and a (simulated)
// transcription. `confidence` and each probability are in [0, 1].
function displayResults(emotion, confidence, probabilities) {
    // Emoji lookup for each supported emotion label.
    const emojis = {
        'angry': '😠',
        'disgusted': '🤢',
        'fearful': '😨',
        'happy': '😊',
        'neutral': '😐',
        'sad': '😢'
    };

    // Fall back to a neutral face for any label missing from the map so
    // an unexpected emotion never renders the string "undefined".
    emotionEmoji.textContent = emojis[emotion] || '😐';
    emotionLabel.textContent = emotion.charAt(0).toUpperCase() + emotion.slice(1);

    // Confidence ring: stroke-dashoffset technique on an SVG circle (r = 16).
    const circumference = 2 * Math.PI * 16;
    const offset = circumference - (confidence * circumference);
    confidenceCircle.style.strokeDasharray = `${circumference} ${circumference}`;
    confidenceCircle.style.strokeDashoffset = offset;
    confidenceText.textContent = `${Math.round(confidence * 100)}%`;

    // Update each emotion's percentage label and bar width.
    // Guard the DOM lookups so an emotion without a matching bar in the
    // markup is skipped instead of throwing on a null element.
    // (Renamed the callback parameter to avoid shadowing `emotion`.)
    Object.keys(probabilities).forEach((key) => {
        const percentage = Math.round(probabilities[key] * 100);
        const bar = document.querySelector(`.emotion-bar[data-emotion="${key}"] .emotion-percentage`);
        const progress = document.querySelector(`.emotion-bar[data-emotion="${key}"] div div`);

        if (bar) {
            bar.textContent = `${percentage}%`;
        }
        if (progress) {
            progress.style.width = `${percentage}%`;
        }
    });

    // Simulated transcription — this demo has no real ASR backend.
    const transcriptions = [
        "Hello, this is a sample transcription of what I'm saying in this audio file.",
        "I'm really excited about this new project we're working on together.",
        "I don't think that's a good idea. We should reconsider our approach.",
        "The weather today is absolutely beautiful, perfect for a walk in the park.",
        "I'm feeling a bit under the weather today, might need to take it easy."
    ];

    transcriptionText.textContent = transcriptions[Math.floor(Math.random() * transcriptions.length)];

    // Reveal the results section.
    results.classList.remove('hidden');
}
530
+
531
// Copy the current transcription text to the system clipboard and
// report the outcome via a toast.
copyTranscription.addEventListener('click', async () => {
    try {
        await navigator.clipboard.writeText(transcriptionText.textContent);
        showToast('Transcription copied to clipboard!', 'success');
    } catch (err) {
        showToast('Failed to copy text', 'error');
    }
});
541
+
542
// Error handling: put `message` into the error banner and reveal it.
function showError(message) {
    errorMessage.textContent = message;
    errorAlert.classList.remove('hidden');
}
547
+
548
// --- Banner dismissal and modal wiring ------------------------------

// Tiny helpers: every overlay in this page is toggled via the
// Tailwind `hidden` utility class.
const showEl = (el) => el.classList.remove('hidden');
const hideEl = (el) => el.classList.add('hidden');

// Dismiss the error banner.
closeError.addEventListener('click', () => hideEl(errorAlert));

// About modal: open from the header button, close via the X button or
// by clicking the backdrop (clicks inside the dialog don't bubble to
// `e.target === aboutModal`).
aboutBtn.addEventListener('click', () => showEl(aboutModal));
closeModal.addEventListener('click', () => hideEl(aboutModal));
aboutModal.addEventListener('click', (e) => {
    if (e.target === aboutModal) {
        hideEl(aboutModal);
    }
});

// Record modal: same open/close pattern.
recordBtn.addEventListener('click', () => showEl(recordModal));
closeRecordModal.addEventListener('click', () => hideEl(recordModal));
recordModal.addEventListener('click', (e) => {
    if (e.target === recordModal) {
        hideEl(recordModal);
    }
});

// --- Recording state (simulated — no real microphone capture) -------

let isRecording = false;
let recordingTimer;
let recordingSeconds = 0;

// The round record button toggles between starting and stopping.
recordButton.addEventListener('click', () => (isRecording ? stopRecording() : startRecording()));
596
+
597
// Begin a (simulated) recording session: flip the button into its
// "recording" look, reset the elapsed counter, and start a 1 s ticker.
function startRecording() {
    isRecording = true;
    recordingSeconds = 0;
    recordButton.classList.remove('bg-red-500', 'hover:bg-red-600');
    recordButton.classList.add('bg-gray-500', 'hover:bg-gray-600', 'animate-pulse');
    recordStatus.textContent = 'Recording...';
    saveRecording.disabled = true;

    // Tick once per second: refresh the MM:SS display and unlock the
    // save button once at least one second has been captured.
    recordingTimer = setInterval(() => {
        recordingSeconds += 1;
        const mm = String(Math.floor(recordingSeconds / 60)).padStart(2, '0');
        const ss = String(recordingSeconds % 60).padStart(2, '0');
        recordTimer.textContent = `${mm}:${ss}`;

        if (recordingSeconds >= 1) {
            saveRecording.disabled = false;
        }
    }, 1000);
}
618
+
619
// End the recording session: halt the ticker and restore the idle
// button appearance.
function stopRecording() {
    isRecording = false;
    clearInterval(recordingTimer);
    recordButton.classList.add('bg-red-500', 'hover:bg-red-600');
    recordButton.classList.remove('bg-gray-500', 'hover:bg-gray-600', 'animate-pulse');
    recordStatus.textContent = 'Recording stopped';
}
626
+
627
// Abandon the recording: stop the timer if it's running, close the
// modal, and reset the dialog back to its initial state.
cancelRecording.addEventListener('click', () => {
    if (isRecording) {
        stopRecording();
    }
    recordModal.classList.add('hidden');
    recordStatus.textContent = 'Click to start recording';
    recordTimer.textContent = '00:00';
    saveRecording.disabled = true;
});

// Accept the recording: close the modal and pretend a .wav file was
// produced so the normal "analyze" flow can run on it.
saveRecording.addEventListener('click', () => {
    if (isRecording) {
        stopRecording();
    }
    recordModal.classList.add('hidden');

    // Timestamped name: ISO instant with the separators stripped.
    const stamp = new Date().toISOString().slice(0, 19).replace(/[-:T]/g, '');
    fileInfo.classList.remove('hidden');
    fileName.textContent = `recording_${stamp}.wav`;
    fileSize.textContent = '256 KB';
    analyzeBtn.disabled = false;

    showToast('Recording saved successfully!', 'success');
});
651
+
652
// Toast notifications: build a transient status banner, append it to
// the toast container, and remove it after 5 s or on manual dismissal.
// `type` selects the colour scheme and icon: 'success' | 'error' | 'info'.
function showToast(message, type = 'info') {
    const toast = document.createElement('div');
    toast.className = `flex items-center p-4 mb-3 rounded-lg shadow-md transition-all transform translate-x-full animate-toast-in ${
        type === 'success' ? 'bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100' :
        type === 'error' ? 'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100' :
        'bg-blue-100 text-blue-800 dark:bg-blue-800 dark:text-blue-100'
    }`;

    const icon = document.createElement('div');
    icon.className = 'flex-shrink-0 mr-3';
    icon.innerHTML = `<i data-lucide="${
        type === 'success' ? 'check-circle' :
        type === 'error' ? 'alert-circle' : 'info'
    }" class="w-5 h-5"></i>`;

    const content = document.createElement('div');
    // textContent (not innerHTML) so the message can never inject markup.
    content.textContent = message;

    const closeBtn = document.createElement('button');
    closeBtn.className = 'ml-auto -mx-1.5 -my-1.5 rounded-lg p-1.5 inline-flex h-8 w-8 items-center justify-center hover:bg-opacity-25 hover:bg-gray-500';
    closeBtn.innerHTML = '<i data-lucide="x" class="w-4 h-4"></i>';

    toast.appendChild(icon);
    toast.appendChild(content);
    toast.appendChild(closeBtn);

    toastContainer.appendChild(toast);
    // Re-scan the DOM so the freshly inserted data-lucide icons render.
    lucide.createIcons();

    // Single dismissal path shared by the close button and the auto-hide
    // timer. Cancelling the timer on manual close prevents the previous
    // duplicated logic from firing a redundant second dismissal.
    let autoHide;
    const dismiss = () => {
        clearTimeout(autoHide);
        toast.classList.replace('animate-toast-in', 'animate-toast-out');
        // Remove after the 0.3 s slide-out animation finishes.
        setTimeout(() => {
            toast.remove();
        }, 300);
    };

    closeBtn.addEventListener('click', dismiss);
    autoHide = setTimeout(dismiss, 5000);
}
696
+
697
// Inject the keyframes and helper classes used by the toast enter/leave
// animations (kept in JS so the toast component is self-contained).
const style = document.createElement('style');
style.textContent = `
    @keyframes toastIn {
        from { transform: translateX(100%); opacity: 0; }
        to { transform: translateX(0); opacity: 1; }
    }
    @keyframes toastOut {
        from { transform: translateX(0); opacity: 1; }
        to { transform: translateX(100%); opacity: 0; }
    }
    .animate-toast-in {
        animation: toastIn 0.3s ease forwards;
    }
    .animate-toast-out {
        animation: toastOut 0.3s ease forwards;
    }
`;
document.head.appendChild(style);
716
+ </script>
717
+ </body>
718
+ </html>