File size: 15,961 Bytes
67eb08f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
!pip install fastapi uvicorn python-multipart librosa numpy ai-edge-litert pycloudflared nest-asyncio
import asyncio
import io
import os
import shutil
import tempfile
import threading
from typing import Tuple, Optional

import librosa
import numpy as np
import uvicorn
from ai_edge_litert.interpreter import Interpreter
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, JSONResponse
from numpy.lib.stride_tricks import as_strided
from pycloudflared import try_cloudflare
from pydub import AudioSegment

# ==========================================
# 1. CORE LOGIC (kept unchanged)
# ==========================================

def mel_scale_scalar(freq: float) -> float:
    """Convert a single frequency in Hz to the mel scale (HTK/Kaldi constant 1127)."""
    warped = freq / 700.0 + 1.0
    return 1127.0 * np.log(warped)

def mel_scale(freq: np.ndarray) -> np.ndarray:
    """Vectorized Hz-to-mel conversion; array counterpart of mel_scale_scalar."""
    return 1127.0 * np.log(freq / 700.0 + 1.0)

def inverse_mel_scale(mel: np.ndarray) -> np.ndarray:
    """Convert mel values back to Hz; exact inverse of mel_scale."""
    return (np.exp(mel / 1127.0) - 1.0) * 700.0

def get_mel_banks(num_bins, window_length_padded, sample_freq, low_freq, high_freq, vtln_low, vtln_high, vtln_warp_factor):
    """Build Kaldi-style triangular mel filter banks.

    Returns:
        bins: (num_bins, window_length_padded // 2) array of triangular
              filter weights over the onesided FFT bins.
        center_freqs: (num_bins,) center frequency in Hz of each filter.

    NOTE(review): vtln_low and vtln_warp_factor are accepted for signature
    compatibility but unused here — no VTLN warping is applied.
    """
    assert num_bins > 3
    assert window_length_padded % 2 == 0
    num_fft_bins = window_length_padded // 2
    nyquist = 0.5 * sample_freq
    # Non-positive high_freq is interpreted as an offset from Nyquist.
    if high_freq <= 0.0: high_freq += nyquist
    fft_bin_width = sample_freq / window_length_padded
    mel_low_freq = mel_scale_scalar(low_freq)
    mel_high_freq = mel_scale_scalar(high_freq)
    # num_bins + 1 gaps so every triangle has room for both slopes.
    mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)
    if vtln_high < 0.0: vtln_high += nyquist
    # Renamed from `bin`, which shadowed the builtin.
    bin_idx = np.arange(num_bins)[:, np.newaxis]
    left_mel = mel_low_freq + bin_idx * mel_freq_delta
    center_mel = mel_low_freq + (bin_idx + 1.0) * mel_freq_delta
    right_mel = mel_low_freq + (bin_idx + 2.0) * mel_freq_delta
    center_freqs = inverse_mel_scale(center_mel).squeeze(-1)
    # Mel position of every FFT bin, broadcast against each triangle.
    mel = mel_scale(fft_bin_width * np.arange(num_fft_bins))[np.newaxis, :]
    up_slope = (mel - left_mel) / (center_mel - left_mel)
    down_slope = (right_mel - mel) / (right_mel - center_mel)
    # Triangle = min of the two slopes, clipped at zero outside the support.
    bins = np.maximum(0.0, np.minimum(up_slope, down_slope))
    return bins, center_freqs

def stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode="reflect", normalized=False, onesided=True, return_complex=True):
    """NumPy reimplementation of a torch.stft-style short-time Fourier transform.

    `input` is a 1-D signal or a (batch, samples) array; the frame view is
    built with stride tricks so no per-frame copy is made before windowing.
    Returns complex spectra of shape (..., n_frames, bins) when
    return_complex=True, otherwise a trailing axis of (real, imag).
    NOTE: the parameter name `input` shadows the builtin but is part of the
    keyword-call interface (callers use input=x), so it must stay.
    """
    # torch.stft-compatible defaults.
    if hop_length is None: hop_length = n_fft // 4
    if win_length is None: win_length = n_fft
    if window is None: window = np.ones(win_length)
    if len(window) < n_fft:
        # Center-pad a short window out to n_fft samples.
        pad_width = (n_fft - len(window)) // 2
        window = np.pad(window, (pad_width, n_fft - len(window) - pad_width))

    input = np.asarray(input)
    if input.ndim == 1:
        # Promote to a batch of one; remember to squeeze it back at the end.
        input = input[np.newaxis, :]
        squeeze_batch = True
    else:
        squeeze_batch = False

    if center:
        # Pad so frame centers align with sample positions (torch semantics).
        pad_width = int(n_fft // 2)
        input = np.pad(input, ((0, 0), (pad_width, pad_width)), mode=pad_mode)

    n_frames = 1 + (input.shape[-1] - n_fft) // hop_length
    frame_length = n_fft
    frame_step = hop_length
    frame_stride = input.strides[-1]
    # Zero-copy (batch, n_frames, n_fft) view of overlapping frames.
    shape = (input.shape[0], n_frames, frame_length)
    strides = (input.strides[0], frame_step * frame_stride, frame_stride)
    frames = as_strided(input, shape=shape, strides=strides, writeable=False)
    frames = frames * window
    stft_matrix = np.fft.fft(frames, n=n_fft, axis=-1)

    if normalized: stft_matrix = stft_matrix / np.sqrt(n_fft)
    # Real input => keep only the non-redundant half of the spectrum.
    if onesided: stft_matrix = stft_matrix[..., :(n_fft // 2) + 1]

    result = stft_matrix if return_complex else np.stack((stft_matrix.real, stft_matrix.imag), axis=-1)
    if squeeze_batch: result = result[0]
    return result

class MelSTFT:
    """Log-mel spectrogram extractor: pre-emphasis -> STFT power -> mel projection.

    Output of __call__ is roughly normalized to [0, 1] via (log(mel)+4.5)/5.
    """

    def __init__(self, n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, fmin=0.0, fmax=None):
        self.n_mels = n_mels
        self.sr = sr
        self.win_length = win_length
        self.hopsize = hopsize
        self.n_fft = n_fft
        self.fmin = fmin
        # Default fmax leaves a 1 kHz guard band below Nyquist.
        self.fmax = fmax if fmax else sr // 2 - 1000
        self.window = np.hanning(win_length)
        self.mel_basis, _ = get_mel_banks(self.n_mels, self.n_fft, self.sr, self.fmin, self.fmax, 100.0, -500., 1.0)
        # Pad one zero column so the basis matches the onesided bin count (n_fft//2 + 1).
        self.mel_basis = np.pad(self.mel_basis, ((0, 0), (0, 1)), mode='constant', constant_values=0)
        # Two-tap pre-emphasis kernel: y[i] = -0.97*x[i] + x[i+1].
        self.preemphasis_coefficient = np.array([-.97, 1]).reshape(1, 1, 2)

    def preemphasis(self, x):
        """Apply the high-pass pre-emphasis filter y[i] = x[i+1] - 0.97*x[i].

        Vectorized with array slicing (the previous version ran a Python loop
        over every sample). Returns shape (1, len(x) - 1).
        """
        x = np.asarray(x).reshape(-1)
        c = self.preemphasis_coefficient[0, 0]
        result = c[0] * x[:-1] + c[1] * x[1:]
        return result[np.newaxis, :]

    def __call__(self, x):
        """Compute the normalized log-mel spectrogram of waveform batch x."""
        x = self.preemphasis(x)
        spec_x = stft(input=x, n_fft=self.n_fft, hop_length=self.hopsize, win_length=self.win_length, window=self.window, return_complex=False)
        # Power spectrum = real^2 + imag^2 (trailing axis holds the pair).
        spec_x = np.sum(spec_x ** 2, axis=-1)
        melspec = np.dot(self.mel_basis, spec_x.transpose(0, 2, 1)).transpose(1, 0, 2)
        melspec = np.log(melspec + 1e-5)
        # Rough normalization so values land near [0, 1].
        melspec = (melspec + 4.5) / 5.
        return melspec

def softmax(x):
    """Numerically stable softmax over the last axis (shifts by the global max)."""
    shifted = x - np.max(x)
    weights = np.exp(shifted)
    return weights / weights.sum(axis=-1, keepdims=True)

# ==========================================
# 2. SETUP BACKEND
# ==========================================

app = FastAPI()
# Wide-open CORS: the public URL is a throwaway Cloudflare tunnel that
# changes on every run, so the browser origin cannot be pinned in advance.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Colab-local path to the TFLite emotion model loaded at startup.
MODEL_PATH = '/content/emotion_model_2025_08_18212.tflite'
interpreter = None      # set by load_model() on startup
input_details = None    # TFLite input tensor metadata, set on startup
output_details = None   # TFLite output tensor metadata, set on startup
model_lock = threading.Lock()  # serializes access to the single interpreter

# Shared feature extractor; parameters must match the model's training setup.
mel_processor = MelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320)
# Class order must match the model's output index order — TODO confirm.
CLASSES = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# NOTE(review): on_event("startup") is deprecated in recent FastAPI in favor
# of lifespan handlers; kept as-is since it still works here.
@app.on_event("startup")
def load_model():
    """Load the TFLite model once at server startup into module globals."""
    global interpreter, input_details, output_details
    try:
        interpreter = Interpreter(model_path=MODEL_PATH)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print("✅ Model loaded successfully!")
    except Exception as e:
        # Best-effort: log and keep serving; /predict reports "Model not loaded".
        print(f"❌ Error loading model: {e}")

# ==========================================
# 3. FRONTEND INTERFACE (WITH REPLAY ADDED)
# ==========================================

# Single-page frontend served at "/": records microphone audio via
# MediaRecorder (WebM), offers client-side replay, POSTs the blob to
# /predict and renders per-class score bars from the JSON response.
# The string is runtime content and is kept byte-identical.
html_content = """
<!DOCTYPE html>
<html>
<head>
    <title>AI Emotion Detection</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <style>
        body { font-family: 'Segoe UI', sans-serif; text-align: center; padding: 20px; background: #f0f2f5; color: #333; }
        .container { max-width: 600px; margin: 0 auto; background: white; padding: 30px; border-radius: 16px; box-shadow: 0 4px 15px rgba(0,0,0,0.1); }
        h1 { color: #2c3e50; margin-bottom: 5px; }
        p { color: #7f8c8d; }

        button { padding: 15px 30px; font-size: 18px; cursor: pointer; border-radius: 50px; border: none; margin: 20px auto; transition: 0.3s; display: block; width: 80%; font-weight: bold;}
        #recordBtn { background-color: #ff4757; color: white; box-shadow: 0 4px 10px rgba(255, 71, 87, 0.3); }
        #recordBtn:hover { background-color: #ff6b81; transform: translateY(-2px); }
        #recordBtn.recording { background-color: #2ed573; animation: pulse 1.5s infinite; }

        #playbackContainer { display: none; margin: 20px 0; padding: 15px; background: #f1f2f6; border-radius: 10px; }
        audio { width: 100%; outline: none; }

        #status { margin: 10px 0; font-style: italic; color: #666; height: 20px;}

        #results { margin-top: 30px; text-align: left; }
        .bar-container { margin-bottom: 12px; display: flex; align-items: center; }
        .label { font-weight: bold; width: 70px; font-size: 14px; }
        .bar-bg { flex-grow: 1; background: #dfe4ea; height: 12px; border-radius: 6px; margin: 0 10px; overflow: hidden;}
        .bar-fill { height: 100%; background: linear-gradient(90deg, #3498db, #2980b9); border-radius: 6px; width: 0%; transition: width 0.6s ease-out; }
        .percent { width: 40px; font-size: 14px; color: #555; text-align: right;}

        @keyframes pulse { 0% { box-shadow: 0 0 0 0 rgba(46, 213, 115, 0.7); } 70% { box-shadow: 0 0 0 15px rgba(46, 213, 115, 0); } 100% { box-shadow: 0 0 0 0 rgba(46, 213, 115, 0); } }
    </style>
</head>
<body>
    <div class="container">
        <h1>🎙️ Cảm xúc giọng nói</h1>
        <p>Hệ thống phân tích cảm xúc qua giọng nói (AI)</p>

        <button id="recordBtn" onclick="toggleRecording()">Bắt đầu Ghi âm</button>
        <div id="status">Sẵn sàng</div>

        <div id="playbackContainer">
            <p style="margin: 0 0 10px 0; font-size: 14px;">🎧 Nghe lại giọng của bạn:</p>
            <audio id="audioPlayer" controls></audio>
        </div>

        <div id="results"></div>
    </div>

    <script>
        let mediaRecorder;
        let audioChunks = [];
        let isRecording = false;

        async function toggleRecording() {
            const btn = document.getElementById('recordBtn');
            const status = document.getElementById('status');
            const playbackContainer = document.getElementById('playbackContainer');
            const resultsContainer = document.getElementById('results');

            if (!isRecording) {
                // BẮT ĐẦU GHI
                try {
                    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
                    mediaRecorder = new MediaRecorder(stream);
                    audioChunks = [];

                    // Ẩn kết quả cũ khi ghi âm mới
                    playbackContainer.style.display = 'none';
                    resultsContainer.innerHTML = '';

                    mediaRecorder.ondataavailable = event => {
                        audioChunks.push(event.data);
                    };

                    mediaRecorder.onstop = async () => {
                        // Tạo blob audio
                        const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });

                        // 1. TẠO URL ĐỂ NGHE LẠI (CLIENT-SIDE)
                        const audioUrl = URL.createObjectURL(audioBlob);
                        const audioPlayer = document.getElementById('audioPlayer');
                        audioPlayer.src = audioUrl;
                        playbackContainer.style.display = 'block'; // Hiện trình phát

                        // 2. GỬI LÊN SERVER
                        uploadAudio(audioBlob);
                    };

                    mediaRecorder.start();
                    isRecording = true;
                    btn.textContent = "⏹ Dừng & Phân tích";
                    btn.classList.add("recording");
                    status.textContent = "Đang thu âm...";
                } catch (err) {
                    alert("Không thể truy cập microphone: " + err);
                }
            } else {
                // DỪNG GHI
                mediaRecorder.stop();
                isRecording = false;
                btn.textContent = "🎙️ Bắt đầu Ghi âm mới";
                btn.classList.remove("recording");
                status.textContent = "Đang gửi dữ liệu...";
            }
        }

        async function uploadAudio(blob) {
            const formData = new FormData();
            formData.append("file", blob, "recording.webm");

            try {
                const response = await fetch("/predict", {
                    method: "POST",
                    body: formData
                });

                if (!response.ok) {
                    throw new Error(`Server error: ${response.status}`);
                }

                const data = await response.json();
                displayResults(data);
                document.getElementById('status').textContent = "Hoàn tất!";
            } catch (error) {
                console.error("Error:", error);
                document.getElementById('status').textContent = "Lỗi: " + error.message;
            }
        }

        function displayResults(data) {
            const container = document.getElementById('results');
            container.innerHTML = "<h3>📊 Kết quả phân tích:</h3>";

            data.results.forEach(item => {
                const percentage = (item.score * 100).toFixed(1);
                // Đổi màu thanh bar nếu > 50%
                let barColor = percentage > 50 ? '#2ed573' : 'linear-gradient(90deg, #3498db, #2980b9)';

                const html = `
                    <div class="bar-container">
                        <span class="label">${item.label}</span>
                        <div class="bar-bg">
                            <div class="bar-fill" style="width: ${percentage}%; background: ${barColor}"></div>
                        </div>
                        <span class="percent">${percentage}%</span>
                    </div>
                `;
                container.innerHTML += html;
            });
        }
    </script>
</body>
</html>
"""

@app.get("/", response_class=HTMLResponse)
async def home():
    """Serve the single-page recorder/analysis UI."""
    return HTMLResponse(content=html_content)

# ==========================================
# 4. API PREDICT (PYDUB FIX FOR WEBM)
# ==========================================

@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify the emotion of an uploaded audio clip.

    Pipeline: save upload -> pydub convert (WebM -> 32 kHz mono WAV, since
    librosa cannot decode WebM directly) -> librosa load -> log-mel features
    padded/cropped to 400 frames -> TFLite inference -> softmax scores,
    returned sorted by descending score.
    """
    # Unique per-request temp files: the previous fixed names were a race
    # condition — two concurrent requests would overwrite each other's audio.
    webm_fd, webm_filename = tempfile.mkstemp(suffix=".webm")
    os.close(webm_fd)
    wav_fd, wav_filename = tempfile.mkstemp(suffix=".wav")
    os.close(wav_fd)

    try:
        # 1. Persist the raw WebM upload.
        with open(webm_filename, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # 2. Convert WebM -> WAV at the model's sample rate.
        audio = AudioSegment.from_file(webm_filename)
        audio = audio.set_frame_rate(32000).set_channels(1)
        audio.export(wav_filename, format="wav")

        # 3. Decode the WAV to a float waveform.
        waveform, _ = librosa.load(wav_filename, sr=32000, mono=True)

    except Exception as e:
        import traceback
        traceback.print_exc()
        return JSONResponse(status_code=500, content={"error": f"Lỗi xử lý file: {str(e)}"})

    finally:
        # Always remove both temp files, success or failure.
        if os.path.exists(webm_filename): os.remove(webm_filename)
        if os.path.exists(wav_filename): os.remove(wav_filename)

    if interpreter is None:
        return JSONResponse(status_code=500, content={"error": "Model not loaded"})
    if waveform.size == 0:
        # An empty decode would crash feature extraction below.
        return JSONResponse(status_code=400, content={"error": "Empty audio"})

    # 4. Features: (1, n_mels, time), fixed to 400 frames for the model input.
    waveform = np.stack([waveform])
    spec = mel_processor(waveform)

    target_len = 400
    if spec.shape[-1] > target_len:
        spec = spec[:, :, :target_len]
    elif spec.shape[-1] < target_len:
        spec = np.pad(spec, ((0, 0), (0, 0), (0, target_len - spec.shape[-1])), mode='constant')

    spec = np.expand_dims(spec, axis=0).astype(np.float32)

    # 5. Inference — the lock serializes access to the single interpreter,
    # which is not safe for concurrent invoke() calls.
    with model_lock:
        interpreter.set_tensor(input_details[0]['index'], spec)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])

    preds = softmax(output_data[0])

    # Scores sorted descending, paired with their class labels.
    order = np.argsort(preds)[::-1]
    results = [{"label": CLASSES[i], "score": float(preds[i])} for i in order]

    return {"results": results}

# ==========================================
# 5. RUN SERVER (COLAB FIX)
# ==========================================

if __name__ == "__main__":
    import nest_asyncio
    import uvicorn
    from pycloudflared import try_cloudflare

    # Colab already runs an event loop; nest_asyncio lets us start another.
    nest_asyncio.apply()

    print("🚀 Đang khởi động Cloudflare Tunnel...")
    tunnel_url = try_cloudflare(port=8000)
    print(f"🔗 PUBLIC URL CỦA BẠN: {tunnel_url.tunnel}")
    print("👉 Click link trên để truy cập Web App")

    config = uvicorn.Config(app, host="0.0.0.0", port=8000)
    server = uvicorn.Server(config)
    # BUG FIX: `await server.serve()` at module level is a SyntaxError —
    # `await` is only legal inside an async function. Drive the coroutine
    # with asyncio.run (works here because nest_asyncio is applied above).
    asyncio.run(server.serve())