|
|
!pip install fastapi uvicorn python-multipart librosa numpy ai-edge-litert pycloudflared nest-asyncio |
|
|
# Standard library
import asyncio
import io
import os
import shutil
import tempfile
import threading
from typing import Tuple, Optional

# Third-party
import librosa
import numpy as np
import uvicorn
from ai_edge_litert.interpreter import Interpreter
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, JSONResponse
from numpy.lib.stride_tricks import as_strided
from pycloudflared import try_cloudflare
from pydub import AudioSegment
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def mel_scale_scalar(freq: float) -> float:
    """Convert a single frequency in Hz to the HTK-style mel scale."""
    ratio = freq / 700.0
    return 1127.0 * np.log(1.0 + ratio)
|
|
|
|
|
def mel_scale(freq: np.ndarray) -> np.ndarray:
    """Vectorized Hz -> HTK mel conversion (elementwise over *freq*)."""
    ratio = freq / 700.0
    return 1127.0 * np.log(1.0 + ratio)
|
|
|
|
|
def inverse_mel_scale(mel: np.ndarray) -> np.ndarray:
    """Vectorized HTK mel -> Hz conversion; exact inverse of mel_scale."""
    expanded = np.exp(mel / 1127.0)
    return 700.0 * (expanded - 1.0)
|
|
|
|
|
def get_mel_banks(num_bins, window_length_padded, sample_freq, low_freq, high_freq, vtln_low, vtln_high, vtln_warp_factor):
    """Build a triangular mel filter bank (Kaldi-style).

    Returns a tuple ``(bank, center_freqs)`` where ``bank`` has shape
    ``(num_bins, window_length_padded // 2)`` and ``center_freqs`` holds the
    center frequency (Hz) of each triangle.

    NOTE(review): VTLN warping is not implemented here — ``vtln_low`` and
    ``vtln_warp_factor`` are accepted but unused, and only ``vtln_high`` is
    normalized against Nyquist; the caller passes warp factor 1.0.
    """
    assert num_bins > 3
    assert window_length_padded % 2 == 0

    num_fft_bins = window_length_padded // 2
    nyquist = 0.5 * sample_freq
    # Non-positive high_freq means "offset from Nyquist".
    if high_freq <= 0.0:
        high_freq += nyquist

    fft_bin_width = sample_freq / window_length_padded
    mel_lo = mel_scale_scalar(low_freq)
    mel_hi = mel_scale_scalar(high_freq)
    mel_step = (mel_hi - mel_lo) / (num_bins + 1)

    # Same Nyquist-relative convention for the VTLN upper cutoff.
    if vtln_high < 0.0:
        vtln_high += nyquist

    # One row per mel band: left/center/right edges in mel space.
    band = np.arange(num_bins)[:, np.newaxis]
    left_mel = mel_lo + band * mel_step
    center_mel = mel_lo + (band + 1.0) * mel_step
    right_mel = mel_lo + (band + 2.0) * mel_step

    center_freqs = inverse_mel_scale(center_mel).squeeze(-1)

    # Mel position of every FFT bin, broadcast against the band edges.
    fft_mels = mel_scale(fft_bin_width * np.arange(num_fft_bins))[np.newaxis, :]
    rising = (fft_mels - left_mel) / (center_mel - left_mel)
    falling = (right_mel - fft_mels) / (right_mel - center_mel)

    # Triangle = min of the two slopes, clipped at zero outside the band.
    bank = np.maximum(0.0, np.minimum(rising, falling))
    return bank, center_freqs
|
|
|
|
|
def stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode="reflect", normalized=False, onesided=True, return_complex=True):
    """NumPy short-time Fourier transform (signature mirrors ``torch.stft``).

    Accepts a 1-D signal or a batch ``(batch, time)``. Returns either a
    complex array or, when ``return_complex`` is False, a real array with a
    trailing axis of size 2 holding (real, imag).
    """
    hop = n_fft // 4 if hop_length is None else hop_length
    w_len = n_fft if win_length is None else win_length
    win = np.ones(w_len) if window is None else window

    # Center-pad the window up to the FFT size, as torch.stft does.
    if len(win) < n_fft:
        left = (n_fft - len(win)) // 2
        win = np.pad(win, (left, n_fft - len(win) - left))

    signal = np.asarray(input)
    was_1d = signal.ndim == 1
    if was_1d:
        signal = signal[np.newaxis, :]

    if center:
        half = int(n_fft // 2)
        signal = np.pad(signal, ((0, 0), (half, half)), mode=pad_mode)

    # Zero-copy framing: overlapping windows as a strided view.
    num_frames = 1 + (signal.shape[-1] - n_fft) // hop
    elem_stride = signal.strides[-1]
    frames = as_strided(
        signal,
        shape=(signal.shape[0], num_frames, n_fft),
        strides=(signal.strides[0], hop * elem_stride, elem_stride),
        writeable=False,
    )

    spectrum = np.fft.fft(frames * win, n=n_fft, axis=-1)

    if normalized:
        spectrum = spectrum / np.sqrt(n_fft)
    if onesided:
        spectrum = spectrum[..., : (n_fft // 2) + 1]
    if not return_complex:
        spectrum = np.stack((spectrum.real, spectrum.imag), axis=-1)

    return spectrum[0] if was_1d else spectrum
|
|
|
|
|
class MelSTFT:
    """Log-mel spectrogram front end.

    Pipeline: pre-emphasis -> windowed STFT -> power spectrum -> mel filter
    bank -> log -> fixed shift/scale.
    """

    def __init__(self, n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, fmin=0.0, fmax=None):
        self.n_mels = n_mels
        self.sr = sr
        self.win_length = win_length
        self.hopsize = hopsize
        self.n_fft = n_fft
        self.fmin = fmin
        # Default upper band edge leaves a 1 kHz guard below Nyquist.
        self.fmax = fmax if fmax else sr // 2 - 1000
        self.window = np.hanning(win_length)
        # VTLN arguments are fixed; warp factor 1.0 means no warping.
        self.mel_basis, _ = get_mel_banks(self.n_mels, self.n_fft, self.sr, self.fmin, self.fmax, 100.0, -500., 1.0)
        # Add one zero column so the basis matches the onesided FFT size
        # (n_fft // 2 + 1 bins).
        self.mel_basis = np.pad(self.mel_basis, ((0, 0), (0, 1)), mode='constant', constant_values=0)
        # 2-tap cross-correlation kernel: y[i] = 1.0*x[i+1] - 0.97*x[i].
        self.preemphasis_coefficient = np.array([-.97, 1]).reshape(1, 1, 2)

    def preemphasis(self, x):
        """Apply the pre-emphasis filter y[i] = x[i+1] - 0.97 * x[i].

        *x* is flattened to a single channel; returns shape (1, len(x) - 1).
        """
        # Vectorized "valid" cross-correlation with the 2-tap kernel above.
        # Replaces the original element-by-element Python loop; numerically
        # identical (same per-sample add order, float64 accumulation).
        flat = np.asarray(x).reshape(-1)
        c0, c1 = self.preemphasis_coefficient[0, 0]
        out = c0 * flat[:-1] + c1 * flat[1:]
        return out[np.newaxis, :]

    def __call__(self, x):
        """Return the normalized log-mel spectrogram, shape (batch, n_mels, frames)."""
        x = self.preemphasis(x)
        spec_x = stft(input=x, n_fft=self.n_fft, hop_length=self.hopsize, win_length=self.win_length, window=self.window, return_complex=False)
        # Power spectrum: real^2 + imag^2 (trailing axis holds re/im).
        spec_x = np.sum(spec_x ** 2, axis=-1)
        melspec = np.dot(self.mel_basis, spec_x.transpose(0, 2, 1)).transpose(1, 0, 2)
        # Floor before log to avoid log(0).
        melspec = np.log(melspec + 1e-5)
        # Fixed shift/scale of the log-mel values — presumably matches the
        # model's training-time input normalization; confirm against training code.
        melspec = (melspec + 4.5) / 5.
        return melspec
|
|
|
|
|
def softmax(x):
    """Numerically stable softmax over the last axis.

    The global maximum is subtracted before exponentiation so large logits
    cannot overflow.
    """
    shifted = x - np.max(x)
    weights = np.exp(shifted)
    return weights / weights.sum(axis=-1, keepdims=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- FastAPI application and shared inference state ----------------------
app = FastAPI()

# Wide-open CORS: the UI is reached through a Cloudflare tunnel URL that
# differs from the API origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Path to the TFLite emotion model (Colab filesystem path).
MODEL_PATH = '/content/emotion_model_2025_08_18212.tflite'
# Populated by load_model() at startup; all three stay None if loading fails.
interpreter = None
input_details = None
output_details = None
# Serializes interpreter access across concurrent requests.
model_lock = threading.Lock()

# Shared mel-spectrogram front end; audio is resampled to 32 kHz mono upstream.
mel_processor = MelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320)
# Output-index -> label mapping; order must match the model's output layer.
CLASSES = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
|
|
|
|
|
@app.on_event("startup")
def load_model():
    """Load the TFLite model once at server startup.

    On failure the error is printed and the globals stay None; /predict
    reports "Model not loaded" instead of crashing the server.
    """
    global interpreter, input_details, output_details
    try:
        interpreter = Interpreter(model_path=MODEL_PATH)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
    except Exception as exc:
        print(f"❌ Error loading model: {exc}")
    else:
        print("✅ Model loaded successfully!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Single-page client UI served at "/". Records microphone audio with
# MediaRecorder, offers local playback, POSTs the webm blob to /predict,
# and renders the returned per-class scores as bars. UI text is Vietnamese
# (runtime content — left untranslated).
html_content = """
<!DOCTYPE html>
<html>
<head>
<title>AI Emotion Detection</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body { font-family: 'Segoe UI', sans-serif; text-align: center; padding: 20px; background: #f0f2f5; color: #333; }
.container { max-width: 600px; margin: 0 auto; background: white; padding: 30px; border-radius: 16px; box-shadow: 0 4px 15px rgba(0,0,0,0.1); }
h1 { color: #2c3e50; margin-bottom: 5px; }
p { color: #7f8c8d; }

button { padding: 15px 30px; font-size: 18px; cursor: pointer; border-radius: 50px; border: none; margin: 20px auto; transition: 0.3s; display: block; width: 80%; font-weight: bold;}
#recordBtn { background-color: #ff4757; color: white; box-shadow: 0 4px 10px rgba(255, 71, 87, 0.3); }
#recordBtn:hover { background-color: #ff6b81; transform: translateY(-2px); }
#recordBtn.recording { background-color: #2ed573; animation: pulse 1.5s infinite; }

#playbackContainer { display: none; margin: 20px 0; padding: 15px; background: #f1f2f6; border-radius: 10px; }
audio { width: 100%; outline: none; }

#status { margin: 10px 0; font-style: italic; color: #666; height: 20px;}

#results { margin-top: 30px; text-align: left; }
.bar-container { margin-bottom: 12px; display: flex; align-items: center; }
.label { font-weight: bold; width: 70px; font-size: 14px; }
.bar-bg { flex-grow: 1; background: #dfe4ea; height: 12px; border-radius: 6px; margin: 0 10px; overflow: hidden;}
.bar-fill { height: 100%; background: linear-gradient(90deg, #3498db, #2980b9); border-radius: 6px; width: 0%; transition: width 0.6s ease-out; }
.percent { width: 40px; font-size: 14px; color: #555; text-align: right;}

@keyframes pulse { 0% { box-shadow: 0 0 0 0 rgba(46, 213, 115, 0.7); } 70% { box-shadow: 0 0 0 15px rgba(46, 213, 115, 0); } 100% { box-shadow: 0 0 0 0 rgba(46, 213, 115, 0); } }
</style>
</head>
<body>
<div class="container">
<h1>🎙️ Cảm xúc giọng nói</h1>
<p>Hệ thống phân tích cảm xúc qua giọng nói (AI)</p>

<button id="recordBtn" onclick="toggleRecording()">Bắt đầu Ghi âm</button>
<div id="status">Sẵn sàng</div>

<div id="playbackContainer">
<p style="margin: 0 0 10px 0; font-size: 14px;">🎧 Nghe lại giọng của bạn:</p>
<audio id="audioPlayer" controls></audio>
</div>

<div id="results"></div>
</div>

<script>
let mediaRecorder;
let audioChunks = [];
let isRecording = false;

async function toggleRecording() {
const btn = document.getElementById('recordBtn');
const status = document.getElementById('status');
const playbackContainer = document.getElementById('playbackContainer');
const resultsContainer = document.getElementById('results');

if (!isRecording) {
// BẮT ĐẦU GHI
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaRecorder = new MediaRecorder(stream);
audioChunks = [];

// Ẩn kết quả cũ khi ghi âm mới
playbackContainer.style.display = 'none';
resultsContainer.innerHTML = '';

mediaRecorder.ondataavailable = event => {
audioChunks.push(event.data);
};

mediaRecorder.onstop = async () => {
// Tạo blob audio
const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });

// 1. TẠO URL ĐỂ NGHE LẠI (CLIENT-SIDE)
const audioUrl = URL.createObjectURL(audioBlob);
const audioPlayer = document.getElementById('audioPlayer');
audioPlayer.src = audioUrl;
playbackContainer.style.display = 'block'; // Hiện trình phát

// 2. GỬI LÊN SERVER
uploadAudio(audioBlob);
};

mediaRecorder.start();
isRecording = true;
btn.textContent = "⏹ Dừng & Phân tích";
btn.classList.add("recording");
status.textContent = "Đang thu âm...";
} catch (err) {
alert("Không thể truy cập microphone: " + err);
}
} else {
// DỪNG GHI
mediaRecorder.stop();
isRecording = false;
btn.textContent = "🎙️ Bắt đầu Ghi âm mới";
btn.classList.remove("recording");
status.textContent = "Đang gửi dữ liệu...";
}
}

async function uploadAudio(blob) {
const formData = new FormData();
formData.append("file", blob, "recording.webm");

try {
const response = await fetch("/predict", {
method: "POST",
body: formData
});

if (!response.ok) {
throw new Error(`Server error: ${response.status}`);
}

const data = await response.json();
displayResults(data);
document.getElementById('status').textContent = "Hoàn tất!";
} catch (error) {
console.error("Error:", error);
document.getElementById('status').textContent = "Lỗi: " + error.message;
}
}

function displayResults(data) {
const container = document.getElementById('results');
container.innerHTML = "<h3>📊 Kết quả phân tích:</h3>";

data.results.forEach(item => {
const percentage = (item.score * 100).toFixed(1);
// Đổi màu thanh bar nếu > 50%
let barColor = percentage > 50 ? '#2ed573' : 'linear-gradient(90deg, #3498db, #2980b9)';

const html = `
<div class="bar-container">
<span class="label">${item.label}</span>
<div class="bar-bg">
<div class="bar-fill" style="width: ${percentage}%; background: ${barColor}"></div>
</div>
<span class="percent">${percentage}%</span>
</div>
`;
container.innerHTML += html;
});
}
</script>
</body>
</html>
"""
|
|
|
|
|
@app.get("/", response_class=HTMLResponse)
async def home():
    """Serve the single-page recording UI."""
    return html_content
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify the emotion in an uploaded audio clip.

    Accepts any container pydub/ffmpeg can decode (the client sends webm),
    resamples to 32 kHz mono, runs the mel front end and the TFLite model,
    and returns ``{"results": [{"label", "score"}, ...]}`` sorted by
    descending score. Returns HTTP 500 with an "error" payload when the
    model is unavailable or decoding fails.
    """
    # Fail fast before any expensive decoding work.
    if interpreter is None:
        return JSONResponse(status_code=500, content={"error": "Model not loaded"})

    # BUG FIX: the original wrote fixed filenames into the CWD, so two
    # concurrent requests clobbered (and then deleted) each other's files.
    # A per-request temp directory makes the scratch files private.
    tmp_dir = tempfile.mkdtemp(prefix="emotion_")
    webm_path = os.path.join(tmp_dir, "input.webm")
    wav_path = os.path.join(tmp_dir, "converted.wav")

    try:
        # Persist the upload, then normalize to 32 kHz mono WAV via pydub/ffmpeg.
        with open(webm_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        audio = AudioSegment.from_file(webm_path)
        audio = audio.set_frame_rate(32000).set_channels(1)
        audio.export(wav_path, format="wav")

        # Load as float waveform at the front end's expected sample rate.
        waveform, _ = librosa.load(wav_path, sr=32000, mono=True)
    except Exception as e:
        import traceback
        traceback.print_exc()
        return JSONResponse(status_code=500, content={"error": f"Lỗi xử lý file: {str(e)}"})
    finally:
        # Scratch files are no longer needed once the waveform is in memory.
        shutil.rmtree(tmp_dir, ignore_errors=True)

    # Batch dimension for the mel front end: (1, time).
    waveform = np.stack([waveform])
    spec = mel_processor(waveform)

    # Crop or zero-pad the time axis to the model's fixed input length.
    target_len = 400
    if spec.shape[-1] > target_len:
        spec = spec[:, :, :target_len]
    elif spec.shape[-1] < target_len:
        spec = np.pad(spec, ((0, 0), (0, 0), (0, target_len - spec.shape[-1])), mode='constant')

    spec = np.expand_dims(spec, axis=0).astype(np.float32)

    # TFLite interpreters are not thread-safe; serialize inference.
    with model_lock:
        interpreter.set_tensor(input_details[0]['index'], spec)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])

    preds = softmax(output_data[0])

    # Report every class, highest score first.
    sorted_indexes = np.argsort(preds)[::-1]
    results = [
        {"label": CLASSES[idx], "score": float(preds[idx])}
        for idx in sorted_indexes[:len(CLASSES)]
    ]
    return {"results": results}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    import nest_asyncio
    import uvicorn
    from pycloudflared import try_cloudflare

    # Allow re-entrant event loops: in Colab/Jupyter a loop is already
    # running, and nest_asyncio lets asyncio.run() work inside it.
    nest_asyncio.apply()

    print("🚀 Đang khởi động Cloudflare Tunnel...")
    tunnel_url = try_cloudflare(port=8000)
    print(f"🔗 PUBLIC URL CỦA BẠN: {tunnel_url.tunnel}")
    print("👉 Click link trên để truy cập Web App")

    config = uvicorn.Config(app, host="0.0.0.0", port=8000)
    server = uvicorn.Server(config)
    # BUG FIX: `await` at module level is a SyntaxError in a plain Python
    # script (it only works in IPython cells). Drive the server coroutine
    # explicitly; nest_asyncio.apply() above keeps this safe in notebooks.
    asyncio.run(server.serve())