import pickle
from collections import Counter

import numpy as np
import torch
from datasets import load_dataset
from huggingface_hub import HfApi
from tqdm import tqdm
from transformers import AutoProcessor, AutoModel
|
|
# Hub client used for the artifact uploads at the end of the script.
api = HfApi()


print("Cargando dataset desde HuggingFace...")
# All three splits are concatenated; each record keeps its own "split" field.
dataset = load_dataset(
    "MrlolDev/voxtral-emotion-speech",
    split="train+validation+test",
)
print(f"Total clips: {len(dataset)}")


print("Cargando Voxtral encoder...")
MODEL_ID = "mistralai/Voxtral-Mini-4B-Realtime-2602"
processor = AutoProcessor.from_pretrained(MODEL_ID)
# fp16 halves memory; the model is used for inference only (eval + no_grad).
model = AutoModel.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
model.eval()
# Fall back to CPU so the script does not crash on machines without CUDA
# (the original hard-coded "cuda" and would raise on CPU-only hosts).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
print(f"Modelo en GPU: {next(model.parameters()).device}")


# Label index of each emotion is its position in this list (see the
# label lookup in the extraction loop below). Order must stay fixed.
EMOTIONS = ["neutral", "happy", "sad", "angry", "fear", "surprise"]
|
|
|
|
def extract_features(audio_array, sampling_rate):
    """Return a pooled Voxtral-encoder embedding for a single audio clip.

    The clip is tokenized by the processor, pushed through the frozen
    encoder, and the frame-level hidden states are mean-pooled over the
    time axis. The result is a 1-D float32 NumPy vector on the CPU.
    """
    batch = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
    batch = {key: tensor.to(device) for key, tensor in batch.items()}

    with torch.no_grad():
        hidden_states = model.encoder(**batch).last_hidden_state
        # Average over time, then drop the batch dimension
        # (assumes a batch of exactly one clip — TODO confirm).
        pooled = hidden_states.mean(dim=1).squeeze(0)

    return pooled.cpu().float().numpy()
|
|
|
|
records = []
errors = 0

# Precompute the emotion -> label-index map once instead of running the
# O(n) EMOTIONS.index() scan for every clip in the loop.
label_of = {emotion: idx for idx, emotion in enumerate(EMOTIONS)}

print("Extrayendo features...")
for sample in tqdm(dataset):
    try:
        audio_array = np.array(sample["audio"]["array"], dtype=np.float32)
        sr = sample["audio"]["sampling_rate"]

        features = extract_features(audio_array, sr)

        records.append(
            {
                "features": features,
                # Unknown emotion raises KeyError, caught and counted below.
                "label": label_of[sample["emotion"]],
                "emotion": sample["emotion"],
                # Some samples may lack these columns; fall back to defaults.
                "split": sample.get("split", "train"),
                "sensevoice_score": sample.get("sensevoice_score", 0.0),
            }
        )
    except Exception as e:
        # Broad catch is deliberate: one bad clip must not abort the whole
        # extraction run; it is counted and logged, then we keep going.
        errors += 1
        print(f"Error en sample: {e}")
|
|
print(f"\nExtracted: {len(records)} features | Errors: {errors}")


# Persist the feature records to disk (also uploaded to the Hub below).
with open("features.pkl", "wb") as f:
    pickle.dump(records, f)


print("Saved to features.pkl")


# Per-emotion counts as a quick sanity check on class balance.
# (Counter is imported at the top of the file, not mid-script.)
dist = Counter(r["emotion"] for r in records)
for emotion, count in sorted(dist.items()):
    print(f" {emotion}: {count}")
|
|
# Push both artifacts to the dataset repo on the Hub. The two upload
# stanzas were identical except for the filename, so loop instead.
for artifact in ("features.pkl", "README.md"):
    print(f"\nUploading {artifact} to HuggingFace...")
    api.upload_file(
        path_or_fileobj=artifact,
        path_in_repo=artifact,
        repo_id="MrlolDev/voxtral-emotion-speech",
        repo_type="dataset",
    )
    print(f"✅ {artifact} uploaded to HuggingFace")
|
|