MrlolDev committed on
Commit
0583d3f
·
verified ·
1 Parent(s): 9f8bc08

Upload extract_features.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. extract_features.py +95 -0
extract_features.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pickle
3
+ import numpy as np
4
+ from datasets import load_dataset
5
+ from transformers import AutoProcessor, AutoModel
6
+ from huggingface_hub import HfApi
7
+ from tqdm import tqdm
8
+
9
# HuggingFace Hub client, used at the end to upload artifacts back to the dataset repo.
api = HfApi()

print("Cargando dataset desde HuggingFace...")
# Concatenate all three splits: features are extracted for every clip in the dataset.
dataset = load_dataset(
    "MrlolDev/voxtral-emotion-speech",
    split="train+validation+test",
)
print(f"Total clips: {len(dataset)}")

print("Cargando Voxtral encoder...")
MODEL_ID = "mistralai/Voxtral-Mini-4B-Realtime-2602"
processor = AutoProcessor.from_pretrained(MODEL_ID)
# Half precision halves GPU memory; inference-only (model.eval, no_grad downstream).
model = AutoModel.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
model.eval()
# Fall back to CPU when no GPU is available instead of crashing on torch.device("cuda").
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
print(f"Modelo en GPU: {next(model.parameters()).device}")

# Closed label set; EMOTIONS.index(...) in the extraction loop maps each
# emotion name to its integer class id.
EMOTIONS = ["neutral", "happy", "sad", "angry", "fear", "surprise"]
28
+
29
+
30
def extract_features(audio_array, sampling_rate):
    """Return a mean-pooled Voxtral encoder embedding for one audio clip.

    Args:
        audio_array: 1-D float waveform for a single clip.
        sampling_rate: sample rate of ``audio_array`` in Hz.

    Returns:
        numpy.ndarray of shape (hidden_size,), float32 — the encoder's
        last hidden states averaged over the time dimension.
    """
    inputs = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
    # The processor emits float32 tensors but the model was loaded in float16;
    # cast floating tensors to the model's dtype to avoid a Half/Float mismatch.
    model_dtype = next(model.parameters()).dtype
    inputs = {
        k: v.to(device, dtype=model_dtype) if v.is_floating_point() else v.to(device)
        for k, v in inputs.items()
    }

    with torch.no_grad():
        encoder_output = model.encoder(**inputs)
        hidden = encoder_output.last_hidden_state  # (1, T, hidden) — presumably hidden=1280; TODO confirm
        features = hidden.mean(dim=1).squeeze(0)  # mean-pool over time -> (hidden,)

    # Upcast back to float32 on CPU so downstream numpy/sklearn code is precise.
    return features.cpu().float().numpy()
40
+
41
+
42
records = []
errors = 0

print("Extrayendo features...")
# Best-effort pass over every clip: a failure on one sample is logged and
# counted, never aborts the whole extraction run.
for clip in tqdm(dataset):
    try:
        waveform = np.array(clip["audio"]["array"], dtype=np.float32)
        rate = clip["audio"]["sampling_rate"]
        record = {
            "features": extract_features(waveform, rate),
            "label": EMOTIONS.index(clip["emotion"]),
            "emotion": clip["emotion"],
            "split": clip.get("split", "train"),
            "sensevoice_score": clip.get("sensevoice_score", 0.0),
        }
        records.append(record)
    except Exception as e:
        errors += 1
        print(f"Error en sample: {e}")
65
+
66
print(f"\nExtracted: {len(records)} features | Errors: {errors}")

# Persist every extracted record locally so training can reuse the features
# without re-encoding the audio.
with open("features.pkl", "wb") as handle:
    pickle.dump(records, handle)

print("Saved to features.pkl")
72
+
73
from collections import Counter

# Report the per-emotion class balance of the extracted feature set.
emotion_counts = Counter(record["emotion"] for record in records)
for name, total in sorted(emotion_counts.items()):
    print(f" {name}: {total}")
78
+
79
REPO_ID = "MrlolDev/voxtral-emotion-speech"


def _upload_to_hub(filename):
    """Upload a single local file to the dataset repo, keeping its name."""
    api.upload_file(
        path_or_fileobj=filename,
        path_in_repo=filename,
        repo_id=REPO_ID,
        repo_type="dataset",
    )


# Both artifacts go through the same upload path; loop instead of duplicating
# the call stanza per file.
for filename in ("features.pkl", "README.md"):
    print(f"\nUploading {filename} to HuggingFace...")
    _upload_to_hub(filename)
    print(f"✅ {filename} uploaded to HuggingFace")