Add files using upload-large-folder tool
Browse files
.cache/embeddings_mert_all_datasets.csv
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
.gitattributes
CHANGED
|
@@ -59,3 +59,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
dataset_audio_long_0.5_3.0s/metadata.csv filter=lfs diff=lfs merge=lfs -text
|
| 61 |
dataset_audio_long_10_60s/metadata.csv filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
dataset_audio_long_0.5_3.0s/metadata.csv filter=lfs diff=lfs merge=lfs -text
|
| 61 |
dataset_audio_long_10_60s/metadata.csv filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
dataset_audio_long_10_60s/embeddings_mert_all_datasets.csv filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
dataset_audio_long_0.5_3.0s/embeddings_mert_all_datasets.csv filter=lfs diff=lfs merge=lfs -text
|
How2Uploade.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
(jupyter) mshamsi@lst:~/expe/goto_raid_b/Song_Popularity$ env PATH="/lium/home/mshamsi/.local/bin:$PATH" hf upload-large-folder ENSIM/FreeSound_Popularity . --repo-type=dataset
|
dataset_audio_long_0.5_3.0s/embeddings_mert_all_datasets.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fab4057a372ee28185255dc0b1805985f082e19bd1733448cd7da1bfa3f8486f
|
| 3 |
+
size 262250743
|
dataset_audio_long_10_60s/embeddings_mert_all_datasets.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4148c314e3091b44f8ff6ba0564f22931176d2d25c4037f2527c922551314ae4
|
| 3 |
+
size 262920352
|
errors_mert.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
extract_mert_embedding.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import soundfile as sf
|
| 4 |
+
import librosa
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import numpy as np
|
| 7 |
+
from transformers import AutoProcessor, AutoModel
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
|
| 11 |
+
# =============================
# CONFIG
# =============================
# Root directory containing one sub-directory per dataset.
ROOT_DATA = "/lium/raid-b/mshamsi/FreeSound_Popularity/"
# Per-dataset output CSV name (written inside each dataset folder, see save step).
OUTPUT_PATH = "embeddings_mert_all_datasets.csv"
# Error log file, written in the current working directory.
LOG_PATH = "errors_mert.log"

# Sample rate fed to the MERT processor; audio is resampled to this below.
TARGET_SR = 24000
MAX_DURATION = 60  # seconds; longer files are truncated (no padding is applied)
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE: ".WAV" is redundant — matching is done case-insensitively
# via f.lower().endswith(AUDIO_EXTENSIONS).
AUDIO_EXTENSIONS = (".wav", ".WAV", ".mp3", ".flac", ".ogg", ".m4a")
|
| 23 |
+
|
| 24 |
+
# =============================
# INIT LOG
# =============================
# Truncate any previous log and stamp this run with its start time.
with open(LOG_PATH, "w") as log_file:
    log_file.write("=== MERT EXTRACTION LOG ===\n")
    log_file.write(str(datetime.now()) + "\n\n")
|
| 30 |
+
|
| 31 |
+
def log_error(msg):
    """Append one error message as a single line to the shared log file."""
    with open(LOG_PATH, "a") as log_file:
        print(msg, file=log_file)
|
| 34 |
+
|
| 35 |
+
# =============================
# LOAD MODEL
# =============================
# MERT ships custom modeling code on the Hub, hence trust_remote_code=True.
# NOTE(review): this executes code fetched from the Hub — only use trusted model ids.
try:
    processor = AutoProcessor.from_pretrained(
        "m-a-p/MERT-v1-330M",
        trust_remote_code=True
    )

    model = AutoModel.from_pretrained(
        "m-a-p/MERT-v1-330M",
        trust_remote_code=True
    ).to(DEVICE)

    # Inference only: freeze dropout / normalization statistics.
    model.eval()

except Exception as e:
    log_error(f"[FATAL] Model loading failed: {e}")
    # Chain the original exception ("from e") so the root cause stays
    # visible in the traceback instead of being swallowed.
    raise RuntimeError("Impossible de charger le modèle MERT") from e
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# =============================
# LOOP OVER DATASETS
# =============================
# One embedding CSV is produced per dataset sub-directory of ROOT_DATA.
datasets = [
    d for d in os.listdir(ROOT_DATA)
    if os.path.isdir(os.path.join(ROOT_DATA, d))
]

# Maximum number of samples kept per file — loop-invariant, hoisted out
# of the per-file loop.
max_len = TARGET_SR * MAX_DURATION

for dataset_name in datasets:
    dataset_path = os.path.join(ROOT_DATA, dataset_name)

    # Per-dataset accumulators: one CSV row per successfully processed file.
    rows = []
    processed = 0
    skipped = 0

    for batch in ["batch_001", "batch_002"]:
        batch_path = os.path.join(dataset_path, batch)

        if not os.path.exists(batch_path):
            log_error(f"[INFO] Missing folder: {batch_path}")
            continue

        # Case-insensitive extension match (hence lower()).
        audio_files = [
            f for f in os.listdir(batch_path)
            if f.lower().endswith(AUDIO_EXTENSIONS)
        ]

        for audio_file in tqdm(audio_files, desc=f"{dataset_name}/{batch}"):
            audio_path = os.path.join(batch_path, audio_file)

            try:
                # -----------------------------
                # Load audio (defensively)
                # -----------------------------
                audio, sr = sf.read(audio_path, always_2d=False)

                if audio is None or len(audio) == 0:
                    raise ValueError("Empty audio file")

                # Stereo -> mono by averaging channels (soundfile returns
                # (frames, channels) for multi-channel audio).
                if audio.ndim > 1:
                    audio = np.mean(audio, axis=1)

                audio = audio.astype(np.float32)

                # Resample to the rate the processor is told about below.
                if sr != TARGET_SR:
                    audio = librosa.resample(
                        audio,
                        orig_sr=sr,
                        target_sr=TARGET_SR
                    )

                # Truncate to MAX_DURATION seconds; no padding is applied.
                audio = audio[:max_len]

                # -----------------------------
                # MERT forward pass
                # -----------------------------
                inputs = processor(
                    audio,
                    sampling_rate=TARGET_SR,
                    return_tensors="pt"
                )
                inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

                with torch.no_grad():
                    outputs = model(**inputs)

                if not hasattr(outputs, "last_hidden_state"):
                    raise RuntimeError("Invalid model output")

                # Mean-pool over the time axis -> one fixed-size vector
                # per file.
                embedding = (
                    outputs.last_hidden_state
                    .mean(dim=1)
                    .squeeze()
                    .cpu()
                    .numpy()
                )

                # -----------------------------
                # Build the CSV row
                # -----------------------------
                row = {
                    "dataset": dataset_name,
                    "batch": batch,
                    "filename": audio_file
                }

                for i, val in enumerate(embedding):
                    row[f"mert_{i}"] = float(val)

                rows.append(row)
                processed += 1

            except Exception as e:
                # Best-effort batch job: log the failure and keep going
                # with the next file instead of aborting the whole run.
                skipped += 1
                log_error(f"[ERROR] {audio_path} -> {e}")

    # =============================
    # SAVE CSV (one per dataset, inside the dataset's own folder)
    # =============================
    csv_path = os.path.join(dataset_path, OUTPUT_PATH)
    df = pd.DataFrame(rows)
    df.to_csv(csv_path, index=False)

    print("\n=== EXTRACTION TERMINÉE ===")
    print(f"Dataset : {dataset_name}")
    print(f"Fichiers traités : {processed}")
    print(f"Fichiers ignorés : {skipped}")
    # Report the actual save location (the dataset folder), not just the
    # bare file name — the previous message pointed at the wrong path.
    print(f"CSV sauvegardé : {csv_path}")
    print(f"Log erreurs : {LOG_PATH}")
|