mandipgoswami commited on
Commit
ee24db9
·
verified ·
1 Parent(s): 940e7a4

Upload 21 files

Browse files
benchmarks/dereverb_sisdr/README.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Dereverberation baseline (SI-SDRi)
2
+
3
+ Toy pipeline for generating reverberant speech by convolving clean speech with an RIR, and applying a naive magnitude shrinkage baseline.
4
+
5
+ Usage: place a short `samples/clean.wav` (not included) and run:
6
+ ```bash
7
+ python baseline_dereverb.py
8
+ ```
benchmarks/dereverb_sisdr/README.md~ ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Dereverberation baseline (SI-SDRi)
2
+
3
+ Toy pipeline for generating reverberant speech by convolving clean speech with an RIR, and applying a naive magnitude shrinkage baseline.
4
+
5
+ Usage: place a short `samples/clean.wav` (not included) and run:
6
+ ```bash
7
+ python baseline_dereverb.py
8
+ ```
benchmarks/dereverb_sisdr/baseline_dereverb.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # benchmarks/dereverb_sisdr/baseline_dereverb.py
2
+ import numpy as np, soundfile as sf
3
+ from pathlib import Path
4
+
5
+ ROOT = Path(__file__).resolve().parents[2]
6
+ CLEAN = ROOT / "samples" / "clean.wav" # user-provided
7
+ RIR = ROOT / "samples" / "rir_000053.wav" # example path
8
+
9
def convolve(x, h):
    """Full linear convolution of x with h, truncated to len(x) samples."""
    n = len(x)
    full = np.convolve(x, h)
    return full[:n]
11
+
12
def sisdr(ref, est, eps=1e-8):
    """Scale-invariant SDR of `est` against `ref`, in dB.

    Both signals are mean-removed, `est` is projected onto `ref` to get the
    target component, and the energy ratio target/residual is returned.
    `eps` guards the divisions and the log against zero energy.
    """
    ref_zm = ref - np.mean(ref)
    est_zm = est - np.mean(est)
    scale = np.dot(est_zm, ref_zm) / (np.dot(ref_zm, ref_zm) + eps)
    target = scale * ref_zm
    residual = est_zm - target
    num = np.dot(target, target) + eps
    den = np.dot(residual, residual) + eps
    return 10 * np.log10(num / den)
17
+
18
def main():
    """Render reverberant speech by convolving CLEAN with RIR, then report
    SI-SDR for the reverberant signal and for a toy 0.9x-shrinkage estimate."""
    if not CLEAN.exists():
        raise SystemExit(f"Missing CLEAN wav: {CLEAN}")
    if not RIR.exists():
        raise SystemExit(f"Missing example RIR: {RIR}")

    def _load_mono(path):
        # keep only the first channel of multi-channel files
        sig, rate = sf.read(path, dtype="float32")
        if sig.ndim > 1:
            sig = sig[:, 0]
        return sig, rate

    x, sr = _load_mono(CLEAN)
    h, _ = _load_mono(RIR)
    y = convolve(x, h)
    # naive baseline: shrink magnitude in time (toy)
    y_hat = y * 0.9
    print("SI-SDR (rev):", sisdr(x, y))
    print("SI-SDR (est):", sisdr(x, y_hat))


if __name__ == "__main__":
    main()
benchmarks/rt60_regression/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # RT60 Regression Baseline
2
+
3
+ A fast baseline predicting RT60-like targets from RIR signals using lightweight features + RandomForest.
4
+
5
+ ## Usage
6
+ ```bash
7
+ pip install soundfile numpy pandas scikit-learn
8
+ python benchmarks/rt60_regression/train_rt60.py # auto target
9
+ python benchmarks/rt60_regression/train_rt60.py --target rt60
10
+ ```
11
+
12
+ **Default target order:**
13
+ `rt60, drr_db, c50_db, c80_db, band_rt60s.125, 250, 500, 1000, 2000, 4000`
14
+
15
+ If you have no `valid` split, the script will carve 10% of `train` as an in-memory validation set.
16
+
17
+ ## Tips
18
+ - Ensure `metadata/metadata.csv` paths match your audio under `data/` (or set `RIRMEGA_DATA_DIR`).
19
+ - To run on a tiny subset quickly, generate `data-mini/` (see `scripts/make_mini_subset.py`) and copy this folder to a fresh repo/Space.
benchmarks/rt60_regression/README.md~ ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # RT60 Regression (baseline)
2
+
3
+ Simple baseline that predicts `rt60_t30` from lightweight RIR features.
4
+ Default expects local data under `../../data`.
5
+
6
+ Run:
7
+ ```bash
8
+ python train_rt60.py
9
+ ```
benchmarks/rt60_regression/train_rt60.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, ast, json, math, argparse
2
+ from pathlib import Path
3
+ import numpy as np, pandas as pd, soundfile as sf
4
+ from sklearn.ensemble import RandomForestRegressor
5
+ from sklearn.metrics import mean_absolute_error, mean_squared_error
6
+
7
+ SCRIPT_ROOT = Path(__file__).resolve().parents[2] # .../rirmega
8
+ DEF_DATA_ROOT = os.getenv("RIRMEGA_DATA_DIR", str(SCRIPT_ROOT / "data/audio/rir_output_50k"))
9
+ DEF_META = Path(DEF_DATA_ROOT) / "metadata" / "metadata.csv"
10
+ RT60_KEYS_ORDER = ["rt60","drr_db","c50_db", "c80_db","band_rt60s.125", "band_rt60s.250","band_rt60s.500","band_rt60s.1000","band_rt60s.2000","band_rt60s.4000"]
11
+
12
+ def _parse_metrics(s):
13
+ if s is None: return {}
14
+ s = str(s).strip()
15
+ if not s: return {}
16
+ for fn in (json.loads, ast.literal_eval):
17
+ try:
18
+ v = fn(s)
19
+ if isinstance(v, dict): return v
20
+ except Exception: pass
21
+ return {}
22
+
23
+ def _deep(d, k):
24
+ cur = d
25
+ for part in k.split("."):
26
+ if not isinstance(cur, dict) or part not in cur: return None
27
+ cur = cur[part]
28
+ return cur
29
+
30
def get_rt60(metrics, key=None):
    """Extract a float regression target from a metrics cell.

    With an explicit `key`, only that key is consulted (dotted keys descend
    into nested dicts) and None is returned if it is absent or not numeric.
    Without a key, the first parseable value in RT60_KEYS_ORDER wins.
    """
    parsed = _parse_metrics(metrics)
    if not parsed:
        return None

    def _lookup(name):
        return _deep(parsed, name) if "." in name else parsed.get(name)

    if key:
        value = _lookup(key)
        if value is None:
            return None
        try:
            return float(value)
        except Exception:
            return None

    for candidate in RT60_KEYS_ORDER:
        value = _lookup(candidate)
        if value is None:
            continue
        try:
            return float(value)
        except Exception:
            continue
    return None
43
+
44
def feats(path: Path):
    """Extract a 6-dim feature vector from an RIR wav file.

    Features: envelope mean, std, skewness, kurtosis; slope of the early
    energy-decay curve (dB per sample); spectral centroid (in FFT-bin
    units, not Hz).
    """
    # NOTE(review): `sr` is read but never used — centroid stays in bin units.
    y, sr = sf.read(str(path), dtype="float32", always_2d=False)
    if isinstance(y, np.ndarray) and y.ndim > 1: y = y[:,0]  # first channel only
    y = y.astype(np.float32, copy=False)
    # peak-normalize so features are insensitive to recording level
    y /= (np.max(np.abs(y)) + 1e-9)
    e = np.abs(y); e_mean, e_std = float(e.mean()), float(e.std())
    # standardized 3rd/4th moments of the envelope
    e_skew = float((np.mean(((e - e_mean) / (e_std + 1e-9)) ** 3)))
    e_kurt = float((np.mean(((e - e_mean) / (e_std + 1e-9)) ** 4)))
    # Schroeder backward integration of energy -> decay curve, floored at 1e-12
    ce = np.cumsum(y[::-1] ** 2)[::-1] + 1e-12
    # fit a line over the 5%..35% span of the decay (at least 5 points);
    # the slope is a cheap proxy for the reverberation decay rate
    edc_db = 10*np.log10(ce/ce[0]); n=len(edc_db); i1=int(0.05*n); i2=max(int(0.35*n), i1+5)
    slope = float(np.polyfit(np.arange(i1,i2), edc_db[i1:i2], 1)[0])
    Y=np.fft.rfft(y); mag=np.abs(Y); idx=np.arange(len(mag))
    centroid = float((idx*mag).sum()/(mag.sum()+1e-9))
    return np.array([e_mean,e_std,e_skew,e_kurt,slope,centroid], dtype=np.float32)
58
+
59
def main():
    """Train and evaluate the RT60 RandomForest baseline.

    Reads metadata.csv (must have wav, metrics, split columns), extracts
    features from each existing wav, fits a RandomForest on the train split
    and reports MAE/RMSE on the validation split (or a 10% carve-out of
    train when no validation split exists).
    """
    ap = argparse.ArgumentParser("RT60 baseline (compact schema)")
    ap.add_argument("--meta", default=str(DEF_META))
    ap.add_argument("--data-root", default=str(DEF_DATA_ROOT))
    ap.add_argument("--split-valid", default="valid")
    # target key inside the metrics cell; None -> first hit in RT60_KEYS_ORDER
    ap.add_argument("--target", default=None)
    args = ap.parse_args()

    meta = pd.read_csv(args.meta)
    if not {"wav","metrics","split"}.issubset(meta.columns):
        raise SystemExit("metadata.csv must have wav, metrics, split columns")

    # normalize splits
    meta["split"] = meta["split"].astype(str).str.strip().str.lower()
    tr = meta[meta["split"]=="train"].copy()
    va = meta[meta["split"]==args.split_valid.lower()].copy()
    if len(tr)==0: raise SystemExit("no train rows found")
    if len(va)==0:
        # fixed seed keeps the carve-out reproducible across runs
        va = tr.sample(frac=0.10, random_state=42)
        tr = tr.drop(va.index)
        print(f"[INFO] no '{args.split_valid}' rows; using 10% of train as validation. train={len(tr)} valid={len(va)}")
    else:
        print(f"[INFO] using explicit splits: train={len(tr)} {args.split_valid}={len(va)}")

    base = Path(args.data_root)

    def build(df):
        # Collect (features, target) pairs; count rows dropped for a missing
        # target value vs a missing audio file so failures are diagnosable.
        X=[]; y=[]
        missing_audio=0; missing_target=0
        for _,r in df.iterrows():
            t = get_rt60(r["metrics"], args.target)
            if t is None:
                missing_target += 1
                continue
            p = Path(r["wav"])
            # relative wav paths are resolved under the data root
            p = p if p.is_absolute() else (base / p)
            if not p.exists():
                missing_audio += 1
                continue
            X.append(feats(p)); y.append(float(t))
        return (np.stack(X), np.array(y, dtype=np.float32)) if X else (None,None), (missing_audio, missing_target)

    (Xtr,ytr), miss_tr = build(tr)
    (Xva,yva), miss_va = build(va)
    if Xtr is None or Xva is None:
        raise SystemExit(f"No usable samples: train missing (audio,target)={miss_tr}, "
                         f"valid missing (audio,target)={miss_va}. "
                         f"Make sure files exist under {base} and CSV 'wav' paths match.")

    m = RandomForestRegressor(n_estimators=400, random_state=0, n_jobs=-1).fit(Xtr,ytr)
    pred = m.predict(Xva)
    mae = mean_absolute_error(yva, pred)
    rmse = math.sqrt(mean_squared_error(yva, pred))
    print(f"Samples: train={len(Xtr)} valid={len(Xva)} | MAE={mae:.4f}s RMSE={rmse:.4f}s")

if __name__ == "__main__":
    main()
checksums/SHA256SUMS.txt ADDED
Binary file (354 Bytes). View file
 
data-mini/metadata/metadata.csv ADDED
The diff for this file is too large to render. See raw diff
 
rirmega/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ RIR-Mega dataset loader package.
3
+
4
+ Usage (after uploading to Hugging Face):
5
+ from datasets import load_dataset
6
+ ds = load_dataset("mandipgoswami/rirmega", name="default")
7
+ """
8
+ from .dataset import RIRMega
9
+
10
+ __all__ = ["RIRMega"]
rirmega/dataset.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import csv
3
+ import datasets
4
+
5
+ _CITATION = (
6
+ "@dataset{RIRMega2025, title={RIR-Mega}, author={Goswami, Mandip}, "
7
+ "year={2025}, doi={10.5281/zenodo.17387402}}"
8
+ )
9
+
10
+ _DESCRIPTION = (
11
+ "RIR-Mega loader for the compact metadata schema: "
12
+ "id,family,split,seed_room,fs,wav,room_size,absorption,absorption_bands,"
13
+ "max_order,source,microphone,array,metrics,rng_seed (+ optional checksum_sha256)."
14
+ )
15
+
16
+ _HOMEPAGE = "https://github.com/mandip42/rirmega"
17
+
18
+
19
+ def _to_int(value: str, default: int = 0) -> int:
20
+ try:
21
+ return int(float(value))
22
+ except Exception:
23
+ return default
24
+
25
+
26
class RIRMega(datasets.GeneratorBasedBuilder):
    """
    Hugging Face Datasets loader for RIR-Mega (compact schema).
    Expects a CSV at {DATA_ROOT}/metadata/metadata.csv with the columns:

    id,family,split,seed_room,fs,wav,room_size,absorption,absorption_bands,
    max_order,source,microphone,array,metrics,rng_seed

    Optional column:
    checksum_sha256

    DATA_ROOT defaults to "data" (relative to current working dir), or you can set:
    RIRMEGA_DATA_DIR=/absolute/path/to/data
    """
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="RIR-Mega compact schema loader",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        # Keep raw CSV columns as strings so users can parse downstream as they prefer.
        # Provide normalized 'sample_rate' (int) in addition to the raw 'fs' (string).
        features = datasets.Features(
            {
                # decodes to (array, sr) when accessed
                # sampling_rate=None keeps each file's native rate
                "audio": datasets.Audio(sampling_rate=None),

                # normalized/common fields
                "id": datasets.Value("string"),
                "split": datasets.Value("string"),
                "file_path": datasets.Value("string"),  # from 'wav' (CSV)
                "sample_rate": datasets.Value("int32"),  # from 'fs' (CSV; parsed)

                # raw CSV columns preserved (strings)
                "family": datasets.Value("string"),
                "seed_room": datasets.Value("string"),
                "fs": datasets.Value("string"),
                "wav": datasets.Value("string"),
                "room_size": datasets.Value("string"),
                "absorption": datasets.Value("string"),
                "absorption_bands": datasets.Value("string"),
                "max_order": datasets.Value("string"),
                "source": datasets.Value("string"),
                "microphone": datasets.Value("string"),
                "array": datasets.Value("string"),
                "metrics": datasets.Value("string"),
                "rng_seed": datasets.Value("string"),

                # optional checksum column (if you ran make_checksums.py)
                "checksum_sha256": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Data root can be overridden via env var; otherwise defaults to ./data
        # (dl_manager is unused: the data is expected locally, nothing is downloaded)
        data_root = os.getenv("RIRMEGA_DATA_DIR", "data")
        meta_path = os.path.join(data_root, "metadata", "metadata.csv")

        # We create three split generators and filter by 'split' in _generate_examples
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"meta": meta_path, "want": "train", "root": data_root},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"meta": meta_path, "want": "valid", "root": data_root},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"meta": meta_path, "want": "test", "root": data_root},
            ),
        ]

    def _generate_examples(self, meta: str, want: str, root: str):
        """
        Yields examples filtered by the `split` column (train|valid|test).
        Resolves relative wav paths under `root` (data root) and passes absolute
        paths through as-is.
        """
        with open(meta, newline="", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            # Normalize header names once (in case of BOM/spacing)
            headers = [h.strip().lstrip("\ufeff") for h in (reader.fieldnames or [])]
            # map lowercased header -> original header, for case-insensitive access
            H = {h.lower(): h for h in headers}

            required = {"id", "split", "fs", "wav"}
            if not required.issubset(set(h.lower() for h in headers)):
                raise ValueError(
                    "metadata.csv missing required columns. "
                    f"Need {sorted(required)}; have {headers}"
                )

            for idx, row in enumerate(reader):
                split = (row.get(H["split"], "") or "").strip().lower()
                if split != want:
                    continue

                # fall back to the row index when the id cell is blank
                rid = (row.get(H["id"], "") or "").strip() or str(idx)
                fs_raw = (row.get(H["fs"], "") or "").strip()
                wav_raw = (row.get(H["wav"], "") or "").strip()

                # Normalize sample rate
                sr = _to_int(fs_raw, default=0)

                # Resolve file path for the 'audio' feature
                if wav_raw and not os.path.isabs(wav_raw):
                    file_abs = os.path.join(root, wav_raw)
                else:
                    file_abs = wav_raw

                # Build example dict (keep all known columns if present)
                # NOTE: for an absent optional column, H.get(name, "") yields ""
                # and row.get("") is None, so the trailing `or ""` keeps it a string.
                ex = {
                    # Audio is lazy-decoded by HF (path stored; bytes=None)
                    "audio": {"path": file_abs, "bytes": None},

                    # normalized/common fields
                    "id": rid,
                    "split": split,
                    "file_path": wav_raw,
                    "sample_rate": sr,

                    # raw CSV fields
                    "family": (row.get(H.get("family", ""), "") or ""),
                    "seed_room": (row.get(H.get("seed_room", ""), "") or ""),
                    "fs": fs_raw,
                    "wav": wav_raw,
                    "room_size": (row.get(H.get("room_size", ""), "") or ""),
                    "absorption": (row.get(H.get("absorption", ""), "") or ""),
                    "absorption_bands": (row.get(H.get("absorption_bands", ""), "") or ""),
                    "max_order": (row.get(H.get("max_order", ""), "") or ""),
                    "source": (row.get(H.get("source", ""), "") or ""),
                    "microphone": (row.get(H.get("microphone", ""), "") or ""),
                    "array": (row.get(H.get("array", ""), "") or ""),
                    "metrics": (row.get(H.get("metrics", ""), "") or ""),
                    "rng_seed": (row.get(H.get("rng_seed", ""), "") or ""),

                    # optional checksum (exact-case lookup, unlike fields above)
                    "checksum_sha256": (row.get("checksum_sha256", "") or row.get("CHECKSUM_SHA256", "") or ""),
                }

                yield idx, ex
rirmega/dataset.py~ ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import csv
3
+ import datasets
4
+
5
+ _CITATION = (
6
+ "@dataset{RIRMega2025, title={RIR-Mega}, author={Goswami, Mandip}, "
7
+ "year={2025}, doi={10.5281/zenodo.17387402}}"
8
+ )
9
+
10
+ _DESCRIPTION = (
11
+ "RIR-Mega loader for the compact metadata schema: "
12
+ "id,family,split,seed_room,fs,wav,room_size,absorption,absorption_bands,"
13
+ "max_order,source,microphone,array,metrics,rng_seed (+ optional checksum_sha256)."
14
+ )
15
+
16
+ _HOMEPAGE = "https://github.com/mandip42/rirmega"
17
+
18
+
19
+ def _to_int(value: str, default: int = 0) -> int:
20
+ try:
21
+ return int(float(value))
22
+ except Exception:
23
+ return default
24
+
25
+
26
+ class RIRMega(datasets.GeneratorBasedBuilder):
27
+ """
28
+ Hugging Face Datasets loader for RIR-Mega (compact schema).
29
+ Expects a CSV at {DATA_ROOT}/metadata/metadata.csv with the columns:
30
+
31
+ id,family,split,seed_room,fs,wav,room_size,absorption,absorption_bands,
32
+ max_order,source,microphone,array,metrics,rng_seed
33
+
34
+ Optional column:
35
+ checksum_sha256
36
+
37
+ DATA_ROOT defaults to "data" (relative to current working dir), or you can set:
38
+ RIRMEGA_DATA_DIR=/absolute/path/to/data
39
+ """
40
+ VERSION = datasets.Version("1.0.0")
41
+ BUILDER_CONFIGS = [
42
+ datasets.BuilderConfig(
43
+ name="default",
44
+ version=VERSION,
45
+ description="RIR-Mega compact schema loader",
46
+ ),
47
+ ]
48
+
49
+ def _info(self) -> datasets.DatasetInfo:
50
+ # Keep raw CSV columns as strings so users can parse downstream as they prefer.
51
+ # Provide normalized 'sample_rate' (int) in addition to the raw 'fs' (string).
52
+ features = datasets.Features(
53
+ {
54
+ # decodes to (array, sr) when accessed
55
+ "audio": datasets.Audio(sampling_rate=None),
56
+
57
+ # normalized/common fields
58
+ "id": datasets.Value("string"),
59
+ "split": datasets.Value("string"),
60
+ "file_path": datasets.Value("string"), # from 'wav' (CSV)
61
+ "sample_rate": datasets.Value("int32"), # from 'fs' (CSV; parsed)
62
+
63
+ # raw CSV columns preserved (strings)
64
+ "family": datasets.Value("string"),
65
+ "seed_room": datasets.Value("string"),
66
+ "fs": datasets.Value("string"),
67
+ "wav": datasets.Value("string"),
68
+ "room_size": datasets.Value("string"),
69
+ "absorption": datasets.Value("string"),
70
+ "absorption_bands": datasets.Value("string"),
71
+ "max_order": datasets.Value("string"),
72
+ "source": datasets.Value("string"),
73
+ "microphone": datasets.Value("string"),
74
+ "array": datasets.Value("string"),
75
+ "metrics": datasets.Value("string"),
76
+ "rng_seed": datasets.Value("string"),
77
+
78
+ # optional checksum column (if you ran make_checksums.py)
79
+ "checksum_sha256": datasets.Value("string"),
80
+ }
81
+ )
82
+ return datasets.DatasetInfo(
83
+ description=_DESCRIPTION,
84
+ features=features,
85
+ homepage=_HOMEPAGE,
86
+ citation=_CITATION,
87
+ )
88
+
89
+ def _split_generators(self, dl_manager):
90
+ # Data root can be overridden via env var; otherwise defaults to ./data
91
+ data_root = os.getenv("RIRMEGA_DATA_DIR", "data")
92
+ meta_path = os.path.join(data_root, "metadata", "metadata.csv")
93
+
94
+ # We create three split generators and filter by 'split' in _generate_examples
95
+ return [
96
+ datasets.SplitGenerator(
97
+ name=datasets.Split.TRAIN,
98
+ gen_kwargs={"meta": meta_path, "want": "train", "root": data_root},
99
+ ),
100
+ datasets.SplitGenerator(
101
+ name=datasets.Split.VALIDATION,
102
+ gen_kwargs={"meta": meta_path, "want": "valid", "root": data_root},
103
+ ),
104
+ datasets.SplitGenerator(
105
+ name=datasets.Split.TEST,
106
+ gen_kwargs={"meta": meta_path, "want": "test", "root": data_root},
107
+ ),
108
+ ]
109
+
110
+ def _generate_examples(self, meta: str, want: str, root: str):
111
+ """
112
+ Yields examples filtered by the `split` column (train|valid|test).
113
+ Resolves relative wav paths under `root` (data root) and passes absolute
114
+ paths through as-is.
115
+ """
116
+ with open(meta, newline="", encoding="utf-8") as f:
117
+ reader = csv.DictReader(f)
118
+ # Normalize header names once (in case of BOM/spacing)
119
+ headers = [h.strip().lstrip("\ufeff") for h in (reader.fieldnames or [])]
120
+ H = {h.lower(): h for h in headers}
121
+
122
+ required = {"id", "split", "fs", "wav"}
123
+ if not required.issubset(set(h.lower() for h in headers)):
124
+ raise ValueError(
125
+ "metadata.csv missing required columns. "
126
+ f"Need {sorted(required)}; have {headers}"
127
+ )
128
+
129
+ for idx, row in enumerate(reader):
130
+ split = (row.get(H["split"], "") or "").strip().lower()
131
+ if split != want:
132
+ continue
133
+
134
+ rid = (row.get(H["id"], "") or "").strip() or str(idx)
135
+ fs_raw = (row.get(H["fs"], "") or "").strip()
136
+ wav_raw = (row.get(H["wav"], "") or "").strip()
137
+
138
+ # Normalize sample rate
139
+ sr = _to_int(fs_raw, default=0)
140
+
141
+ # Resolve file path for the 'audio' feature
142
+ if wav_raw and not os.path.isabs(wav_raw):
143
+ file_abs = os.path.join(root, wav_raw)
144
+ else:
145
+ file_abs = wav_raw
146
+
147
+ # Build example dict (keep all known columns if present)
148
+ ex = {
149
+ # Audio is lazy-decoded by HF (path stored; bytes=None)
150
+ "audio": {"path": file_abs, "bytes": None},
151
+
152
+ # normalized/common fields
153
+ "id": rid,
154
+ "split": split,
155
+ "file_path": wav_raw,
156
+ "sample_rate": sr,
157
+
158
+ # raw CSV fields
159
+ "family": (row.get(H.get("family", ""), "") or ""),
160
+ "seed_room": (row.get(H.get("seed_room", ""), "") or ""),
161
+ "fs": fs_raw,
162
+ "wav": wav_raw,
163
+ "room_size": (row.get(H.get("room_size", ""), "") or ""),
164
+ "absorption": (row.get(H.get("absorption", ""), "") or ""),
165
+ "absorption_bands": (row.get(H.get("absorption_bands", ""), "") or ""),
166
+ "max_order": (row.get(H.get("max_order", ""), "") or ""),
167
+ "source": (row.get(H.get("source", ""), "") or ""),
168
+ "microphone": (row.get(H.get("microphone", ""), "") or ""),
169
+ "array": (row.get(H.get("array", ""), "") or ""),
170
+ "metrics": (row.get(H.get("metrics", ""), "") or ""),
171
+ "rng_seed": (row.get(H.get("rng_seed", ""), "") or ""),
172
+
173
+ # optional checksum
174
+ "checksum_sha256": (row.get("checksum_sha256", "") or row.get("CHECKSUM_SHA256", "") or ""),
175
+ }
176
+
177
+ yield idx, ex
rirmega/schema.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
@dataclass
class RIRMeta:
    """Typed view of the compact metadata schema (all fields optional except id, split, fs, wav).

    Mirrors one row of metadata.csv; values are kept as raw CSV strings and
    parsed downstream (see the loader's `sample_rate` handling for `fs`).
    """
    id: str
    split: str  # train | valid | test
    fs: str  # kept as string in CSV; parsed to int in loader as sample_rate
    wav: str  # wav path, relative to the data root or absolute

    # Optional / pass-through fields (kept as strings; parse downstream as needed)
    family: str = ""
    seed_room: str = ""
    room_size: str = ""  # e.g., "[12.0, 8.0, 3.2]" or "12x8x3.2"
    absorption: str = ""  # free-form or dict-like string
    absorption_bands: str = ""  # free-form or dict-like string
    max_order: str = ""
    source: str = ""  # often dict-like string
    microphone: str = ""  # often dict-like string
    array: str = ""  # often dict-like string
    metrics: str = ""  # often dict-like string (rt60, drr_db, c50_db, ...)
    rng_seed: str = ""
    checksum_sha256: str = ""  # optional column added by make_checksums.py
samples/clean.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfee1216b41a63a8910767d3a8410ccdc69ff8a16ef81776d13449353ec679a7
3
+ size 256044
samples/rir_000003.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf57dd545496cfddc21d70226dfed089ea49afdf89ffd8185061bbcc6a61b6b8
3
+ size 285868
samples/rir_000053.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2f6d0e47667b4cab6b196f2edaf11a8f1fb48eb9fb4a8e62d21b38732d06f57
3
+ size 144252
scripts/make_checksums.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # scripts/make_checksums.py
2
+ import argparse, csv, hashlib, pathlib, sys, os
3
+ from collections import defaultdict
4
+
5
def sha256(p, buf=1024*1024):
    """Hex SHA-256 digest of file `p`, streamed in `buf`-byte chunks."""
    digest = hashlib.sha256()
    with open(p, "rb") as fh:
        for chunk in iter(lambda: fh.read(buf), b""):
            digest.update(chunk)
    return digest.hexdigest()
11
+
12
def norm_header(h: str) -> str:
    """Canonicalize a CSV header: trim, drop BOM, lowercase, spaces -> underscores."""
    text = (h or "").strip().lstrip("\ufeff")
    text = text.lower()
    return text.replace(" ", "_")
14
+
15
def norm_path_for_match(p: str) -> str:
    """Normalize a path for dictionary lookup.

    Converts backslashes to forward slashes and strips any leading "./"
    prefixes, so Windows-style and POSIX-style spellings of the same
    relative path compare equal.
    """
    q = p.replace("\\", "/")
    # After the replace above a leading '.\' has already become './', so a
    # single prefix check suffices (the original also tested ".\\" here,
    # which was unreachable dead code at this point).
    while q.startswith("./"):
        q = q[2:]
    return q
21
+
22
def main():
    """Insert per-file SHA-256 checksums into metadata.csv.

    Scans the audio root for .wav/.flac files, hashes each one, then matches
    every CSV row's path cell against those hashes (several path-resolution
    strategies plus a unique-basename fallback) and rewrites the CSV in
    place with a checksum column filled in.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--meta", default="data/metadata/metadata.csv",
                    help="Path to metadata.csv")
    ap.add_argument("--audio-root", default="data/audio",
                    help="Root folder containing audio files")
    ap.add_argument("--path-col", default=None,
                    help="Column name that holds paths (default: auto-detect; prefers 'file_path')")
    ap.add_argument("--checksum-col", default="checksum_sha256",
                    help="Column name to write sha256 into")
    args = ap.parse_args()

    # all relative CLI paths are anchored at the project root (script's parent dir)
    root = pathlib.Path(__file__).resolve().parents[1]
    audio_root = (root / args.audio_root).resolve()
    meta_path = (root / args.meta).resolve()

    if not meta_path.exists():
        sys.exit(f"[ERR] metadata file not found: {meta_path}")
    if not audio_root.exists():
        print(f"[WARN] audio root not found yet: {audio_root}")

    # Build map: relative path (POSIX) -> sha256
    print("[INFO] Scanning audio files for checksums…")
    hashmap = {}
    for p in audio_root.rglob("*"):
        if p.suffix.lower() not in {".wav", ".flac"} or not p.is_file():
            continue
        rel = p.relative_to(root).as_posix()
        hashmap[norm_path_for_match(rel)] = sha256(p)

    # Read CSV with dialect sniffing and normalized headers
    # NOTE(review): the Sniffer only sees the first line, so the delimiter is
    # inferred from the header row alone; csv.excel is the fallback.
    print(f"[INFO] Reading metadata: {meta_path}")
    raw = meta_path.read_text(encoding="utf-8", errors="replace")
    try:
        dialect = csv.Sniffer().sniff(raw.splitlines()[0] if raw else ",")
    except Exception:
        dialect = csv.excel  # fallback to comma

    rows = []
    with open(meta_path, newline="", encoding="utf-8", errors="replace") as f:
        reader = csv.reader(f, dialect)
        try:
            headers = next(reader)
        except StopIteration:
            sys.exit("[ERR] metadata.csv appears empty.")

        norm_headers = [norm_header(h) for h in headers]
        hdr_map = {norm_header(h): i for i, h in enumerate(headers)}

        # Choose the path column
        candidate_names = [norm_header(args.path_col)] if args.path_col else [
            "file_path", "filepath", "path", "relative_path", "audio_path", "wav", "rir_path"
        ]
        path_col_norm = next((c for c in candidate_names if c in hdr_map), None)
        if not path_col_norm:
            msg = (f"[ERR] Could not find a path column. Looked for any of: "
                   f"{candidate_names}. Available columns: {norm_headers}")
            sys.exit(msg)

        checksum_col_norm = norm_header(args.checksum_col)
        # If checksum column absent, append it
        if checksum_col_norm not in hdr_map:
            headers.append(args.checksum_col)
            norm_headers.append(checksum_col_norm)
            checksum_idx = len(headers) - 1
        else:
            checksum_idx = hdr_map[checksum_col_norm]

        path_idx = hdr_map[path_col_norm]

        # Process rows
        rows.append(headers)  # header row for writing back
        for i, row in enumerate(reader, start=1):
            # pad short rows
            if len(row) < len(headers):
                row += [""] * (len(headers) - len(row))
            # Normalize the path for lookup
            csv_path_raw = (row[path_idx] or "").strip()
            if not csv_path_raw:
                print(f"[WARN] row {i}: empty path cell; leaving checksum blank")
                rows.append(row)
                continue

            # Try multiple lookup strategies
            candidates = []

            # 1) CSV path as given (normalized)
            candidates.append(norm_path_for_match(csv_path_raw))

            # 2) If CSV path is absolute, try making it relative to project root
            p = pathlib.Path(csv_path_raw)
            if p.is_absolute():
                try:
                    rel = p.relative_to(root).as_posix()
                    candidates.append(norm_path_for_match(rel))
                except Exception:
                    pass

            # 3) If CSV path is relative to audio_root
            try:
                rel2 = (audio_root / csv_path_raw).resolve().relative_to(root).as_posix()
                candidates.append(norm_path_for_match(rel2))
            except Exception:
                pass

            # 4) Fallback: match by basename if unique
            # NOTE(review): this stub is a no-op placeholder — the intended
            # prebuilt reverse index was never implemented; the actual
            # basename fallback is the linear scan further below.
            basename = pathlib.Path(csv_path_raw).name
            if basename:
                # build once a reverse index by basename
                pass

            # deduplicate candidates
            candidates = list(dict.fromkeys(candidates))

            sha = ""
            for cand in candidates:
                sha = hashmap.get(cand, "")
                if sha:
                    break

            # As a last resort, basename matching (unique)
            if not sha and basename:
                matches = [v for k, v in hashmap.items() if pathlib.Path(k).name == basename]
                if len(matches) == 1:
                    sha = matches[0]

            row[checksum_idx] = sha
            if not sha:
                print(f"[WARN] row {i}: no match for '{csv_path_raw}' (tried {len(candidates)} candidates)")
            rows.append(row)

    # Write back CSV (same dialect; UTF-8)
    print(f"[INFO] Writing updated metadata with checksums → {meta_path}")
    with open(meta_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f, dialect)
        writer.writerows(rows)

    print("[DONE] Checksums inserted. "
          f"Found hashes for ~{sum(1 for r in rows[1:] if r[checksum_idx])} rows.")

if __name__ == "__main__":
    main()
scripts/make_mini_subset.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # scripts/make_mini_subset.py
2
+ # Create a tiny subset under data-mini/ for quick demos/Spaces.
3
+ import os, shutil
4
+ from pathlib import Path
5
+ import pandas as pd
6
+
7
+ ROOT = Path(__file__).resolve().parents[1]
8
+ SRC_META = ROOT / "data" / "metadata" / "metadata.csv"
9
+ SRC_DATA = ROOT / "data"
10
+ DST = ROOT / "data-mini"
11
+
12
+ N_TRAIN = 200
13
+ N_VALID = 50
14
+
15
def main():
    """Build data-mini/: a small train/valid subset of data/ for quick demos.

    Samples up to N_TRAIN train rows; uses the CSV's 'valid' rows if any,
    otherwise carves up to N_VALID rows out of the remaining train rows.
    Copies the referenced wav files under data-mini/ (silently skipping
    missing ones) and writes the subset metadata CSV.
    """
    if not SRC_META.exists():
        raise SystemExit(f"Missing: {SRC_META}")
    df = pd.read_csv(SRC_META)
    if "wav" not in df.columns or "split" not in df.columns:
        raise SystemExit("metadata.csv must have 'wav' and 'split' columns")

    split_lc = df["split"].str.lower()
    train_pool = df[split_lc == "train"]
    train = train_pool.sample(min(N_TRAIN, len(train_pool)), random_state=0)
    valid = df[split_lc == "valid"]
    if len(valid) == 0:
        # No explicit valid split: carve it from the train rows not sampled above.
        remainder = train_pool.drop(train.index)
        # FIX: bound the sample size by the remainder, not by len(df) — the
        # original min(N_VALID, len(df)) could exceed the remainder and make
        # DataFrame.sample raise ValueError.
        valid = remainder.sample(min(N_VALID, len(remainder)), random_state=0)
        valid = valid.assign(split="valid")

    sub = pd.concat([train, valid], ignore_index=True).copy()
    if DST.exists(): shutil.rmtree(DST)
    (DST / "metadata").mkdir(parents=True, exist_ok=True)

    copied = 0
    for _, r in sub.iterrows():
        wav = str(r["wav"]).strip()
        # relative wav paths live under the source data root
        src = (SRC_DATA / wav) if not os.path.isabs(wav) else Path(wav)
        if not src.exists():
            continue
        dest = DST / wav
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(src, dest)
        copied += 1

    sub.to_csv(DST / "metadata" / "metadata.csv", index=False)
    print(f"Copied {copied} files. Wrote {DST / 'metadata' / 'metadata.csv'}")

if __name__ == "__main__":
    main()
scripts/validate_metadata.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# scripts/validate_metadata.py
import csv, sys, pathlib

# Two accepted schemas:
# Schema A: full per-RIR record (geometry, acoustics, room dimensions).
REQUIRED_A = {"rir_id","file_path","sample_rate","rir_length","method",
              "src_x","src_y","src_z","mic_x","mic_y","mic_z",
              "distance","azimuth_deg","elevation_deg",
              "rt60_t20","rt60_t30","drr_db","room_l","room_w","room_h","vol_m3","split"}
# Schema B: minimal columns used by the mini subset.
REQUIRED_B = {"id","split","fs","wav"} # compact schema
10
+
11
def find_default_meta():
    """Return the first existing default metadata.csv location, or None."""
    root = pathlib.Path(__file__).resolve().parents[1]
    candidates = (
        root / "data" / "metadata" / "metadata.csv",
        root / "data" / "metadata.csv",
    )
    return next((c for c in candidates if c.exists()), None)
21
+
22
def validate(path: pathlib.Path):
    """Validate a metadata CSV against Schema A or Schema B.

    Raises SystemExit with a descriptive message on the first problem found;
    prints a success line on full validation.
    """
    with path.open(newline="", encoding="utf-8", errors="replace") as f:
        r = csv.DictReader(f)

        def _norm(h):
            # Strip whitespace and a possible UTF-8 BOM, lowercase.
            return (h or "").strip().lstrip("\ufeff").lower()

        fieldnames = r.fieldnames or []
        headers = {_norm(h) for h in fieldnames}
        if REQUIRED_A.issubset(headers):
            schema = "A"
        elif REQUIRED_B.issubset(headers):
            schema = "B"
        else:
            missingA = sorted(REQUIRED_A - headers)
            missingB = sorted(REQUIRED_B - headers)
            raise SystemExit(
                "metadata.csv does not match a known schema.\n"
                f"- Missing for Schema A: {missingA}\n"
                f"- Missing for Schema B: {missingB}\n"
                f"Headers seen: {sorted(headers)}"
            )

        # BUG FIX: schema detection normalizes headers, but row lookups used
        # the normalized names against the raw DictReader keys. A CSV with
        # e.g. "Sample_Rate" or a BOM'd first column passed detection and
        # then spuriously failed on every row. Map normalized -> raw keys.
        key_of = {_norm(h): h for h in fieldnames}

        def _require_positive_int(row, norm_key, i):
            # Numeric fields must parse as a positive integer (floats like
            # "48000.0" are accepted, mirroring the original int(float(...))).
            try:
                if int(float(row[key_of[norm_key]])) <= 0:
                    raise ValueError
            except Exception:
                raise SystemExit(
                    f"row {i}: invalid {norm_key} -> {row.get(key_of.get(norm_key))}")

        for i, row in enumerate(r, 1):
            if schema == "A":
                if not (row.get(key_of["file_path"]) or "").strip():
                    raise SystemExit(f"row {i}: empty file_path")
                _require_positive_int(row, "sample_rate", i)
                _require_positive_int(row, "rir_length", i)
            else:
                if not (row.get(key_of["wav"]) or "").strip():
                    raise SystemExit(f"row {i}: empty wav path")
                _require_positive_int(row, "fs", i)

    print(f"metadata OK ✅ ({schema=}) @ {path}")
64
+
65
if __name__ == "__main__":
    # CLI entry point: validate the path given as argv[1], otherwise fall
    # back to the repository's default metadata location.
    args = sys.argv[1:]
    if args and args[0].strip():
        meta_path = pathlib.Path(args[0]).expanduser().resolve()
        if not meta_path.exists():
            raise SystemExit(f"[ERR] Not found: {meta_path}")
    else:
        meta_path = find_default_meta()
        if meta_path is None:
            raise SystemExit("Usage: python scripts/validate_metadata.py <path/to/metadata.csv>\n"
                             "Could not auto-find data/metadata/metadata.csv")
    validate(meta_path)
space_app/README.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # RIR-Mega Space App (mini baseline)
2
+
3
+ - Copy this folder's contents into a new Hugging Face **Space** (SDK: Gradio).
4
+ - Also upload a tiny `data-mini/` next to `app.py` in the Space repo (or set `RIRMEGA_DATA_DIR`).
5
+ - Press "Run baseline" to compute MAE/RMSE on the mini subset.
space_app/app.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Simple Gradio app to run the RT60 baseline on a mini subset.
import os, json, ast
from pathlib import Path
import gradio as gr
import numpy as np
import pandas as pd
import soundfile as sf
from sklearn.ensemble import RandomForestRegressor

# Repo root; data directory can be overridden (e.g. in a Space) via
# the RIRMEGA_DATA_DIR environment variable.
ROOT = Path(__file__).resolve().parents[1]
DATA_ROOT = Path(os.getenv("RIRMEGA_DATA_DIR", ROOT / "data-mini"))
META = DATA_ROOT / "metadata" / "metadata.csv"

# Selectable regression targets; dotted keys index into nested metrics dicts
# (see _deep), e.g. "band_rt60s.500" -> metrics["band_rt60s"]["500"].
RT60_KEYS_ORDER = ["rt60","drr_db","c50_db","c80_db",
                   "band_rt60s.125","band_rt60s.250","band_rt60s.500",
                   "band_rt60s.1000","band_rt60s.2000","band_rt60s.4000"]
17
+
18
+ def _parse_metrics(s):
19
+ if s is None: return {}
20
+ s = str(s).strip()
21
+ if not s: return {}
22
+ for fn in (json.loads, ast.literal_eval):
23
+ try:
24
+ v = fn(s)
25
+ if isinstance(v, dict): return v
26
+ except Exception: pass
27
+ return {}
28
+
29
+ def _deep(d,k):
30
+ cur = d
31
+ for part in k.split("."):
32
+ if not isinstance(cur, dict) or part not in cur: return None
33
+ cur = cur[part]
34
+ return cur
35
+
36
def feats(path: Path):
    """Extract a 6-dim feature vector from a wav file for RT60 regression.

    Features: envelope mean/std/skew/kurtosis, early energy-decay slope
    (dB per sample over the 5%-35% span of the Schroeder curve), and a
    spectral centroid in FFT-bin units (not Hz; the sample rate is unused).

    Returns:
        np.ndarray of shape (6,), dtype float32.
    """
    y, sr = sf.read(str(path), dtype="float32", always_2d=False)
    # Keep the first channel only; peak-normalize with a small epsilon.
    if isinstance(y, np.ndarray) and y.ndim > 1:
        y = y[:, 0]
    y = y.astype(np.float32, copy=False)
    y /= (np.max(np.abs(y)) + 1e-9)

    # Envelope statistics. Hoist the z-score: the original recomputed the
    # normalized array separately for skew and kurtosis (and shadowed the
    # module-level numpy with a redundant in-function import).
    e = np.abs(y)
    e_mean, e_std = float(e.mean()), float(e.std())
    z = (e - e_mean) / (e_std + 1e-9)
    e_skew = float(np.mean(z ** 3))
    e_kurt = float(np.mean(z ** 4))

    # Schroeder backward-integrated energy decay curve in dB.
    ce = np.cumsum(y[::-1] ** 2)[::-1] + 1e-12
    edc_db = 10 * np.log10(ce / ce[0])
    n = len(edc_db)
    i1 = int(0.05 * n)
    i2 = max(int(0.35 * n), i1 + 5)  # guarantee at least 5 fit points
    slope = float(np.polyfit(np.arange(i1, i2), edc_db[i1:i2], 1)[0])

    # Spectral centroid over the magnitude spectrum, in bin indices.
    mag = np.abs(np.fft.rfft(y))
    idx = np.arange(len(mag))
    centroid = float((idx * mag).sum() / (mag.sum() + 1e-9))

    return np.array([e_mean, e_std, e_skew, e_kurt, slope, centroid], dtype=np.float32)
52
+
53
def run_baseline(target_key):
    """Train and evaluate the random-forest RT60 baseline on the mini subset.

    Reads META, extracts per-wav features, fits on the train split and
    reports MAE/RMSE on the valid split (or a 25% holdout of train when no
    valid rows exist). Returns a human-readable status string for the UI.

    The redundant in-function `import numpy as np` statements were removed;
    numpy is already imported at module level.
    """
    if not META.exists():
        return "No metadata found."
    df = pd.read_csv(META)
    if "split" not in df.columns or "wav" not in df.columns or "metrics" not in df.columns:
        return "metadata.csv missing columns."
    df["split"] = df["split"].astype(str).str.lower()
    tr = df[df["split"]=="train"]
    va = df[df["split"]=="valid"]
    if len(va)==0:
        # No explicit valid split: hold out a quarter of train.
        va = tr.sample(frac=0.25, random_state=0)
        tr = tr.drop(va.index)

    def build(d):
        # Assemble (features, targets); rows with a missing target value,
        # unparseable metrics, or a missing wav file are silently skipped.
        X, y = [], []
        for _, r in d.iterrows():
            dct = _parse_metrics(r["metrics"])
            val = _deep(dct, target_key) if "." in target_key else dct.get(target_key)
            if val is None:
                continue
            p = Path(r["wav"])
            p = p if p.is_absolute() else (DATA_ROOT / p)
            if not p.exists():
                continue
            X.append(feats(p))
            y.append(float(val))
        if not X:
            return None, None
        return np.stack(X), np.array(y, dtype=np.float32)

    Xtr, ytr = build(tr)
    Xva, yva = build(va)
    if Xtr is None or Xva is None:
        return "No usable samples for chosen target."
    model = RandomForestRegressor(n_estimators=300, random_state=0).fit(Xtr, ytr)
    pred = model.predict(Xva)
    mae = float(np.mean(np.abs(yva - pred)))
    rmse = float(np.sqrt(np.mean((yva - pred) ** 2)))
    return f"MAE={mae:.4f}s RMSE={rmse:.4f}s (n_train={len(Xtr)}, n_valid={len(Xva)})"
90
+
91
# Build the Gradio UI. The duplicate `import gradio as gr` that sat here was
# removed; gradio is already imported as `gr` at the top of the module.
with gr.Blocks() as demo:
    gr.Markdown("# RIR-Mega: RT60 Baseline (mini)")
    # Dropdown of metric keys (dotted keys address nested metrics dicts).
    target = gr.Dropdown(choices=RT60_KEYS_ORDER, value="rt60", label="Target key in metrics")
    out = gr.Markdown()
    btn = gr.Button("Run baseline")
    # Clicking runs the baseline and renders the status string as Markdown.
    btn.click(run_baseline, [target], [out])

if __name__ == "__main__":
    demo.launch()
space_app/requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ scikit-learn
3
+ numpy
4
+ pandas
5
+ soundfile