# NOTE: removed web-viewer residue that preceded this script ("Datasets:",
# "File size: 6,247 Bytes", commit hash "ee24db9", and a line-number gutter) —
# it was not part of the source and made the file unparseable as Python.
# scripts/make_checksums.py
import argparse, csv, hashlib, pathlib, sys, os
from collections import defaultdict
def sha256(p, buf=1024 * 1024):
    """Return the hex SHA-256 digest of the file at *p*.

    The file is read in *buf*-byte chunks (default 1 MiB) so arbitrarily
    large audio files can be hashed without loading them into memory.
    """
    digest = hashlib.sha256()
    with open(p, "rb") as fh:
        # iter(callable, sentinel): keep reading until read() returns b"".
        for chunk in iter(lambda: fh.read(buf), b""):
            digest.update(chunk)
    return digest.hexdigest()
def norm_header(h: str) -> str:
    """Normalize a CSV header cell: drop BOM, trim, lowercase, spaces -> underscores.

    FIX: the BOM is stripped *before* whitespace.  The original called
    .strip() first, so a cell like "\\ufeff Name" kept the space hidden
    behind the BOM and normalized to "_name" instead of "name".
    A None/empty cell normalizes to "".
    """
    return (h or "").lstrip("\ufeff").strip().lower().replace(" ", "_")
def norm_path_for_match(p: str) -> str:
    """Normalize a path string for dictionary lookup.

    Converts backslashes to forward slashes and strips any leading "./"
    prefixes, yielding a POSIX-style relative key.

    FIX: removed the dead `startswith(".\\\\")` check — backslashes are
    already replaced with "/" on the first line, so only "./" can remain.
    """
    q = p.replace("\\", "/")
    while q.startswith("./"):
        q = q[2:]
    return q
def main():
    """Hash audio files and write the digests into a metadata CSV.

    Scans --audio-root recursively for .wav/.flac files, computes their
    SHA-256 checksums, then rewrites --meta with the digest filled into the
    --checksum-col column (appending the column if absent).  Rows are matched
    to files by trying, in order: the CSV path as given (normalized), the
    path made relative to the project root (if absolute), the path resolved
    against the audio root, and finally a basename match accepted only when
    unique.  Exits via sys.exit() on a missing metadata file, an empty CSV,
    or an unidentifiable path column.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--meta", default="data/metadata/metadata.csv",
                    help="Path to metadata.csv")
    ap.add_argument("--audio-root", default="data/audio",
                    help="Root folder containing audio files")
    ap.add_argument("--path-col", default=None,
                    help="Column name that holds paths (default: auto-detect; prefers 'file_path')")
    ap.add_argument("--checksum-col", default="checksum_sha256",
                    help="Column name to write sha256 into")
    args = ap.parse_args()
    # Project root = parent of the scripts/ directory holding this file.
    root = pathlib.Path(__file__).resolve().parents[1]
    audio_root = (root / args.audio_root).resolve()
    meta_path = (root / args.meta).resolve()
    if not meta_path.exists():
        sys.exit(f"[ERR] metadata file not found: {meta_path}")
    if not audio_root.exists():
        # Not fatal: we still rewrite the CSV, just with blank checksums.
        print(f"[WARN] audio root not found yet: {audio_root}")
    # Build map: relative path (POSIX) -> sha256
    print("[INFO] Scanning audio files for checksums…")
    hashmap = {}
    for p in audio_root.rglob("*"):
        if p.suffix.lower() not in {".wav", ".flac"} or not p.is_file():
            continue
        rel = p.relative_to(root).as_posix()
        hashmap[norm_path_for_match(rel)] = sha256(p)
    # FIX: build the basename -> [digests] reverse index ONCE, up front.
    # The original left a dead `pass` placeholder here and instead rescanned
    # the entire hashmap for every unmatched row (O(rows * files)).
    by_basename = defaultdict(list)
    for key, digest in hashmap.items():
        by_basename[pathlib.Path(key).name].append(digest)
    # Read CSV with dialect sniffing and normalized headers
    print(f"[INFO] Reading metadata: {meta_path}")
    raw = meta_path.read_text(encoding="utf-8", errors="replace")
    try:
        # NOTE(review): sniffing only the header line is fragile for exotic
        # dialects, but a sniff failure safely falls back to comma below.
        dialect = csv.Sniffer().sniff(raw.splitlines()[0] if raw else ",")
    except Exception:
        dialect = csv.excel  # fallback to comma
    rows = []
    with open(meta_path, newline="", encoding="utf-8", errors="replace") as f:
        reader = csv.reader(f, dialect)
        try:
            headers = next(reader)
        except StopIteration:
            sys.exit("[ERR] metadata.csv appears empty.")
        norm_headers = [norm_header(h) for h in headers]
        hdr_map = {norm_header(h): i for i, h in enumerate(headers)}
        # Choose the path column: explicit --path-col, else first match from
        # a list of common names.
        candidate_names = [norm_header(args.path_col)] if args.path_col else [
            "file_path", "filepath", "path", "relative_path", "audio_path", "wav", "rir_path"
        ]
        path_col_norm = next((c for c in candidate_names if c in hdr_map), None)
        if not path_col_norm:
            msg = (f"[ERR] Could not find a path column. Looked for any of: "
                   f"{candidate_names}. Available columns: {norm_headers}")
            sys.exit(msg)
        checksum_col_norm = norm_header(args.checksum_col)
        # If checksum column absent, append it
        if checksum_col_norm not in hdr_map:
            headers.append(args.checksum_col)
            norm_headers.append(checksum_col_norm)
            checksum_idx = len(headers) - 1
        else:
            checksum_idx = hdr_map[checksum_col_norm]
        path_idx = hdr_map[path_col_norm]
        # Process rows
        rows.append(headers)  # header row for writing back
        for i, row in enumerate(reader, start=1):
            # pad short rows so path/checksum indices always exist
            if len(row) < len(headers):
                row += [""] * (len(headers) - len(row))
            csv_path_raw = (row[path_idx] or "").strip()
            if not csv_path_raw:
                print(f"[WARN] row {i}: empty path cell; leaving checksum blank")
                rows.append(row)
                continue
            # Try multiple lookup strategies
            candidates = []
            # 1) CSV path as given (normalized)
            candidates.append(norm_path_for_match(csv_path_raw))
            # 2) If CSV path is absolute, try making it relative to project root
            p = pathlib.Path(csv_path_raw)
            if p.is_absolute():
                try:
                    rel = p.relative_to(root).as_posix()
                    candidates.append(norm_path_for_match(rel))
                except Exception:
                    pass
            # 3) If CSV path is relative to audio_root
            try:
                rel2 = (audio_root / csv_path_raw).resolve().relative_to(root).as_posix()
                candidates.append(norm_path_for_match(rel2))
            except Exception:
                pass
            # deduplicate candidates, preserving first-seen order
            candidates = list(dict.fromkeys(candidates))
            sha = ""
            for cand in candidates:
                sha = hashmap.get(cand, "")
                if sha:
                    break
            # 4) Last resort: basename match, accepted only when unambiguous
            basename = pathlib.Path(csv_path_raw).name
            if not sha and basename:
                matches = by_basename.get(basename, [])
                if len(matches) == 1:
                    sha = matches[0]
            row[checksum_idx] = sha
            if not sha:
                print(f"[WARN] row {i}: no match for '{csv_path_raw}' (tried {len(candidates)} candidates)")
            rows.append(row)
    # Write back CSV (same dialect; UTF-8)
    print(f"[INFO] Writing updated metadata with checksums → {meta_path}")
    with open(meta_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f, dialect)
        writer.writerows(rows)
    print("[DONE] Checksums inserted. "
          f"Found hashes for ~{sum(1 for r in rows[1:] if r[checksum_idx])} rows.")
# Run only when executed as a script; importing this module has no side effects.
if __name__ == "__main__":
    main()