sourav-das committed on
Commit
7dfae77
·
verified ·
1 Parent(s): 16a1520

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .claude/settings.local.json +7 -0
  2. .dockerignore +6 -0
  3. .gitattributes +12 -0
  4. Dockerfile +29 -0
  5. README.md +4 -4
  6. WORKFLOWS.md +11 -0
  7. backend/__init__.py +0 -0
  8. backend/__pycache__/__init__.cpython-313.pyc +0 -0
  9. backend/__pycache__/file_manager.cpython-313.pyc +0 -0
  10. backend/__pycache__/main.cpython-313.pyc +0 -0
  11. backend/__pycache__/separator.cpython-313.pyc +0 -0
  12. backend/__pycache__/source_import.cpython-313.pyc +0 -0
  13. backend/__pycache__/task_queue.cpython-313.pyc +0 -0
  14. backend/examples/default/Bass.mp3 +3 -0
  15. backend/examples/default/Drums.mp3 +3 -0
  16. backend/examples/default/Guitar.mp3 +3 -0
  17. backend/examples/default/Other.mp3 +3 -0
  18. backend/examples/default/Piano.mp3 +3 -0
  19. backend/examples/default/README.md +11 -0
  20. backend/examples/default/Vocals.mp3 +3 -0
  21. backend/examples/default/original.mp3 +3 -0
  22. backend/file_manager.py +124 -0
  23. backend/main.py +271 -0
  24. backend/requirements.txt +11 -0
  25. backend/separator.py +248 -0
  26. backend/source_import.py +293 -0
  27. backend/task_queue.py +100 -0
  28. frontend/dist/assets/index-CiF4mW7R.js +0 -0
  29. frontend/dist/assets/index-P-4oQdbx.css +1 -0
  30. frontend/dist/index.html +14 -0
  31. frontend/index.html +13 -0
  32. frontend/node_modules/.bin/baseline-browser-mapping +3 -0
  33. frontend/node_modules/.bin/baseline-browser-mapping.cmd +3 -0
  34. frontend/node_modules/.bin/baseline-browser-mapping.ps1 +3 -0
  35. frontend/node_modules/.bin/browserslist +3 -0
  36. frontend/node_modules/.bin/browserslist.cmd +3 -0
  37. frontend/node_modules/.bin/browserslist.ps1 +3 -0
  38. frontend/node_modules/.bin/esbuild +3 -0
  39. frontend/node_modules/.bin/esbuild.cmd +3 -0
  40. frontend/node_modules/.bin/esbuild.ps1 +3 -0
  41. frontend/node_modules/.bin/jiti +3 -0
  42. frontend/node_modules/.bin/jiti.cmd +3 -0
  43. frontend/node_modules/.bin/jiti.ps1 +3 -0
  44. frontend/node_modules/.bin/jsesc +3 -0
  45. frontend/node_modules/.bin/jsesc.cmd +3 -0
  46. frontend/node_modules/.bin/jsesc.ps1 +3 -0
  47. frontend/node_modules/.bin/json5 +3 -0
  48. frontend/node_modules/.bin/json5.cmd +3 -0
  49. frontend/node_modules/.bin/json5.ps1 +3 -0
  50. frontend/node_modules/.bin/nanoid +3 -0
.claude/settings.local.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(docker run:*)"
5
+ ]
6
+ }
7
+ }
.dockerignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ frontend/node_modules
2
+ frontend/dist
3
+ **/__pycache__
4
+ *.pyc
5
+ .git
6
+ plan.md
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ backend/examples/default/Bass.mp3 filter=lfs diff=lfs merge=lfs -text
37
+ backend/examples/default/Drums.mp3 filter=lfs diff=lfs merge=lfs -text
38
+ backend/examples/default/Guitar.mp3 filter=lfs diff=lfs merge=lfs -text
39
+ backend/examples/default/original.mp3 filter=lfs diff=lfs merge=lfs -text
40
+ backend/examples/default/Other.mp3 filter=lfs diff=lfs merge=lfs -text
41
+ backend/examples/default/Piano.mp3 filter=lfs diff=lfs merge=lfs -text
42
+ backend/examples/default/Vocals.mp3 filter=lfs diff=lfs merge=lfs -text
43
+ frontend/node_modules/@esbuild/win32-x64/esbuild.exe filter=lfs diff=lfs merge=lfs -text
44
+ frontend/node_modules/@rollup/rollup-win32-x64-gnu/rollup.win32-x64-gnu.node filter=lfs diff=lfs merge=lfs -text
45
+ frontend/node_modules/@rollup/rollup-win32-x64-msvc/rollup.win32-x64-msvc.node filter=lfs diff=lfs merge=lfs -text
46
+ frontend/node_modules/@tailwindcss/oxide-win32-x64-msvc/tailwindcss-oxide.win32-x64-msvc.node filter=lfs diff=lfs merge=lfs -text
47
+ frontend/node_modules/lightningcss-win32-x64-msvc/lightningcss.win32-x64-msvc.node filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Stage 1: Build frontend
2
+ FROM node:20-slim AS frontend-build
3
+ WORKDIR /app/frontend
4
+ COPY frontend/package.json frontend/package-lock.json ./
5
+ RUN npm ci
6
+ COPY frontend/ ./
7
+ RUN npm run build
8
+
9
+ # Stage 2: Runtime
10
+ FROM python:3.11-slim
11
+
12
+ RUN apt-get update && apt-get install -y --no-install-recommends \
13
+ ffmpeg gcc libc6-dev && rm -rf /var/lib/apt/lists/*
14
+
15
+ WORKDIR /app
16
+
17
+ COPY backend/requirements.txt backend/requirements.txt
18
+ RUN pip install --no-cache-dir -r backend/requirements.txt
19
+
20
+ COPY --from=frontend-build /app/frontend/dist frontend/dist
21
+ COPY backend/ backend/
22
+
23
+ EXPOSE 7860
24
+
25
+ RUN useradd -m -u 1000 user
26
+ USER user
27
+ ENV HOME=/home/user PATH=/home/user/.local/bin:$PATH
28
+
29
+ CMD ["python", "-m", "uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,10 +1,10 @@
1
  ---
2
  title: Stem Separator
3
- emoji: 👁
4
  colorFrom: purple
5
- colorTo: yellow
6
  sdk: docker
 
7
  pinned: false
 
8
  ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Stem Separator
3
+ emoji: "\U0001F3B5"
4
  colorFrom: purple
5
+ colorTo: pink
6
  sdk: docker
7
+ app_port: 7860
8
  pinned: false
9
+ license: mit
10
  ---
 
 
WORKFLOWS.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # stem-separator development workflows
2
+
3
+ # to run locally
4
+
5
+ ```bash
6
+ docker build -t stem-separator .
7
+ docker run -p 7860:7860 stem-separator  # CPU only (slower)
8
+ OR
9
+ docker run --gpus all -p 7860:7860 stem-separator  # GPU enabled (much faster)
10
+ ```
11
+ Then open http://localhost:7860
backend/__init__.py ADDED
File without changes
backend/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (116 Bytes). View file
 
backend/__pycache__/file_manager.cpython-313.pyc ADDED
Binary file (6.39 kB). View file
 
backend/__pycache__/main.cpython-313.pyc ADDED
Binary file (13.6 kB). View file
 
backend/__pycache__/separator.cpython-313.pyc ADDED
Binary file (12.1 kB). View file
 
backend/__pycache__/source_import.cpython-313.pyc ADDED
Binary file (13.2 kB). View file
 
backend/__pycache__/task_queue.cpython-313.pyc ADDED
Binary file (4.42 kB). View file
 
backend/examples/default/Bass.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ade21887af48358d366ae8d7949d221a1ce010e92146fa0079c7d682ad8ba32
3
+ size 567422
backend/examples/default/Drums.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c79ed43997db1b848b1103fe361fa48dfc5598f48bfc20437ef2051f192d7ac
3
+ size 567422
backend/examples/default/Guitar.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fa01630da476372fb4f72550e61a6ff5bad594f337941f7e700d924b56494ce
3
+ size 567422
backend/examples/default/Other.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f303284a10ffcd3c88b9e502234404de5b9befd60639de55b1574b5bf3407bb
3
+ size 567422
backend/examples/default/Piano.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24401442c2da04b20cf794e39b480f8275cfc367d3659f1a45e1b2a49901af88
3
+ size 567422
backend/examples/default/README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Place the bundled example audio files for the demo flow in this directory.
2
+
3
+ Expected filenames:
4
+
5
+ - `original.mp3`
6
+ - `Vocals.mp3`
7
+ - `Drums.mp3`
8
+ - `Bass.mp3`
9
+ - `Guitar.mp3`
10
+ - `Piano.mp3`
11
+ - `Other.mp3`
backend/examples/default/Vocals.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba2bd33b2e66055c21f75d1058627de88783456c9c7d54a62a598888407637da
3
+ size 567422
backend/examples/default/original.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb3d5f483fa75556f22abd1470fbffd425106c8907707166e79c4a45a16ea19
3
+ size 378252
backend/file_manager.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import time
4
+ import uuid
5
+ import asyncio
6
+ import json
7
+ from pathlib import Path
8
+
9
+ BASE_DIR = Path("/tmp/stem-sep")
10
+ MAX_AGE_SECONDS = 30 * 60 # 30 minutes
11
+ CLEANUP_INTERVAL = 5 * 60 # 5 minutes
12
+
13
+
14
def create_job() -> str:
    """Allocate a fresh job directory under BASE_DIR and return its id."""
    new_id = uuid.uuid4().hex[:12]
    job_dir = BASE_DIR / new_id
    job_dir.mkdir(parents=True, exist_ok=True)
    # Record creation time in a marker file so cleanup_loop can age jobs out.
    (job_dir / ".created").write_text(str(time.time()))
    return new_id
21
+
22
+
23
def get_job_dir(job_id: str) -> Path:
    """Resolve the working directory for a job id (the path may not exist)."""
    return BASE_DIR / job_id
25
+
26
+
27
def save_upload(job_id: str, filename: str, content: bytes) -> Path:
    """Persist raw upload bytes as `input<ext>` in the job directory.

    The original extension is kept (falling back to ".wav" when absent) so
    downstream decoding can sniff the container format.
    """
    suffix = Path(filename).suffix or ".wav"
    destination = get_job_dir(job_id) / f"input{suffix}"
    destination.write_bytes(content)
    return destination
33
+
34
+
35
def save_imported_audio(job_id: str, source_filename: str, content: bytes) -> Path:
    """Alias of save_upload for audio fetched from an external source."""
    return save_upload(job_id, source_filename, content)
37
+
38
+
39
def get_input_file(job_id: str) -> Path | None:
    """Return the job's stored input audio (any regular file named `input*`).

    None when the job directory is missing or holds no input file.
    """
    job_dir = get_job_dir(job_id)
    if not job_dir.exists():
        return None
    # glob preserves the original "name starts with 'input'" contract.
    for candidate in job_dir.glob("input*"):
        if candidate.is_file():
            return candidate
    return None
48
+
49
+
50
def get_metadata_path(job_id: str) -> Path:
    """Path of the job's metadata.json (the file may not exist yet)."""
    return get_job_dir(job_id) / "metadata.json"
52
+
53
+
54
def save_job_metadata(job_id: str, metadata: dict) -> Path:
    """Serialize `metadata` to the job's metadata.json and return its path."""
    target = get_metadata_path(job_id)
    payload = json.dumps(metadata, indent=2)
    target.write_text(payload, encoding="utf-8")
    return target
58
+
59
+
60
def load_job_metadata(job_id: str) -> dict | None:
    """Read the job's metadata.json; None when no metadata was ever saved."""
    source = get_metadata_path(job_id)
    if source.exists():
        return json.loads(source.read_text(encoding="utf-8"))
    return None
65
+
66
+
67
def get_output_dir(job_id: str) -> Path:
    """Return (creating on demand) the job's `stems` output directory."""
    stems_dir = get_job_dir(job_id) / "stems"
    stems_dir.mkdir(parents=True, exist_ok=True)
    return stems_dir
71
+
72
+
73
def list_stem_files(job_id: str) -> dict[str, str]:
    """Map stem name -> filename for every rendered stem of a job.

    The `stems` subdirectory is searched before the job root, and the first
    file found for a given stem wins. Files named `input*` are the source
    audio and are excluded.
    """
    audio_exts = {".wav", ".mp3", ".flac", ".aac"}
    found: dict[str, str] = {}
    for directory in (get_output_dir(job_id), get_job_dir(job_id)):
        if not directory.exists():
            continue
        for entry in directory.iterdir():
            if not entry.is_file():
                continue
            if entry.suffix.lower() not in audio_exts:
                continue
            if entry.name.startswith("input") or entry.stem in found:
                continue
            found[entry.stem] = entry.name
    return found
84
+
85
+
86
def get_file_path(job_id: str, filename: str) -> Path | None:
    """Locate `filename` for a job, preferring the stems directory over the
    job root; None when the file exists in neither place."""
    for base in (get_output_dir(job_id), get_job_dir(job_id)):
        candidate = base / filename
        if candidate.exists():
            return candidate
    return None
95
+
96
+
97
def delete_job(job_id: str):
    """Remove a job's directory tree; silent on errors and absent jobs."""
    target = get_job_dir(job_id)
    if target.exists():
        shutil.rmtree(target, ignore_errors=True)
101
+
102
+
103
async def cleanup_loop():
    """Background task: every CLEANUP_INTERVAL seconds, delete job
    directories older than MAX_AGE_SECONDS.

    Age comes from the `.created` marker written by create_job; directories
    without a marker fall back to their mtime.

    Fix: error handling is now per entry. Previously the try wrapped the
    whole sweep, so one corrupt marker (float() raising) or a racing
    deletion aborted cleanup of every remaining directory for that pass.
    """
    while True:
        await asyncio.sleep(CLEANUP_INTERVAL)
        if not BASE_DIR.exists():
            continue
        now = time.time()
        try:
            entries = list(BASE_DIR.iterdir())
        except OSError:
            # BASE_DIR vanished or became unreadable between checks.
            continue
        for entry in entries:
            try:
                if not entry.is_dir():
                    continue
                marker = entry / ".created"
                if marker.exists():
                    created = float(marker.read_text().strip())
                else:
                    # No marker — use mtime
                    created = entry.stat().st_mtime
                if now - created > MAX_AGE_SECONDS:
                    shutil.rmtree(entry, ignore_errors=True)
            except Exception:
                # Best-effort: a bad marker or racing delete must not stop
                # cleanup of the other job directories.
                continue
backend/main.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import io
3
+ import mimetypes
4
+ import zipfile
5
+ from pathlib import Path
6
+ from typing import Literal
7
+
8
+ from fastapi import FastAPI, UploadFile, File, HTTPException
9
+ from fastapi.responses import FileResponse, StreamingResponse
10
+ from fastapi.staticfiles import StaticFiles
11
+ from pydantic import BaseModel
12
+ from sse_starlette.sse import EventSourceResponse
13
+
14
+ from backend import file_manager
15
+ from backend.source_import import SourceImportError, import_source
16
+ from backend.task_queue import enqueue_job, get_job_progress, worker_loop, jobs
17
+
18
+ import json
19
+
20
+ app = FastAPI(title="Stem Separator")
21
+
22
+ MAX_UPLOAD_SIZE = 100 * 1024 * 1024 # 100MB
23
+ EXAMPLE_NAME = "default"
24
+ EXAMPLE_DIR = Path(__file__).parent / "examples" / EXAMPLE_NAME
25
+ EXAMPLE_ORIGINAL_FILE = "original.mp3"
26
+ EXAMPLE_STEM_FILES = {
27
+ "Vocals": "Vocals.mp3",
28
+ "Drums": "Drums.mp3",
29
+ "Bass": "Bass.mp3",
30
+ "Guitar": "Guitar.mp3",
31
+ "Piano": "Piano.mp3",
32
+ "Other": "Other.mp3",
33
+ }
34
+
35
+
36
+ # --- Startup / Shutdown ---
37
+
38
@app.on_event("startup")
async def startup():
    """Launch the background separation worker and the periodic cleanup task."""
    asyncio.create_task(worker_loop())
    asyncio.create_task(file_manager.cleanup_loop())
42
+
43
+
44
+ # --- API Routes ---
45
+
46
@app.post("/api/upload")
async def upload_file(file: UploadFile = File(...)):
    """Accept an audio upload, validate size and extension, create a job."""
    content = await file.read()
    if len(content) > MAX_UPLOAD_SIZE:
        raise HTTPException(413, "File too large (max 100MB)")

    permitted = {".wav", ".mp3", ".flac", ".ogg", ".m4a", ".aac", ".wma", ".opus"}
    ext = Path(file.filename or "file.wav").suffix.lower()
    if ext not in permitted:
        raise HTTPException(400, f"Unsupported format: {ext}")

    job_id = file_manager.create_job()
    file_manager.save_upload(job_id, file.filename or "input.wav", content)
    return {"job_id": job_id, "filename": file.filename}
61
+
62
+
63
class SeparateRequest(BaseModel):
    """Request payload for POST /api/separate."""
    job_id: str
    stems: list[str]
    output_format: Literal["wav", "mp3", "aac"] = "wav"
67
+
68
+
69
class ImportUrlRequest(BaseModel):
    """Request payload for POST /api/import-url."""
    url: str
71
+
72
+
73
@app.post("/api/separate")
async def separate(req: SeparateRequest):
    """Queue a separation run for an already-uploaded job."""
    if not file_manager.get_job_dir(req.job_id).exists():
        raise HTTPException(404, "Job not found")

    valid_stems = {"Vocals", "Drums", "Bass", "Guitar", "Piano", "Other"}
    requested = [s for s in req.stems if s in valid_stems]
    if not requested:
        raise HTTPException(400, "No valid stems selected")

    accepted = await enqueue_job(req.job_id, requested, req.output_format)
    if not accepted:
        raise HTTPException(429, "Queue full, try again later")

    return {"status": "queued", "job_id": req.job_id}
89
+
90
+
91
@app.post("/api/import-url")
async def import_url(req: ImportUrlRequest):
    """Fetch audio from an external URL.

    The blocking import runs off the event loop via asyncio.to_thread;
    SourceImportError maps to 400, anything else to 500.
    """
    try:
        imported = await asyncio.to_thread(import_source, req.url)
    except SourceImportError as exc:
        raise HTTPException(400, str(exc)) from exc
    except Exception as exc:
        raise HTTPException(500, f"Failed to import source: {exc}") from exc

    return {
        "job_id": imported.job_id,
        "filename": imported.filename,
        "source_url": imported.source_url,
        "resolved_url": imported.resolved_url,
        "title": imported.title,
        "platform": imported.platform,
    }
108
+
109
+
110
@app.get("/api/progress/{job_id}")
async def progress_stream(job_id: str):
    """Stream job progress as server-sent events until the job finishes."""

    async def event_generator():
        previous = None
        while True:
            prog = get_job_progress(job_id)
            if prog is None:
                yield {"data": json.dumps({"state": "unknown", "progress": 0, "message": "Job not found"})}
                return

            snapshot = {
                "state": prog.state,
                "progress": prog.progress,
                "message": prog.message,
            }
            if prog.stems is not None:
                snapshot["stems"] = prog.stems
            if prog.error is not None:
                snapshot["error"] = prog.error

            # De-duplicate: only emit when the payload actually changed.
            encoded = json.dumps(snapshot)
            if encoded != previous:
                yield {"data": encoded}
                previous = encoded

            if prog.state in ("done", "error"):
                return

            await asyncio.sleep(0.3)

    return EventSourceResponse(event_generator(), ping=10)
142
+
143
+
144
@app.get("/api/audio/{job_id}/{filename}")
async def serve_audio(job_id: str, filename: str):
    """Serve a job's audio file inline for in-browser playback.

    Fix: the route template must declare `{filename}` so FastAPI binds the
    final path segment to the `filename` parameter — the literal
    "(unknown)" segment could never match and left `filename` unresolvable.
    """
    path = file_manager.get_file_path(job_id, filename)
    if path is None:
        raise HTTPException(404, "File not found")
    return build_file_response(path, inline=True)
150
+
151
+
152
@app.get("/api/download/{job_id}/all")
async def download_all(job_id: str):
    """Bundle every rendered stem of a job into an in-memory zip archive."""
    stems = file_manager.list_stem_files(job_id)
    if not stems:
        raise HTTPException(404, "No stems found")

    archive = io.BytesIO()
    with zipfile.ZipFile(archive, "w", zipfile.ZIP_DEFLATED) as zf:
        for filename in stems.values():
            filepath = file_manager.get_file_path(job_id, filename)
            # Skip entries that disappeared between listing and zipping.
            if filepath and filepath.exists():
                zf.write(filepath, filename)
    archive.seek(0)

    return StreamingResponse(
        archive,
        media_type="application/zip",
        headers={"Content-Disposition": "attachment; filename=stems.zip"},
    )
171
+
172
+
173
@app.get("/api/download/{job_id}/{filename}")
async def download_stem(job_id: str, filename: str):
    """Download a single stem file as an attachment.

    Fix: restored the `{filename}` path parameter in the route template
    (scrape artifact left a literal "(unknown)" segment, which FastAPI
    could never bind to the `filename` argument).
    """
    path = file_manager.get_file_path(job_id, filename)
    if path is None:
        raise HTTPException(404, "File not found")
    return build_file_response(path, download_name=filename)
179
+
180
+
181
@app.delete("/api/job/{job_id}")
async def delete_job(job_id: str):
    """Drop a job's files and any tracked queue state; idempotent."""
    file_manager.delete_job(job_id)
    jobs.pop(job_id, None)
    return {"status": "deleted"}
186
+
187
+
188
@app.get("/api/examples/default")
async def get_example_output():
    """Describe the bundled demo track and its pre-rendered stems."""
    original_path = get_example_file(EXAMPLE_ORIGINAL_FILE, allow_original=True)

    stems = []
    for stem_name, filename in EXAMPLE_STEM_FILES.items():
        # get_example_file raises 404 early if the bundled asset is absent.
        stem_path = get_example_file(filename)
        stems.append({
            "name": stem_name,
            "filename": stem_path.name,
            "audioUrl": f"/api/examples/{EXAMPLE_NAME}/audio/{stem_path.name}",
            "downloadUrl": f"/api/examples/{EXAMPLE_NAME}/download/{stem_path.name}",
        })

    return {
        "original": {
            "filename": original_path.name,
            "audioUrl": f"/api/examples/{EXAMPLE_NAME}/audio/{original_path.name}",
        },
        "stems": stems,
        "downloadAllUrl": f"/api/examples/{EXAMPLE_NAME}/download/all",
    }
211
+
212
+
213
@app.get("/api/examples/default/audio/{filename}")
async def serve_example_audio(filename: str):
    """Serve a bundled example audio file inline.

    Fix: restored the `{filename}` path parameter in the route template
    (a literal "(unknown)" segment could never match and left the
    `filename` argument unbound).
    """
    path = get_example_file(filename, allow_original=True)
    return build_file_response(path, inline=True)
217
+
218
+
219
@app.get("/api/examples/default/download/all")
async def download_all_example_stems():
    """Zip every bundled example stem (not the original) for download."""
    archive = io.BytesIO()
    with zipfile.ZipFile(archive, "w", zipfile.ZIP_DEFLATED) as zf:
        for filename in EXAMPLE_STEM_FILES.values():
            asset = get_example_file(filename)
            zf.write(asset, asset.name)
    archive.seek(0)

    return StreamingResponse(
        archive,
        media_type="application/zip",
        headers={"Content-Disposition": "attachment; filename=example-stems.zip"},
    )
233
+
234
+
235
@app.get("/api/examples/default/download/{filename}")
async def download_example_file(filename: str):
    """Download one bundled example stem as an attachment.

    Fix: restored the `{filename}` path parameter in the route template
    (scrape artifact left a literal "(unknown)" segment).
    """
    path = get_example_file(filename)
    return build_file_response(path, download_name=path.name)
239
+
240
+
241
+ # --- Static Files (React build) - MUST be last ---
242
+
243
+ frontend_dist = Path(__file__).parent.parent / "frontend" / "dist"
244
+ if frontend_dist.exists():
245
+ app.mount("/", StaticFiles(directory=str(frontend_dist), html=True), name="frontend")
246
+
247
+
248
def build_file_response(path: Path, inline: bool = False, download_name: str | None = None) -> FileResponse:
    """Wrap a file in a FileResponse advertising byte-range support.

    `download_name` forces an attachment download; otherwise `inline=True`
    marks the file for in-browser playback. With neither, no
    Content-Disposition header is set.
    """
    headers = {"Accept-Ranges": "bytes"}
    if download_name is not None:
        headers["Content-Disposition"] = f'attachment; filename="{download_name}"'
    elif inline:
        headers["Content-Disposition"] = f'inline; filename="{path.name}"'

    guessed, _ = mimetypes.guess_type(path.name)
    return FileResponse(path, media_type=guessed or "application/octet-stream", headers=headers)
257
+
258
+
259
def get_example_file(filename: str, allow_original: bool = False) -> Path:
    """Validate `filename` against the whitelist of bundled example assets.

    Raises HTTPException(404) for names outside the whitelist (this also
    blocks path traversal — only known basenames ever reach the
    filesystem) or for a whitelisted asset missing on disk.

    Fix: the missing-asset message now interpolates the actual filename;
    the f-string placeholder had been lost, leaving a literal "(unknown)".
    """
    valid_files = set(EXAMPLE_STEM_FILES.values())
    if allow_original:
        valid_files.add(EXAMPLE_ORIGINAL_FILE)

    if filename not in valid_files:
        raise HTTPException(404, "Example file not found")

    path = EXAMPLE_DIR / filename
    if not path.exists():
        raise HTTPException(404, f"Missing example asset: {filename}")

    return path
backend/requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi>=0.104.0
2
+ uvicorn[standard]>=0.24.0
3
+ python-multipart>=0.0.6
4
+ sse-starlette>=1.8.0
5
+ audio-separator[cpu]>=0.17.0
6
+ huggingface_hub>=0.20.0
7
+ pydub>=0.25.1
8
+ aiofiles>=23.2.1
9
+ soundfile>=0.12.0
10
+ torchaudio
11
+ yt-dlp>=2025.1.15
backend/separator.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from typing import Callable
4
+
5
+ import numpy as np
6
+ import soundfile as sf
7
+ import torch
8
+ import torchaudio
9
+ import yaml
10
+ from pydub import AudioSegment
11
+ from huggingface_hub import hf_hub_download
12
+ from audio_separator.separator.uvr_lib_v5.roformer.bs_roformer import BSRoformer
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
+ logger.info(f"Using device: {DEVICE}")
18
+
19
+ if DEVICE.type == "cpu":
20
+ cpu_count = os.cpu_count() or 1
21
+ torch.set_num_threads(cpu_count)
22
+ torch.set_num_interop_threads(max(1, cpu_count // 2))
23
+ logger.info(f"CPU mode: set torch threads={cpu_count}, interop={max(1, cpu_count // 2)}")
24
+
25
+ MODEL_REPO = "jarredou/BS-ROFO-SW-Fixed"
26
+ MODEL_FILENAME = "BS-Rofo-SW-Fixed.ckpt"
27
+ MODEL_CONFIG = "BS-Rofo-SW-Fixed.yaml"
28
+ MODEL_DIR = "/tmp/models"
29
+
30
+ # Stem order matches the model's training config
31
+ STEM_ORDER = ["bass", "drums", "other", "vocals", "guitar", "piano"]
32
+ STEM_NAME_MAP = {
33
+ "bass": "Bass",
34
+ "drums": "Drums",
35
+ "other": "Other",
36
+ "vocals": "Vocals",
37
+ "guitar": "Guitar",
38
+ "piano": "Piano",
39
+ }
40
+
41
+
42
+ class StemSeparatorService:
43
+ _instance = None
44
+ _model_loaded = False
45
+
46
+ def __new__(cls):
47
+ if cls._instance is None:
48
+ cls._instance = super().__new__(cls)
49
+ return cls._instance
50
+
51
    def load_model(self):
        """Download (if needed) and initialize the BS-RoFormer checkpoint.

        Idempotent: returns immediately once `_model_loaded` is set.
        Populates self.sample_rate, self.chunk_size, self.num_overlap and
        self.model (moved to DEVICE and put in eval mode).
        """
        if self._model_loaded:
            return

        os.makedirs(MODEL_DIR, exist_ok=True)
        logger.info(f"Downloading model from HF Hub: {MODEL_REPO}")

        # hf_hub_download is effectively a no-op when the files are cached.
        hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME, local_dir=MODEL_DIR)
        hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_CONFIG, local_dir=MODEL_DIR)

        # Parse config
        with open(os.path.join(MODEL_DIR, MODEL_CONFIG)) as f:
            config = yaml.load(f, Loader=yaml.FullLoader)

        model_cfg = config["model"]
        audio_cfg = config.get("audio", {})
        inference_cfg = config.get("inference", {})

        # Inference parameters used by _process_audio; defaults mirror the
        # published config values.
        self.sample_rate = audio_cfg.get("sample_rate", 44100)
        self.chunk_size = audio_cfg.get("chunk_size", 588800)
        self.num_overlap = inference_cfg.get("num_overlap", 2)

        # Use flash_attn only on CUDA where it's supported
        use_flash = DEVICE.type == "cuda"

        # Create model directly — bypass audio-separator's Separator wrapper entirely
        self.model = BSRoformer(
            dim=model_cfg["dim"],
            depth=model_cfg["depth"],
            stereo=model_cfg.get("stereo", True),
            num_stems=model_cfg.get("num_stems", 6),
            time_transformer_depth=model_cfg.get("time_transformer_depth", 1),
            freq_transformer_depth=model_cfg.get("freq_transformer_depth", 1),
            linear_transformer_depth=model_cfg.get("linear_transformer_depth", 0),
            freqs_per_bands=tuple(model_cfg["freqs_per_bands"]),
            dim_head=model_cfg.get("dim_head", 64),
            heads=model_cfg.get("heads", 8),
            attn_dropout=model_cfg.get("attn_dropout", 0.1),
            ff_dropout=model_cfg.get("ff_dropout", 0.1),
            flash_attn=use_flash,
            dim_freqs_in=model_cfg.get("dim_freqs_in", 1025),
            stft_n_fft=model_cfg.get("stft_n_fft", 2048),
            stft_hop_length=model_cfg.get("stft_hop_length", 512),
            stft_win_length=model_cfg.get("stft_win_length", 2048),
            stft_normalized=model_cfg.get("stft_normalized", False),
            mask_estimator_depth=model_cfg.get("mask_estimator_depth", 2),
            multi_stft_resolution_loss_weight=model_cfg.get("multi_stft_resolution_loss_weight", 1.0),
            multi_stft_resolutions_window_sizes=tuple(
                model_cfg.get("multi_stft_resolutions_window_sizes", (4096, 2048, 1024, 512, 256))
            ),
            multi_stft_hop_size=model_cfg.get("multi_stft_hop_size", 147),
            multi_stft_normalized=model_cfg.get("multi_stft_normalized", False),
            mlp_expansion_factor=model_cfg.get("mlp_expansion_factor", 4),
            use_torch_checkpoint=model_cfg.get("use_torch_checkpoint", False),
            skip_connection=model_cfg.get("skip_connection", False),
        )

        # Load checkpoint weights
        ckpt_path = os.path.join(MODEL_DIR, MODEL_FILENAME)
        try:
            # weights_only=True avoids arbitrary pickle execution on load.
            state_dict = torch.load(ckpt_path, map_location=DEVICE, weights_only=True)
        except TypeError:
            # Older torch versions do not accept the weights_only keyword.
            state_dict = torch.load(ckpt_path, map_location=DEVICE)

        # Trainer-style checkpoints nest weights under "state_dict"/"model".
        if isinstance(state_dict, dict) and "state_dict" in state_dict:
            state_dict = state_dict["state_dict"]
        elif isinstance(state_dict, dict) and "model" in state_dict:
            state_dict = state_dict["model"]

        self.model.load_state_dict(state_dict)
        self.model.to(DEVICE)
        self.model.eval()

        logger.info(f"BS-RoFormer model loaded successfully on {DEVICE}")
        self._model_loaded = True
126
+
127
+ def _process_audio(self, audio_tensor: torch.Tensor, progress_callback) -> torch.Tensor:
128
+ """Run inference with chunking and overlap-add."""
129
+ chunk_size = self.chunk_size
130
+ step = chunk_size // self.num_overlap
131
+
132
+ channels, total_samples = audio_tensor.shape
133
+ num_stems = 6
134
+
135
+ # Pad so we cover the full audio
136
+ pad_needed = max(0, chunk_size - total_samples)
137
+ if total_samples > chunk_size:
138
+ remainder = (total_samples - chunk_size) % step
139
+ if remainder != 0:
140
+ pad_needed = step - remainder
141
+
142
+ if pad_needed > 0:
143
+ audio_tensor = torch.nn.functional.pad(audio_tensor, (0, pad_needed))
144
+
145
+ padded_len = audio_tensor.shape[1]
146
+
147
+ # Move input to device
148
+ audio_tensor = audio_tensor.to(DEVICE)
149
+
150
+ # Output accumulators (keep on CPU to save GPU memory)
151
+ result = torch.zeros(num_stems, channels, padded_len)
152
+ weight = torch.zeros(padded_len)
153
+
154
+ # Hann window for smooth crossfading
155
+ window = torch.hann_window(chunk_size, device=DEVICE)
156
+
157
+ # Build chunk positions
158
+ starts = list(range(0, padded_len - chunk_size + 1, step))
159
+ total_chunks = len(starts)
160
+
161
+ for i, start in enumerate(starts):
162
+ chunk = audio_tensor[:, start : start + chunk_size]
163
+
164
+ with torch.no_grad():
165
+ # BSRoformer: (batch, channels, time) -> (batch, stems, channels, time)
166
+ output = self.model(chunk.unsqueeze(0))
167
+
168
+ output = output.squeeze(0) # (stems, channels, time)
169
+
170
+ # Move output to CPU for accumulation
171
+ output_cpu = output.cpu()
172
+ window_cpu = window.cpu()
173
+ result[:, :, start : start + chunk_size] += output_cpu * window_cpu
174
+ weight[start : start + chunk_size] += window_cpu
175
+
176
+ frac = (i + 1) / total_chunks
177
+ progress_callback("separating", 0.2 + frac * 0.7)
178
+
179
+ # Normalize by overlap weight
180
+ result = result / weight.clamp(min=1e-8).unsqueeze(0).unsqueeze(0)
181
+
182
+ # Remove padding
183
+ return result[:, :, :total_samples]
184
+
185
+ def separate(
186
+ self,
187
+ input_path: str,
188
+ output_dir: str,
189
+ stems: list[str],
190
+ output_format: str,
191
+ progress_callback: Callable[[str, float], None],
192
+ ) -> dict[str, str]:
193
+ progress_callback("loading_model", 0.05)
194
+ self.load_model()
195
+
196
+ progress_callback("separating", 0.15)
197
+
198
+ # Load audio
199
+ audio, sr = sf.read(input_path)
200
+ if audio.ndim == 1:
201
+ audio = np.stack([audio, audio], axis=1) # Mono to stereo
202
+
203
+ audio_tensor = torch.tensor(audio.T, dtype=torch.float32) # (channels, samples)
204
+
205
+ # Resample if needed
206
+ if sr != self.sample_rate:
207
+ resampler = torchaudio.transforms.Resample(sr, self.sample_rate)
208
+ audio_tensor = resampler(audio_tensor)
209
+
210
+ # Run inference
211
+ separated = self._process_audio(audio_tensor, progress_callback)
212
+
213
+ progress_callback("finalizing", 0.92)
214
+
215
+ # Save requested stems
216
+ result: dict[str, str] = {}
217
+ for i, stem_key in enumerate(STEM_ORDER):
218
+ canonical = STEM_NAME_MAP[stem_key]
219
+ if canonical in stems:
220
+ stem_audio = separated[i].numpy().T # (samples, channels)
221
+ # Clip to prevent clipping artifacts
222
+ stem_audio = np.clip(stem_audio, -1.0, 1.0)
223
+ clean_name = f"{canonical}.{output_format}"
224
+ out_path = os.path.join(output_dir, clean_name)
225
+ self._write_output(out_path, stem_audio, output_format)
226
+ result[canonical] = clean_name
227
+
228
+ progress_callback("done", 1.0)
229
+ return result
230
+
231
+ def _write_output(self, output_path: str, stem_audio: np.ndarray, output_format: str):
232
+ if output_format == "wav":
233
+ sf.write(output_path, stem_audio, self.sample_rate, subtype="FLOAT")
234
+ return
235
+
236
+ pcm = (stem_audio * 32767.0).astype(np.int16)
237
+ segment = AudioSegment(
238
+ data=pcm.tobytes(),
239
+ sample_width=2,
240
+ frame_rate=self.sample_rate,
241
+ channels=pcm.shape[1] if pcm.ndim > 1 else 1,
242
+ )
243
+
244
+ export_format = "mp3" if output_format == "mp3" else "adts"
245
+ export_kwargs = {"format": export_format}
246
+ if output_format in {"mp3", "aac"}:
247
+ export_kwargs["bitrate"] = "192k"
248
+ segment.export(output_path, **export_kwargs)
backend/source_import.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import re
3
+ import shutil
4
+ import tempfile
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+ from urllib.parse import parse_qs, quote, urlparse
8
+ from urllib.request import Request, urlopen
9
+
10
+ from yt_dlp import YoutubeDL
11
+
12
+ from backend import file_manager
13
+
14
+
15
+ USER_AGENT = (
16
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
17
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
18
+ "Chrome/122.0.0.0 Safari/537.36"
19
+ )
20
+
21
+
22
+ @dataclass
23
+ class ImportedTrack:
24
+ job_id: str
25
+ filename: str
26
+ source_url: str
27
+ resolved_url: str | None
28
+ title: str
29
+ platform: str
30
+
31
+
32
+ class SourceImportError(Exception):
33
+ pass
34
+
35
+
36
+ def import_source(url: str) -> ImportedTrack:
37
+ normalized_url = normalize_url(url)
38
+ platform = classify_platform(normalized_url)
39
+ job_id = file_manager.create_job()
40
+ job_dir = file_manager.get_job_dir(job_id)
41
+
42
+ try:
43
+ if platform in {"youtube", "ytmusic"}:
44
+ title, display_filename = download_youtube_source(
45
+ normalized_url,
46
+ job_dir,
47
+ )
48
+ metadata = {
49
+ "source_kind": platform,
50
+ "source_url": normalized_url,
51
+ "resolved_url": normalized_url,
52
+ "title": title,
53
+ "filename": display_filename,
54
+ }
55
+ file_manager.save_job_metadata(job_id, metadata)
56
+ return ImportedTrack(
57
+ job_id=job_id,
58
+ filename=display_filename,
59
+ source_url=normalized_url,
60
+ resolved_url=normalized_url,
61
+ title=title,
62
+ platform=platform,
63
+ )
64
+
65
+ track_title, primary_artist = fetch_spotify_track_metadata(normalized_url)
66
+ search_query = f"{track_title} {primary_artist} audio"
67
+ resolved_url = resolve_youtube_search(search_query)
68
+ title, display_filename = download_youtube_source(
69
+ resolved_url,
70
+ job_dir,
71
+ title_hint=f"{track_title} - {primary_artist}",
72
+ )
73
+ metadata = {
74
+ "source_kind": "spotify",
75
+ "source_url": normalized_url,
76
+ "resolved_url": resolved_url,
77
+ "title": title,
78
+ "filename": display_filename,
79
+ "spotify_track_title": track_title,
80
+ "spotify_primary_artist": primary_artist,
81
+ }
82
+ file_manager.save_job_metadata(job_id, metadata)
83
+ return ImportedTrack(
84
+ job_id=job_id,
85
+ filename=display_filename,
86
+ source_url=normalized_url,
87
+ resolved_url=resolved_url,
88
+ title=title,
89
+ platform="spotify",
90
+ )
91
+ except Exception:
92
+ file_manager.delete_job(job_id)
93
+ raise
94
+
95
+
96
+ def normalize_url(url: str) -> str:
97
+ value = url.strip()
98
+ if not value:
99
+ raise SourceImportError("Paste a YouTube, YouTube Music, or Spotify track link")
100
+ return value
101
+
102
+
103
+ def classify_platform(url: str) -> str:
104
+ parsed = urlparse(url)
105
+ host = parsed.netloc.lower().removeprefix("www.")
106
+ path = parsed.path
107
+ query = parse_qs(parsed.query)
108
+
109
+ if host in {"youtube.com", "m.youtube.com", "youtu.be"}:
110
+ if "list" in query:
111
+ raise SourceImportError("Playlist links are not supported yet")
112
+ if host == "youtu.be":
113
+ return "youtube"
114
+ if path.startswith("/watch") or path.startswith("/shorts/"):
115
+ return "youtube"
116
+ if path.startswith("/playlist") or path.startswith("/channel/") or path.startswith("/@"):
117
+ raise SourceImportError("Only single YouTube video links are supported")
118
+ raise SourceImportError("Unsupported YouTube link")
119
+
120
+ if host == "music.youtube.com":
121
+ if "list" in query:
122
+ raise SourceImportError("Playlist links are not supported yet")
123
+ if path.startswith("/watch"):
124
+ return "ytmusic"
125
+ raise SourceImportError("Only single YouTube Music track links are supported")
126
+
127
+ if host == "open.spotify.com":
128
+ parts = [part for part in path.split("/") if part]
129
+ if len(parts) >= 2 and parts[0] == "track":
130
+ return "spotify"
131
+ if parts and parts[0] in {"album", "playlist", "artist", "show", "episode"}:
132
+ raise SourceImportError("Only single Spotify track links are supported")
133
+ raise SourceImportError("Unsupported Spotify link")
134
+
135
+ raise SourceImportError("Unsupported link. Use YouTube, YouTube Music, or Spotify track URLs")
136
+
137
+
138
+ def download_youtube_source(
139
+ source_url: str,
140
+ job_dir: Path,
141
+ title_hint: str | None = None,
142
+ ) -> tuple[str, str]:
143
+ temp_dir = Path(tempfile.mkdtemp(prefix="import-", dir=str(job_dir)))
144
+
145
+ try:
146
+ options = {
147
+ "format": "bestaudio/best",
148
+ "paths": {"home": str(temp_dir)},
149
+ "outtmpl": {"default": "downloaded.%(ext)s"},
150
+ "quiet": True,
151
+ "no_warnings": True,
152
+ "noplaylist": True,
153
+ "extract_flat": False,
154
+ "postprocessors": [
155
+ {
156
+ "key": "FFmpegExtractAudio",
157
+ "preferredcodec": "wav",
158
+ }
159
+ ],
160
+ }
161
+
162
+ with YoutubeDL(options) as ydl:
163
+ info = ydl.extract_info(source_url, download=True)
164
+
165
+ output_path = find_downloaded_audio(temp_dir)
166
+ content = output_path.read_bytes()
167
+ display_title = sanitize_title(title_hint or info.get("title") or output_path.stem)
168
+ display_filename = f"{display_title}.wav"
169
+ file_manager.save_imported_audio(job_dir.name, display_filename, content)
170
+ return display_title, display_filename
171
+ except SourceImportError:
172
+ raise
173
+ except Exception as exc:
174
+ raise SourceImportError(f"Failed to download source audio: {exc}") from exc
175
+ finally:
176
+ shutil.rmtree(temp_dir, ignore_errors=True)
177
+
178
+
179
+ def find_downloaded_audio(temp_dir: Path) -> Path:
180
+ audio_files = sorted(
181
+ [
182
+ path
183
+ for path in temp_dir.iterdir()
184
+ if path.is_file()
185
+ and path.suffix.lower() in {".wav", ".mp3", ".m4a", ".aac", ".flac", ".opus", ".ogg"}
186
+ ],
187
+ key=lambda path: path.stat().st_mtime,
188
+ reverse=True,
189
+ )
190
+ if not audio_files:
191
+ raise SourceImportError("Downloaded source did not produce a playable audio file")
192
+ return audio_files[0]
193
+
194
+
195
+ def fetch_spotify_track_metadata(url: str) -> tuple[str, str]:
196
+ oembed_url = f"https://open.spotify.com/oembed?url={quote(url, safe='')}"
197
+ request = Request(oembed_url, headers={"User-Agent": USER_AGENT})
198
+
199
+ try:
200
+ with urlopen(request, timeout=20) as response:
201
+ payload = json.loads(response.read().decode("utf-8"))
202
+ title = payload.get("title", "").strip()
203
+ artist = payload.get("author_name", "").strip()
204
+ if title and artist:
205
+ return title, artist
206
+ except Exception:
207
+ pass
208
+
209
+ html = fetch_text(url)
210
+ title = first_match(
211
+ html,
212
+ [
213
+ r'<meta property="og:title" content="([^"]+)"',
214
+ r"<title>([^<]+)</title>",
215
+ ],
216
+ )
217
+ artist = first_match(
218
+ html,
219
+ [
220
+ r'<meta name="music:musician_description" content="([^"]+)"',
221
+ r'"artists"\s*:\s*\[\s*\{\s*"name"\s*:\s*"([^"]+)"',
222
+ r'"byArtist"\s*:\s*\{\s*"name"\s*:\s*"([^"]+)"',
223
+ ],
224
+ )
225
+
226
+ if not title or not artist:
227
+ raise SourceImportError("Could not read Spotify track metadata from the public page")
228
+
229
+ return clean_spotify_title(title), artist.strip()
230
+
231
+
232
+ def resolve_youtube_search(query: str) -> str:
233
+ options = {
234
+ "quiet": True,
235
+ "no_warnings": True,
236
+ "extract_flat": "in_playlist",
237
+ "noplaylist": True,
238
+ }
239
+
240
+ try:
241
+ with YoutubeDL(options) as ydl:
242
+ info = ydl.extract_info(f"ytsearch1:{query}", download=False)
243
+ except Exception as exc:
244
+ raise SourceImportError(f"Failed to search YouTube for a Spotify track match: {exc}") from exc
245
+
246
+ entries = info.get("entries") or []
247
+ if not entries:
248
+ raise SourceImportError("No matching YouTube source was found for this Spotify track")
249
+
250
+ entry = entries[0]
251
+ resolved_url = entry.get("webpage_url") or entry.get("url")
252
+ if resolved_url and not str(resolved_url).startswith("http"):
253
+ video_id = entry.get("id") or resolved_url
254
+ resolved_url = f"https://www.youtube.com/watch?v={video_id}"
255
+ if not resolved_url:
256
+ raise SourceImportError("Resolved YouTube match did not include a downloadable URL")
257
+ return resolved_url
258
+
259
+
260
+ def fetch_text(url: str) -> str:
261
+ request = Request(url, headers={"User-Agent": USER_AGENT})
262
+ with urlopen(request, timeout=20) as response:
263
+ return response.read().decode("utf-8", errors="ignore")
264
+
265
+
266
+ def first_match(text: str, patterns: list[str]) -> str | None:
267
+ for pattern in patterns:
268
+ match = re.search(pattern, text, re.IGNORECASE)
269
+ if match:
270
+ return unescape_html(match.group(1))
271
+ return None
272
+
273
+
274
+ def sanitize_title(value: str) -> str:
275
+ clean = re.sub(r"[\\/:*?\"<>|]+", " ", value)
276
+ clean = re.sub(r"\s+", " ", clean).strip().strip(".")
277
+ return clean[:120] or "Imported Track"
278
+
279
+
280
+ def clean_spotify_title(value: str) -> str:
281
+ title = value.replace(" | Spotify", "").strip()
282
+ if " - song and lyrics by " in title.lower():
283
+ title = re.split(r"\s+-\s+song and lyrics by\s+", title, flags=re.IGNORECASE)[0]
284
+ return title.strip()
285
+
286
+
287
+ def unescape_html(value: str) -> str:
288
+ return (
289
+ value.replace("&amp;", "&")
290
+ .replace("&quot;", '"')
291
+ .replace("&#x27;", "'")
292
+ .replace("&#39;", "'")
293
+ )
backend/task_queue.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from dataclasses import dataclass
3
+
4
+ from backend.separator import StemSeparatorService
5
+ from backend import file_manager
6
+
7
+
8
+ @dataclass
9
+ class JobProgress:
10
+ state: str = "queued"
11
+ progress: float = 0.0
12
+ message: str = "Waiting in queue..."
13
+ stems: dict[str, str] | None = None
14
+ error: str | None = None
15
+
16
+
17
+ # Shared state
18
+ jobs: dict[str, JobProgress] = {}
19
+ _queue: asyncio.Queue | None = None
20
+
21
+
22
+ def get_queue() -> asyncio.Queue:
23
+ global _queue
24
+ if _queue is None:
25
+ _queue = asyncio.Queue(maxsize=5)
26
+ return _queue
27
+
28
+
29
+ def get_job_progress(job_id: str) -> JobProgress | None:
30
+ return jobs.get(job_id)
31
+
32
+
33
+ async def enqueue_job(job_id: str, stems: list[str], output_format: str) -> bool:
34
+ """Enqueue a separation job. Returns False if queue is full."""
35
+ q = get_queue()
36
+ if q.full():
37
+ return False
38
+ jobs[job_id] = JobProgress()
39
+ await q.put((job_id, stems, output_format))
40
+ return True
41
+
42
+
43
+ async def worker_loop():
44
+ """Single worker that processes separation jobs sequentially."""
45
+ separator = StemSeparatorService()
46
+ q = get_queue()
47
+
48
+ while True:
49
+ job_id, stems, output_format = await q.get()
50
+ try:
51
+ progress = jobs.get(job_id)
52
+ if progress is None:
53
+ progress = JobProgress()
54
+ jobs[job_id] = progress
55
+
56
+ def update_progress(state: str, pct: float):
57
+ progress.state = state
58
+ progress.progress = pct
59
+ messages = {
60
+ "loading_model": "Loading BS-RoFormer model...",
61
+ "separating": "Separating stems...",
62
+ "finalizing": "Finalizing output files...",
63
+ "done": "Separation complete!",
64
+ }
65
+ progress.message = messages.get(state, f"{state}...")
66
+
67
+ input_file = file_manager.get_input_file(job_id)
68
+ if input_file is None:
69
+ progress.state = "error"
70
+ progress.error = "Input file not found"
71
+ progress.message = "Error: input file not found"
72
+ continue
73
+
74
+ output_dir = str(file_manager.get_output_dir(job_id))
75
+
76
+ # Run separation in a thread to avoid blocking the event loop
77
+ loop = asyncio.get_event_loop()
78
+ result = await loop.run_in_executor(
79
+ None,
80
+ separator.separate,
81
+ str(input_file),
82
+ output_dir,
83
+ stems,
84
+ output_format,
85
+ update_progress,
86
+ )
87
+
88
+ progress.state = "done"
89
+ progress.progress = 1.0
90
+ progress.message = "Separation complete!"
91
+ progress.stems = result
92
+
93
+ except Exception as e:
94
+ progress = jobs.get(job_id, JobProgress())
95
+ progress.state = "error"
96
+ progress.error = str(e)
97
+ progress.message = f"Error: {e}"
98
+ jobs[job_id] = progress
99
+ finally:
100
+ q.task_done()
frontend/dist/assets/index-CiF4mW7R.js ADDED
The diff for this file is too large to render. See raw diff
 
frontend/dist/assets/index-P-4oQdbx.css ADDED
@@ -0,0 +1 @@
 
 
1
+ /*! tailwindcss v4.2.1 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-space-y-reverse:0;--tw-border-style:solid;--tw-gradient-position:initial;--tw-gradient-from:#0000;--tw-gradient-via:#0000;--tw-gradient-to:#0000;--tw-gradient-stops:initial;--tw-gradient-via-stops:initial;--tw-gradient-from-position:0%;--tw-gradient-via-position:50%;--tw-gradient-to-position:100%;--tw-font-weight:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 #0000;--tw-duration:initial;--tw-ease:initial}}}@layer theme{:root,:host{--font-sans:ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--font-mono:ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-pink-500:oklch(65.6% .241 354.308);--color-white:#fff;--spacing:.25rem;--container-3xl:48rem;--text-xs:.75rem;--text-xs--line-height:calc(1 / .75);--text-sm:.875rem;--text-sm--line-height:calc(1.25 / .875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75 / 1.125);--text-2xl:1.5rem;--text-2xl--line-height:calc(2 / 1.5);--text-3xl:1.875rem;--text-3xl--line-height: 1.2 ;--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--radius-lg:.5rem;--radius-xl:.75rem;--ease-out:cubic-bezier(0, 0, .2, 
1);--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4, 0, .2, 1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono);--color-bg-primary:#0a0a0f;--color-bg-secondary:#13131a;--color-bg-card:#1a1a24;--color-bg-hover:#252530;--color-text-primary:#e8e8ef;--color-text-secondary:#8888a0;--color-accent:#7c3aed;--color-accent-hover:#6d28d9;--color-border:#2a2a38}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;-moz-tab-size:4;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", 
monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){-webkit-appearance:button;-moz-appearance:button;appearance:button}::file-selector-button{-webkit-appearance:button;-moz-appearance:button;appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}}@layer components;@layer utilities{.pointer-events-none{pointer-events:none}.relative{position:relative}.container{width:100%}@media(min-width:40rem){.container{max-width:40rem}}@media(min-width:48rem){.container{max-width:48rem}}@media(min-width:64rem){.container{max-width:64rem}}@media(min-width:80rem){.container{max-width:80rem}}@media(min-width:96rem){.container{max-width:96rem}}.mx-auto{margin-inline:auto}.mt-1{margin-top:calc(var(--spacing) * 1)}.mb-3{margin-bottom:calc(var(--spacing) * 3)}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline-flex{display:inline-flex}.h-2{height:calc(var(--spacing) * 2)}.h-2\.5{height:calc(var(--spacing) * 2.5)}.h-3{height:calc(var(--spacing) * 3)}.h-3\.5{height:calc(var(--spacing) * 3.5)}.h-4{height:calc(var(--spacing) * 4)}.h-5{height:calc(var(--spacing) * 
5)}.h-8{height:calc(var(--spacing) * 8)}.h-10{height:calc(var(--spacing) * 10)}.h-12{height:calc(var(--spacing) * 12)}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-2\.5{width:calc(var(--spacing) * 2.5)}.w-3{width:calc(var(--spacing) * 3)}.w-3\.5{width:calc(var(--spacing) * 3.5)}.w-4{width:calc(var(--spacing) * 4)}.w-5{width:calc(var(--spacing) * 5)}.w-8{width:calc(var(--spacing) * 8)}.w-10{width:calc(var(--spacing) * 10)}.w-12{width:calc(var(--spacing) * 12)}.w-\[85px\]{width:85px}.w-full{width:100%}.max-w-3xl{max-width:var(--container-3xl)}.min-w-0{min-width:calc(var(--spacing) * 0)}.flex-shrink-0{flex-shrink:0}.flex-grow{flex-grow:1}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.gap-1\.5{gap:calc(var(--spacing) * 1.5)}.gap-2{gap:calc(var(--spacing) * 2)}.gap-3{gap:calc(var(--spacing) * 3)}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing) * 2) * var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing) * 2) * calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing) * 3) * var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing) * 3) * calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing) * 6) * var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing) * 6) * calc(1 - 
var(--tw-space-y-reverse)))}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-hidden{overflow:hidden}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius-lg)}.rounded-xl{border-radius:var(--radius-xl)}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.border-accent{border-color:var(--color-accent)}.border-border{border-color:var(--color-border)}.border-red-500\/30{border-color:#fb2c364d}@supports (color:color-mix(in lab,red,red)){.border-red-500\/30{border-color:color-mix(in oklab,var(--color-red-500) 30%,transparent)}}.border-transparent{border-color:#0000}.bg-accent{background-color:var(--color-accent)}.bg-accent\/10{background-color:#7c3aed1a}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--color-accent) 10%,transparent)}}.bg-bg-card{background-color:var(--color-bg-card)}.bg-bg-hover{background-color:var(--color-bg-hover)}.bg-bg-primary{background-color:var(--color-bg-primary)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in oklab,var(--color-red-500) 10%,transparent)}}.bg-gradient-to-r{--tw-gradient-position:to right in oklab;background-image:linear-gradient(var(--tw-gradient-stops))}.from-accent{--tw-gradient-from:var(--color-accent);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position), var(--tw-gradient-from) var(--tw-gradient-from-position), var(--tw-gradient-to) var(--tw-gradient-to-position))}.to-pink-500{--tw-gradient-to:var(--color-pink-500);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position), var(--tw-gradient-from) var(--tw-gradient-from-position), var(--tw-gradient-to) 
var(--tw-gradient-to-position))}.bg-clip-text{-webkit-background-clip:text;background-clip:text}.p-3{padding:calc(var(--spacing) * 3)}.p-4{padding:calc(var(--spacing) * 4)}.p-8{padding:calc(var(--spacing) * 8)}.px-3{padding-inline:calc(var(--spacing) * 3)}.px-4{padding-inline:calc(var(--spacing) * 4)}.px-6{padding-inline:calc(var(--spacing) * 6)}.py-1\.5{padding-block:calc(var(--spacing) * 1.5)}.py-2\.5{padding-block:calc(var(--spacing) * 2.5)}.py-3{padding-block:calc(var(--spacing) * 3)}.py-6{padding-block:calc(var(--spacing) * 6)}.py-8{padding-block:calc(var(--spacing) * 8)}.text-center{text-align:center}.text-right{text-align:right}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.text-accent{color:var(--color-accent)}.text-red-400{color:var(--color-red-400)}.text-text-primary{color:var(--color-text-primary)}.text-text-secondary{color:var(--color-text-secondary)}.text-transparent{color:#0000}.text-white{color:var(--color-white)}.opacity-50{opacity:.5}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a), 0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-accent\/25{--tw-shadow-color:#7c3aed40}@supports (color:color-mix(in 
lab,red,red)){.shadow-accent\/25{--tw-shadow-color:color-mix(in oklab, color-mix(in oklab, var(--color-accent) 25%, transparent) var(--tw-shadow-alpha), transparent)}}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}@media(hover:hover){.hover\:border-text-secondary:hover{border-color:var(--color-text-secondary)}.hover\:bg-accent-hover:hover{background-color:var(--color-accent-hover)}.hover\:bg-bg-hover:hover{background-color:var(--color-bg-hover)}.hover\:bg-bg-hover\/50:hover{background-color:#25253080}@supports (color:color-mix(in lab,red,red)){.hover\:bg-bg-hover\/50:hover{background-color:color-mix(in oklab,var(--color-bg-hover) 50%,transparent)}}.hover\:text-accent-hover:hover{color:var(--color-accent-hover)}.hover\:text-text-primary:hover{color:var(--color-text-primary)}.hover\:shadow-accent\/40:hover{--tw-shadow-color:#7c3aed66}@supports (color:color-mix(in lab,red,red)){.hover\:shadow-accent\/40:hover{--tw-shadow-color:color-mix(in oklab, color-mix(in oklab, var(--color-accent) 40%, transparent) var(--tw-shadow-alpha), transparent)}}}.active\:scale-\[0\.98\]:active{scale:.98}.disabled\:opacity-30:disabled{opacity:.3}.disabled\:opacity-50:disabled{opacity:.5}@media(min-width:40rem){.sm\:w-24{width:calc(var(--spacing) * 
24)}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}.sm\:gap-4{gap:calc(var(--spacing) * 4)}}@media(min-width:48rem){.md\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.md\:p-4{padding:calc(var(--spacing) * 4)}.md\:p-5{padding:calc(var(--spacing) * 5)}.md\:p-12{padding:calc(var(--spacing) * 12)}.md\:py-12{padding-block:calc(var(--spacing) * 12)}.md\:text-3xl{font-size:var(--text-3xl);line-height:var(--tw-leading,var(--text-3xl--line-height))}.md\:text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}}}body{background-color:var(--color-bg-primary);color:var(--color-text-primary);min-height:100vh;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,sans-serif}*{box-sizing:border-box}::-webkit-scrollbar{width:6px}::-webkit-scrollbar-track{background:var(--color-bg-secondary)}::-webkit-scrollbar-thumb{background:var(--color-border);border-radius:3px}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-gradient-position{syntax:"*";inherits:false}@property --tw-gradient-from{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-via{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-to{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-stops{syntax:"*";inherits:false}@property --tw-gradient-via-stops{syntax:"*";inherits:false}@property --tw-gradient-from-position{syntax:"<length-percentage>";inherits:false;initial-value:0%}@property --tw-gradient-via-position{syntax:"<length-percentage>";inherits:false;initial-value:50%}@property --tw-gradient-to-position{syntax:"<length-percentage>";inherits:false;initial-value:100%}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property 
--tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"<percentage>";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"<percentage>";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"<length>";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}
frontend/dist/index.html ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>Stem Separator</title>
7
+ <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><text y='.9em' font-size='90'>🎵</text></svg>" />
8
+ <script type="module" crossorigin src="/assets/index-CiF4mW7R.js"></script>
9
+ <link rel="stylesheet" crossorigin href="/assets/index-P-4oQdbx.css">
10
+ </head>
11
+ <body>
12
+ <div id="root"></div>
13
+ </body>
14
+ </html>
frontend/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>Stem Separator</title>
7
+ <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><text y='.9em' font-size='90'>🎵</text></svg>" />
8
+ </head>
9
+ <body>
10
+ <div id="root"></div>
11
+ <script type="module" src="/src/main.tsx"></script>
12
+ </body>
13
+ </html>
frontend/node_modules/.bin/baseline-browser-mapping ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0d5012244d9133cb5851e14e2a4300a067b28f777610d92201495cf56ad383c
3
+ size 423
frontend/node_modules/.bin/baseline-browser-mapping.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:546308c1cb7900c7bc19001ca144e8034017372cf204945637b0777fb6e0914d
3
+ size 342
frontend/node_modules/.bin/baseline-browser-mapping.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c329cbf021bcef4808042139875afd4946833e2564a0e92ed8950bd4f7442ed3
3
+ size 873
frontend/node_modules/.bin/browserslist ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54fdec30a2fdafa92270bd5f6f8b469eae04d9d691a0efa5c81e5f9720ce43db
3
+ size 387
frontend/node_modules/.bin/browserslist.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4256324c48333044314aba80247152ce152284f6198f073471082263ad68e000
3
+ size 324
frontend/node_modules/.bin/browserslist.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29aadbfb1ce80642e7c4d82a68013b8e22298eabb2b780fcda40e960fd9fe114
3
+ size 801
frontend/node_modules/.bin/esbuild ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:020b954fd13a2c5ee8aabd5a73e24c10a4750099ed5e02f07dc7265c6758a054
3
+ size 387
frontend/node_modules/.bin/esbuild.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b00afc3761267058834ab0968f58748f0422efe0eb0321540544770cafbf7a69
3
+ size 324
frontend/node_modules/.bin/esbuild.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a24efc13fb6ef9def6c5aaf7787e7203d0982e2ab8627b64a41eb033945b7f5
3
+ size 801
frontend/node_modules/.bin/jiti ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d520004d5e2263d19e15b4428f84242e88a8b996aa856208a3c3ba6c44f916a2
3
+ size 391
frontend/node_modules/.bin/jiti.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b161a3ade2fd9a6ae94bb6c4af14828436bb0f3c32ee0bbfb036d68c2e59083
3
+ size 326
frontend/node_modules/.bin/jiti.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40363e635374e55b3243679de83e59dfa1eaf38f7b5d9837b748cad64d28d32d
3
+ size 809
frontend/node_modules/.bin/jsesc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e602c28ae389059f8432b215bf7cf139ddf94a0115b7d6f407ed7ec44009b8b4
3
+ size 379
frontend/node_modules/.bin/jsesc.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b87fd711d42d591a1f58ff7e1ab5853fec23500124199234e067db1ecf3cac1f
3
+ size 320
frontend/node_modules/.bin/jsesc.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1fea8d4906bc84471a56ca344efa060ff05d6d1cc5e924ce80c74a2468e4634
3
+ size 785
frontend/node_modules/.bin/json5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6dbbbcf1513aa4adede8edd73bf10cdf666bd2c0ee7fb5b92a9ecc688a278016
3
+ size 381
frontend/node_modules/.bin/json5.cmd ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac1c85ef42fd4756851c82bcced541694345d9e5196f36f25fef83d2cb4409cd
3
+ size 321
frontend/node_modules/.bin/json5.ps1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1928bf0cbc643164307fc9b7529a03657bbf58a74e552c1cd241305c6fafa754
3
+ size 789
frontend/node_modules/.bin/nanoid ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49446fa7e9ea1b36b205bab243f2928f1230d8dbeb09881eef1d039149ad3238
3
+ size 391