SaltProphet committed on
Commit
f04ed54
·
verified ·
1 Parent(s): 0651351

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +151 -366
app.py CHANGED
@@ -1,395 +1,180 @@
1
  import gradio as gr
2
- import librosa
3
- import numpy as np
4
  import os
5
  import shutil
6
  import zipfile
7
- import tempfile
8
- import soundfile as sf
9
- import traceback
 
10
  import subprocess
11
- from typing import Tuple, List
 
12
 
13
# --- Configuration ---

# Root folder name embedded in the generated ZIP.
OUTPUT_FOLDER_NAME = "PRO_LOOP_PACK"

# Maps the UI model selection to its Spleeter config string plus the stem
# names involved: 'stems' are Spleeter's on-disk output filenames, while
# 'display_stems' are the user-facing labels used in folders and messages.
STEM_MODELS = {
    '2-Stems (Vocals/Inst)': {
        'spleeter_config': '2stems',
        'stems': ['vocals', 'accompaniment'],
        'display_stems': ['Vocals', 'Instrumental'],
    },
    '4-Stems (Drums, Bass, Vocals, Other)': {
        'spleeter_config': '4stems',
        'stems': ['vocals', 'drums', 'bass', 'other'],
        'display_stems': ['Vocals', 'Drums', 'Bass', 'Other'],
    },
    '5-Stems (Drums, Bass, Vocals, Piano, Other)': {
        'spleeter_config': '5stems',
        'stems': ['vocals', 'drums', 'bass', 'piano', 'other'],
        'display_stems': ['Vocals', 'Drums', 'Bass', 'Piano', 'Other'],
    },
}

# Loop lengths (in bars) rendered for every stem.
LOOP_BAR_LENGTHS = [4, 6, 8]

# Krumhansl-style key-profile weights correlated against the mean chroma
# vector during key detection (index 0 = tonic, rotated per candidate key).
KEY_TEMPLATES = {
    'major': [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.16, 3.61, 3.28, 2.91],
    'minor': [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.91, 3.03, 3.34],
}

# Chromatic pitch classes, index-aligned with the template rotations above.
NOTES = 'C C# D D# E F F# G G# A A# B'.split()
42
-
43
- # --- Utility Functions ---
44
-
45
def save_segment(filepath: str, audio_data: np.ndarray, sr: int):
    """Write *audio_data* to *filepath* as a 16-bit PCM WAV at rate *sr*.

    Spleeter outputs 44100Hz audio, so the sample rate is passed through
    explicitly rather than being inferred or resampled.
    """
    # soundfile performs the NumPy-array -> WAV encoding in a single call.
    sf.write(filepath, audio_data, sr, format='WAV', subtype='PCM_16')
49
-
50
def detect_key_and_mode(y: np.ndarray, sr: int) -> str:
    """Estimate the musical key of *y*, e.g. ``'CMajor'`` or ``'AMinor'``.

    Correlates the normalized mean chroma vector against every rotation of
    the major/minor key templates and keeps the strongest match.  Returns
    ``'KeyDetectionAmbiguous'`` when no template correlates convincingly,
    or ``'KeyDetectionFailed'`` if the analysis raises.
    """
    try:
        chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
        profile = np.mean(chroma, axis=1)
        profile /= profile.sum()

        best_label = "Unknown"
        best_score = -1.0

        # For each candidate tonic, score the major template first and then
        # the minor one; a strict '>' keeps the earliest maximum on ties.
        for shift, tonic in enumerate(NOTES):
            for mode in ('major', 'minor'):
                template = np.roll(KEY_TEMPLATES[mode], shift)
                score = np.dot(profile, template)
                if score > best_score:
                    best_score = score
                    best_label = f"{tonic} {mode.capitalize()}"

        # A weak overall correlation means the estimate is unreliable.
        if best_score < 0.2:
            return "KeyDetectionAmbiguous"

        return best_label.replace(' ', '')

    except Exception as e:
        print(f"Key Detection Failed: {e}")
        return "KeyDetectionFailed"
85
 
86
def separate_stems(audio_path: str, model_name: str, output_dir: str) -> str:
    """
    Executes Spleeter separation via subprocess.
    Requires 'spleeter' package to be installed in the environment.

    Args:
        audio_path: Input audio file to separate.
        model_name: A key of ``STEM_MODELS`` selecting the Spleeter config.
        output_dir: Directory Spleeter writes into; Spleeter creates a
            subfolder there named after the input file (without extension).

    Returns:
        Path to Spleeter's per-track output subdirectory.

    Raises:
        RuntimeError: If the spleeter CLI fails or its output directory
            cannot be located (the original error is chained as __cause__).
    """
    spleeter_config = STEM_MODELS[model_name]['spleeter_config']

    # Argument-list form (shell=False) avoids shell-injection issues with
    # arbitrary upload filenames.
    # Spleeter command: spleeter separate -o {output_dir} -p {config} {input_file}
    command = [
        "spleeter", "separate",
        "-o", output_dir,
        "-p", f"spleeter:{spleeter_config}",
        audio_path
    ]

    try:
        # Run Spleeter command
        result = subprocess.run(command, check=True, capture_output=True, text=True)
        print("Spleeter Output:", result.stdout)
        print("Spleeter Errors:", result.stderr)

        # Spleeter creates a sub-directory based on the input filename;
        # locate it so the caller can read the individual stem WAVs.
        base_filename = os.path.splitext(os.path.basename(audio_path))[0]
        spleeter_output_path = os.path.join(output_dir, base_filename)

        if not os.path.isdir(spleeter_output_path):
            raise FileNotFoundError(f"Spleeter output directory not found at: {spleeter_output_path}")

        return spleeter_output_path

    except subprocess.CalledProcessError as e:
        # FIX: chain with 'from e' so the original subprocess failure (and
        # its traceback) survives in the re-raised error's __cause__.
        raise RuntimeError(f"Spleeter command failed. Check if 'spleeter' is installed. Output: {e.stdout}, Error: {e.stderr}") from e
    except Exception as e:
        raise RuntimeError(f"Error during Spleeter execution: {e}") from e
124
-
125
- # --- Main Processing Function ---
126
-
127
def create_market_ready_pack(
    audio_file_path: str,
    one_shot_sensitivity: float,
    stem_model_selection: str,
    progress=gr.Progress()
) -> Tuple[str | None, str]:
    """
    Processes the input audio file, generates loops and one-shots,
    and packages them into a market-ready ZIP file.

    Args:
        audio_file_path: Path to the uploaded full-mix audio file.
        one_shot_sensitivity: Slider value 1-10; higher values shrink the
            onset-detection window, yielding more (quieter) one-shots.
        stem_model_selection: Key of STEM_MODELS selecting the Spleeter model.
        progress: Gradio progress reporter injected by the UI.

    Returns:
        (zip_path, status_message); zip_path is None on failure.
    """
    temp_dir = None

    if not audio_file_path:
        return None, "Error: Please upload an audio file before proceeding."

    try:
        # 1. Setup Temporary Directories
        temp_dir = tempfile.mkdtemp()
        output_root = os.path.join(temp_dir, OUTPUT_FOLDER_NAME)
        os.makedirs(output_root, exist_ok=True)

        progress(0.05, desc="Loading and Verifying Audio...")

        # Robust Audio Loading (Load full mix for analysis)
        # sr=None keeps the file's native sample rate for all later slicing.
        y_full, sr = librosa.load(audio_file_path, sr=None, mono=True)
        if y_full.size == 0:
            raise ValueError("Loaded audio is empty.")

        # 2. Advanced Audio Analysis (Tempo and Key)
        progress(0.15, desc="Analyzing Tempo and Musical Key...")

        # Defaults used if the analysis below raises.
        tempo = 120.0
        start_sample = 0
        key_mode_name = "120BPM_UnknownKey"

        try:
            tempo, beat_frames = librosa.beat.beat_track(y=y_full, sr=sr, trim=True)
            key_mode_name = detect_key_and_mode(y_full, sr)

            samples_per_beat = int((60 / tempo) * sr)
            # Align everything to the first detected beat so loops start on-grid.
            start_sample = librosa.frames_to_samples(beat_frames[0]) if beat_frames.size > 0 else 0

            gr.Info(f"Analysis Complete: {int(tempo)} BPM, {key_mode_name}.")
            key_mode_name = f"{int(tempo)}BPM_{key_mode_name}"

        except Exception as e:
            gr.Warning(f"Warning: Tempo or Key detection failed ({e}). Using default 120 BPM and 'Unknown Key'.")
            samples_per_beat = int((60 / 120.0) * sr)  # Fallback beat timing

        # 3. REAL STEM SEPARATION using Spleeter
        progress(0.25, desc=f"Separating Stems using {stem_model_selection} model...")

        spleeter_output_path = separate_stems(audio_file_path, stem_model_selection, output_root)
        spleeter_stems = STEM_MODELS[stem_model_selection]['stems']
        display_stems = STEM_MODELS[stem_model_selection]['display_stems']

        # Dictionary to hold the audio data for each stem from Spleeter's output
        stem_audio_data = {}
        for spleeter_name, display_name in zip(spleeter_stems, display_stems):
            stem_filepath = os.path.join(spleeter_output_path, f"{spleeter_name}.wav")
            if not os.path.exists(stem_filepath):
                gr.Warning(f"Stem file not found for {display_name}. Skipping this stem.")
                continue

            # Load the separated stem audio (it will be aligned and resampled by Spleeter)
            # We enforce mono loading for consistent processing later
            y_stem, sr_stem = librosa.load(stem_filepath, sr=sr, mono=True)

            # Align the start of the stem using the previously detected global beat
            y_stem_aligned = y_stem[start_sample:]
            stem_audio_data[display_name] = y_stem_aligned

        # Clean up Spleeter's intermediate directory
        shutil.rmtree(spleeter_output_path)

        if not stem_audio_data:
            raise RuntimeError("No separated stems were successfully processed. Check Spleeter output.")

        # 4. Generate Loops (4, 6, 8 Bars)
        progress(0.45, desc="Generating Time-Aligned Loops...")

        for stem_name, y_stem in stem_audio_data.items():
            loops_dir = os.path.join(output_root, 'LOOPS', stem_name)
            os.makedirs(loops_dir, exist_ok=True)

            samples_per_bar = samples_per_beat * 4  # Assuming 4/4 time signature

            for num_bars in LOOP_BAR_LENGTHS:
                samples_per_loop = samples_per_bar * num_bars

                # Step in whole-loop increments so consecutive loops tile the track.
                for i in range(0, len(y_stem) - samples_per_loop + 1, samples_per_loop):
                    try:
                        loop_segment = y_stem[i:i + samples_per_loop]

                        # Skip under-filled tail segments (< 90% of a full loop).
                        if len(loop_segment) < samples_per_loop * 0.9:
                            continue

                        index = i // samples_per_loop + 1
                        # Naming convention: {BPM_Key}_{Stem}_{Bars}Bar_{Index}.wav
                        filename = f"{key_mode_name}_{stem_name}_{num_bars}Bar_{index:02d}.wav"
                        save_segment(os.path.join(loops_dir, filename), loop_segment, sr)
                    except Exception as e:
                        gr.Warning(f"Error slicing {num_bars}-bar loop for {stem_name}: {e}")
                        continue

        # 5. Generate One-Shots (Transient Detection)
        progress(0.70, desc="Generating One-Shots (Transient Detection)...")

        # Sensitivity mapping: 1=Few/Loud (large pre_max), 10=Many/Quiet (small pre_max)
        pre_max_frames = int(12 - one_shot_sensitivity)
        if pre_max_frames < 2: pre_max_frames = 2

        # Each one-shot keeps 50ms before and 250ms after the detected onset.
        pre_slice_samples = int(sr * 0.05)
        post_slice_samples = int(sr * 0.25)

        for stem_name, y_stem in stem_audio_data.items():
            shots_dir = os.path.join(output_root, 'ONESHOTS', stem_name)
            os.makedirs(shots_dir, exist_ok=True)

            try:
                # Median aggregation makes the onset envelope robust to spikes.
                o_env = librosa.onset.onset_strength(y=y_stem, sr=sr, aggregate=np.median)
                onset_frames = librosa.onset.onset_detect(
                    onset_envelope=o_env,
                    sr=sr,
                    units='frames',
                    pre_max=pre_max_frames,
                    post_max=pre_max_frames // 2,
                    wait=10
                )
                onset_samples = librosa.frames_to_samples(onset_frames)

                for i, sample_index in enumerate(onset_samples):
                    start = max(0, sample_index - pre_slice_samples)
                    end = min(len(y_stem), sample_index + post_slice_samples)

                    shot_segment = y_stem[start:end]

                    # Discard slivers shorter than 50ms.
                    if len(shot_segment) > int(sr * 0.05):
                        filename = f"{key_mode_name}_{stem_name}_OneShot_{i+1:03d}.wav"
                        save_segment(os.path.join(shots_dir, filename), shot_segment, sr)
            except Exception as e:
                gr.Warning(f"Error during One-Shot detection for {stem_name}. Skipping. Details: {e}")
                continue


        # 6. Packaging (License and ZIP)
        progress(0.90, desc="Creating License and Packaging Files...")

        # Create the License.txt file
        # NOTE(review): os.uname() is POSIX-only and would raise AttributeError
        # on Windows — confirm deployment target (platform.node() is portable).
        license_content = f"""
        -- PROFESSIONAL LOOP PACK LICENSE AGREEMENT --

        Product: {OUTPUT_FOLDER_NAME}
        BPM/Key Reference: {key_mode_name}
        Separation Model Used: {stem_model_selection}

        1. Royalty-Free Use: All sounds, loops, and one-shots within this pack are
        100% royalty-free for commercial use in musical compositions, sound design,
        and public performances. You may use them in your own tracks and sell those
        tracks without owing any additional royalties to the creator.

        2. Restrictions: Redistribution, repackaging, or re-selling of the individual
        sounds or loops as part of another sound library or sample pack is strictly
        prohibited.

        3. Generated: {os.uname().nodename}
        """

        license_filepath = os.path.join(output_root, 'License.txt')
        with open(license_filepath, 'w') as f:
            f.write(license_content.strip())

        # Create the final ZIP file
        zip_filename = os.path.join(temp_dir, f"{OUTPUT_FOLDER_NAME}_{key_mode_name}.zip")

        with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zf:
            for root, dirs, files in os.walk(output_root):
                for file in files:
                    full_path = os.path.join(root, file)
                    # Archive paths are relative to temp_dir so the ZIP root
                    # is the OUTPUT_FOLDER_NAME directory itself.
                    relative_path = os.path.relpath(full_path, temp_dir)
                    zf.write(full_path, relative_path)

        progress(1.0, desc="Packaging Complete!")

        # Remove the unzipped tree; only the ZIP must survive for download.
        shutil.rmtree(output_root)

        return zip_filename, f"✅ Success! Your market-ready '{os.path.basename(zip_filename)}' is ready. Key/BPM: {key_mode_name}. Stems Processed: {', '.join(display_stems)}. Download below."

    except Exception as e:
        error_message = f"Critical Error: {e}"
        print(f"Full Traceback: {traceback.format_exc()}")

        # Best-effort cleanup so failed runs don't leak temp directories.
        if temp_dir and os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
            gr.Warning("Cleaned up temporary files after failure.")

        return None, f" Processing failed. {error_message}. If Spleeter failed, ensure it is installed correctly."
324
-
325
-
326
# --- Gradio Interface Definition ---

with gr.Blocks(title="Market-Ready Loop Pack Generator") as demo:
    gr.Markdown(
        """
        # 🎧 Professional Loop Pack Automation Tool

        Upload a full music track, select your stem separation model, and generate a
        complete, royalty-free sample pack including time-aligned loops and transient-detected one-shots.
        """
    )

    with gr.Row():
        audio_input = gr.Audio(
            type="filepath",
            sources=["upload"],
            label="1. Upload Full Mix Audio File (WAV/MP3/FLAC)",
        )

        stem_model_input = gr.Dropdown(
            label="2. Select Stem Separation Model",
            choices=list(STEM_MODELS.keys()),
            value='4-Stems (Drums, Bass, Vocals, Other)',
            allow_custom_value=False,
            info="Choose the number and type of stems to split the audio into (requires Spleeter installation)."
        )

    with gr.Row():
        sensitivity_slider = gr.Slider(
            minimum=1,
            maximum=10,
            step=1,
            value=6,
            label="3. One-Shot Sensitivity (1=Few/Loud, 10=Many/Quiet)",
            info="Controls the transient detection threshold for one-shot slicing."
        )

    generate_button = gr.Button("🚀 Generate Loop Pack", variant="primary")

    with gr.Column(scale=1):
        status_output = gr.Textbox(label="Status / Feedback", interactive=False)
        zip_output = gr.File(label="4. Download Final Loop Pack ZIP")

    # Define the core process action
    generate_button.click(
        fn=create_market_ready_pack,
        inputs=[audio_input, sensitivity_slider, stem_model_input],
        outputs=[zip_output, status_output]
    )

    gr.Markdown(
        """
        ---
        **Final Pack Structure (Example):**
        - `PRO_LOOP_PACK_128BPM_CMinor.zip`
            - `License.txt`
            - `LOOPS/`
                - `Drums/` (e.g., `128BPM_CMinor_Drums_4Bar_01.wav`)
                - `Bass/`
                - `Vocals/`
                - ... (based on model selected)
            - `ONESHOTS/`
                - `Drums/` (e.g., `128BPM_CMinor_Drums_OneShot_001.wav`)
                - `Bass/`
                - ...
        """
    )

if __name__ == "__main__":
    # FIX: the `enable_queue` keyword was removed from Blocks.launch() in
    # Gradio 4.x (it raises TypeError there); queueing is now enabled by
    # calling .queue() on the app before launching.
    demo.queue()
    demo.launch()
 
1
  import gradio as gr
 
 
2
  import os
3
  import shutil
4
  import zipfile
5
+ import librosa
6
+ import numpy as np
7
+ from pydub import AudioSegment
8
+ from moviepy.editor import AudioFileClip, ImageClip
9
  import subprocess
10
+ from pathlib import Path
11
+ import sys
12
 
13
# --- Configuration ---
# We use Path objects for robust cross-platform compatibility.
# Both directories are deleted and recreated at the start of every run so
# outputs from different tracks never mix.
OUTPUT_DIR = Path("nightpulse_output")  # final loops / promo video, zipped for download
TEMP_DIR = Path("temp_processing")      # scratch space for Demucs stem output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
def process_track(audio_file, cover_art_image):
    """
    Main pipeline function: analyze BPM, separate stems with Demucs, chop
    8-bar loops with pydub, optionally render a promo video, and zip it all.

    Args:
        audio_file: Filepath of the uploaded track (from gr.Audio).
        cover_art_image: Optional filepath of cover art (from gr.Image).

    Returns:
        (zip_path, video_path): path to the ZIP of generated assets, and the
        promo video path (or None when no video was produced).

    Raises:
        gr.Error: On any unrecoverable failure (shown as a red UI box).
    """
    # Initialize return variables to None to prevent 'UnboundLocalError'
    zip_path = None
    video_path = None

    try:
        # --- 0. Input Validation (Robustness Check) ---
        if not audio_file:
            raise ValueError("No audio file provided. Please upload a track.")

        # --- 1. Setup Directories ---
        # Clean previous runs to prevent file mixing
        if OUTPUT_DIR.exists():
            shutil.rmtree(OUTPUT_DIR)
        if TEMP_DIR.exists():
            shutil.rmtree(TEMP_DIR)
        OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
        TEMP_DIR.mkdir(parents=True, exist_ok=True)

        filename = Path(audio_file).stem

        # --- 2. Analyze BPM (Librosa) ---
        # FIX: the f-string had no placeholder and `filename` was unused —
        # the track name is now actually interpolated into the log line.
        print(f"Analyzing {filename}...")
        try:
            # Load 60s for better context, mono=True for BPM analysis
            y, sr = librosa.load(audio_file, duration=60, mono=True)
            tempo, _ = librosa.beat.beat_track(y=y, sr=sr)

            # Robustness: Handle different librosa versions returning array vs float
            if np.ndim(tempo) > 0:
                detected_bpm = int(round(tempo[0]))
            else:
                detected_bpm = int(round(tempo))

            print(f"Detected BPM: {detected_bpm}")
        except Exception as e:
            print(f"BPM Detection Warning: {e}")
            detected_bpm = 120  # Safe Fallback

        # --- 3. AI Stem Separation (Demucs) ---
        print("Separating stems with Demucs...")
        try:
            # Call demucs as a module via sys.executable so the subprocess
            # runs in the same Python environment as this app.
            subprocess.run([
                sys.executable, "-m", "demucs",
                "-n", "htdemucs",
                "--out", str(TEMP_DIR),
                audio_file
            ], check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            # FIX: chain with 'from e' so the subprocess failure context
            # survives in the re-raised error's __cause__.
            raise RuntimeError(f"Demucs processing failed. Error: {e.stderr.decode()}") from e

        # Locate separated stems (Robust Path Finding)
        demucs_out = TEMP_DIR / "htdemucs"
        # Demucs might normalize filenames (spaces -> underscores), so we just find the first folder
        track_folder = next(demucs_out.iterdir(), None)

        if not track_folder:
            raise FileNotFoundError("Demucs output folder could not be found.")

        drums_path = track_folder / "drums.wav"
        melody_path = track_folder / "other.wav"
        bass_path = track_folder / "bass.wav"

        if not drums_path.exists():
            raise FileNotFoundError(f"Stems were not generated in {track_folder}")

        # --- 4. Loop Logic (Pydub) ---
        # Calculate duration of 8 bars (4/4 assumed) in milliseconds
        if detected_bpm <= 0:
            detected_bpm = 120
        ms_per_beat = (60 / detected_bpm) * 1000
        eight_bars_ms = ms_per_beat * 4 * 8

        def create_loop(source_path, output_name):
            """Export a normalized, click-free 8-bar loop cut from one stem."""
            if not source_path.exists():
                return None, None

            audio = AudioSegment.from_wav(str(source_path))

            # Smart Chop: Grab the "middle" 8 bars to avoid intro/outro silence
            start_time = len(audio) // 3
            end_time = start_time + eight_bars_ms

            # Safety check if audio is shorter than 8 bars
            if len(audio) < end_time:
                start_time = 0
                end_time = min(len(audio), eight_bars_ms)

            loop = audio[start_time:end_time]
            # 15ms fade to prevent clicks
            loop = loop.fade_in(15).fade_out(15).normalize()

            out_filename = f"{detected_bpm}BPM_{output_name}.wav"
            out_file = OUTPUT_DIR / out_filename
            loop.export(out_file, format="wav")
            return out_file, loop

        # Generate Loops
        loop_drums_path, _ = create_loop(drums_path, "DrumLoop")
        loop_melody_path, melody_audio = create_loop(melody_path, "MelodyLoop")
        create_loop(bass_path, "BassLoop")

        # --- 5. Video Generation (MoviePy) ---
        # Logic: Only generate video if User uploaded Art AND we successfully made a melody loop
        if cover_art_image is not None and loop_melody_path is not None:
            print("Rendering Promo Video...")
            try:
                video_out_path = OUTPUT_DIR / "Promo_Video_Reel.mp4"

                audio_clip = AudioFileClip(str(loop_melody_path))
                image_clip = ImageClip(cover_art_image)

                # Resize logic: Fit to width 1080 (standard), maintain aspect ratio
                image_clip = image_clip.resize(width=1080)

                # Set duration to match audio loop
                image_clip = image_clip.set_duration(audio_clip.duration)
                image_clip = image_clip.set_audio(audio_clip)
                image_clip.fps = 24

                image_clip.write_videofile(str(video_out_path), codec="libx264", audio_codec="aac", logger=None)
                video_path = str(video_out_path)
            except Exception as e:
                print(f"Video generation skipped due to error: {e}")
                # We don't fail the whole pipeline here, we just skip the video part
                video_path = None

        # --- 6. Zip It Up ---
        zip_file_path = "NightPulse_Pack.zip"
        with zipfile.ZipFile(zip_file_path, 'w') as zipf:
            for file in OUTPUT_DIR.iterdir():
                zipf.write(file, file.name)
        zip_path = zip_file_path

        # FIX: Demucs stems (potentially hundreds of MB) previously lingered
        # on disk until the *next* run cleaned them. Everything needed has
        # already been exported to OUTPUT_DIR and zipped, so drop the scratch.
        shutil.rmtree(TEMP_DIR, ignore_errors=True)

        return zip_path, video_path

    except Exception as e:
        # This catches ANY crash and shows it in the UI as a red Error box,
        # keeping the original exception chained for server-side debugging.
        raise gr.Error(f"System Error: {str(e)}") from e
162
+
163
# --- UI Definition ---
# The two inputs map positionally onto process_track(audio_file,
# cover_art_image); the two outputs mirror its (zip_path, video_path) return.
_track_input = gr.Audio(type="filepath", label="Upload Suno Track (MP3/WAV)")
_art_input = gr.Image(type="filepath", label="Upload Cover Art (Optional)")
_zip_output = gr.File(label="Download Completed Pack (ZIP)")
_video_output = gr.Video(label="Preview Promo Video")

iface = gr.Interface(
    fn=process_track,
    inputs=[_track_input, _art_input],
    outputs=[_zip_output, _video_output],
    title="Night Pulse Audio | Automator",
    description="<b>Night Pulse Pipeline v1.0</b><br>Upload a Suno track to automatically separate stems, normalize, chop loops, and generate a promo video.",
    theme="default",
)

if __name__ == "__main__":
    # Start the Gradio server when run as a script.
    iface.launch()