SaltProphet committed on
Commit
e119a74
·
verified ·
1 Parent(s): 60833ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -56
app.py CHANGED
@@ -17,7 +17,7 @@ TEMP_DIR = Path("temp_processing")
17
  # --- Core Logic Functions ---
18
 
19
  def analyze_and_separate(audio_file):
20
- """Phase 1: Separate Stems and return file paths for preview"""
21
  try:
22
  if not audio_file:
23
  raise ValueError("No audio file provided.")
@@ -38,56 +38,65 @@ def analyze_and_separate(audio_file):
38
  bpm = int(round(tempo[0])) if np.ndim(tempo) > 0 else int(round(tempo))
39
  print(f"Detected BPM: {bpm}")
40
 
41
- # 2. Demucs Separation
42
- print("Separating stems...")
 
43
  subprocess.run([
44
- sys.executable, "-m", "demucs", "-n", "htdemucs", "--out", str(TEMP_DIR), audio_file
45
  ], check=True, capture_output=True)
46
 
47
  # 3. Locate Stems
48
- demucs_out = TEMP_DIR / "htdemucs"
49
  track_folder = next(demucs_out.iterdir(), None)
50
  if not track_folder: raise FileNotFoundError("Demucs failed to output files.")
51
 
 
 
52
  drums = track_folder / "drums.wav"
53
  bass = track_folder / "bass.wav"
54
- melody = track_folder / "other.wav"
 
55
  vocals = track_folder / "vocals.wav"
 
56
 
57
  # Return paths to the UI and the BPM
58
- return str(drums), str(bass), str(melody), str(vocals), bpm, str(track_folder)
59
 
60
  except Exception as e:
61
  raise gr.Error(f"Separation Failed: {str(e)}")
62
 
63
  def package_and_export(track_folder_str, bpm, start_offset_sec, cover_art):
64
- """Phase 2: Chop Loops, Generate Video (Zoom Effect), Zip"""
65
  try:
66
  track_folder = Path(track_folder_str)
67
- drums = track_folder / "drums.wav"
68
- bass = track_folder / "bass.wav"
69
- melody = track_folder / "other.wav"
70
- vocals = track_folder / "vocals.wav"
71
-
72
- # 1. Save Full Stems
73
- for stem, name in [(drums, "Drums"), (bass, "Bass"), (melody, "Melody"), (vocals, "Vocals")]:
74
- if stem.exists():
75
- shutil.copy(stem, OUTPUT_DIR / "Stems" / f"{bpm}BPM_Full_{name}.wav")
 
 
 
 
 
76
 
77
  # 2. Create Loops (With User Offset)
78
  ms_per_beat = (60 / bpm) * 1000
79
  eight_bars_ms = ms_per_beat * 4 * 8
80
  start_ms = start_offset_sec * 1000
81
 
82
- created_loops = {} # Store paths for video generation
83
 
84
  def make_loop(src, name):
85
  if not src.exists(): return None
86
  audio = AudioSegment.from_wav(str(src))
87
 
88
- # Use User Offset
89
  end_ms = start_ms + eight_bars_ms
90
- if len(audio) < end_ms: # Fallback if offset is too late
91
  s = 0
92
  e = min(len(audio), eight_bars_ms)
93
  else:
@@ -98,11 +107,15 @@ def package_and_export(track_folder_str, bpm, start_offset_sec, cover_art):
98
  loop.export(out_path, format="wav")
99
  return out_path
100
 
101
- created_loops['drums'] = make_loop(drums, "DrumLoop")
102
- created_loops['bass'] = make_loop(bass, "BassLoop")
103
- created_loops['melody'] = make_loop(melody, "MelodyLoop")
 
 
 
 
104
 
105
- # 3. Generate Video (Slow Zoom Effect)
106
  video_path = None
107
  if cover_art and created_loops['melody']:
108
  print("Rendering Dynamic Video...")
@@ -110,31 +123,16 @@ def package_and_export(track_folder_str, bpm, start_offset_sec, cover_art):
110
  audio_clip = AudioFileClip(str(created_loops['melody']))
111
  duration = audio_clip.duration
112
 
113
- # Load Image
114
  img = ImageClip(cover_art).resize(width=1080)
115
 
116
- # THE ZOOM EFFECT: Resize image from 1.0x to 1.1x over time
117
- # We crop the center to handle the zoom so it doesn't change output size
118
- w, h = img.size
119
-
120
- def zoom_effect(t):
121
- # Zoom factor goes from 1.0 to 1.15
122
- scale = 1 + 0.15 * (t / duration)
123
- return scale
124
-
125
- # Apply zoom (this is a bit heavy, simple resize is safer for CPU)
126
- # Alternative: Simple Pan or just Static if Pillow is tricky.
127
- # Let's do a simple resize animation
128
- img = img.resize(lambda t : 1 + 0.04*t) # Slow 4% zoom
129
  img = img.set_position(('center', 'center'))
130
  img = img.set_duration(duration)
131
  img = img.set_audio(audio_clip)
132
 
133
- # Composite to ensure frame size stays constant (1080 width)
134
- # Note: resizing makes it grow, we need to crop or center.
135
- # For simplicity in v1, we let it grow slightly or stick to static if 'resize' fails.
136
- # We will use the simple robust method:
137
-
138
  final_clip = CompositeVideoClip([img], size=(1080, 1920))
139
  final_clip.duration = duration
140
  final_clip.audio = audio_clip
@@ -160,9 +158,9 @@ def package_and_export(track_folder_str, bpm, start_offset_sec, cover_art):
160
 
161
  # --- GUI (Blocks) ---
162
 
163
- with gr.Blocks(title="Night Pulse | Command Center", theme=gr.themes.Base()) as app:
164
- gr.Markdown("# 🎛️ Night Pulse Audio | Command Center")
165
- gr.Markdown("Transform Suno tracks into commercial sample packs.")
166
 
167
  # State storage
168
  stored_folder = gr.State()
@@ -172,34 +170,38 @@ with gr.Blocks(title="Night Pulse | Command Center", theme=gr.themes.Base()) as
172
  with gr.Column(scale=1):
173
  input_audio = gr.Audio(type="filepath", label="1. Upload Master Track")
174
  input_art = gr.Image(type="filepath", label="Cover Art (9:16)")
175
- btn_analyze = gr.Button("🔍 Phase 1: Deconstruct & Analyze", variant="primary")
176
 
177
  with gr.Column(scale=1):
178
  gr.Markdown("### 2. Stem Preview")
179
- # Audio Players
180
- p_drums = gr.Audio(label="Drums Stem")
181
- p_bass = gr.Audio(label="Bass Stem")
182
- p_melody = gr.Audio(label="Melody Stem")
183
- p_vocals = gr.Audio(label="Vocals Stem")
 
 
 
 
184
 
185
  gr.Markdown("---")
186
 
187
  with gr.Row():
188
  with gr.Column():
189
- gr.Markdown("### 3. Loop Settings")
190
- slider_start = gr.Slider(minimum=0, maximum=120, value=15, label="Loop Start Time (Seconds)", info="Where should the 8-bar loop start?")
191
  btn_package = gr.Button("📦 Phase 2: Package & Export", variant="primary")
192
 
193
  with gr.Column():
194
  gr.Markdown("### 4. Final Output")
195
  out_zip = gr.File(label="Download Pack (ZIP)")
196
- out_video = gr.Video(label="Promo Video (Dynamic)")
197
 
198
  # Events
199
  btn_analyze.click(
200
  fn=analyze_and_separate,
201
  inputs=[input_audio],
202
- outputs=[p_drums, p_bass, p_melody, p_vocals, stored_bpm, stored_folder]
203
  )
204
 
205
  btn_package.click(
 
17
  # --- Core Logic Functions ---
18
 
19
  def analyze_and_separate(audio_file):
20
+ """Phase 1: Separate 6 Stems (Drums, Bass, Guitar, Piano, Vocals, Other)"""
21
  try:
22
  if not audio_file:
23
  raise ValueError("No audio file provided.")
 
38
  bpm = int(round(tempo[0])) if np.ndim(tempo) > 0 else int(round(tempo))
39
  print(f"Detected BPM: {bpm}")
40
 
41
+ # 2. Demucs Separation (Using 6-Stem Model)
42
+ print("Separating stems (6-Stem Model)...")
43
+ # We use '-n htdemucs_6s' to get Guitar and Piano separation
44
  subprocess.run([
45
+ sys.executable, "-m", "demucs", "-n", "htdemucs_6s", "--out", str(TEMP_DIR), audio_file
46
  ], check=True, capture_output=True)
47
 
48
  # 3. Locate Stems
49
+ demucs_out = TEMP_DIR / "htdemucs_6s" # Note the folder name change
50
  track_folder = next(demucs_out.iterdir(), None)
51
  if not track_folder: raise FileNotFoundError("Demucs failed to output files.")
52
 
53
+ # Map all 6 stems (Demucs outputs specific names)
54
+ # Note: If a stem (like guitar) is silent, Demucs still creates the file.
55
  drums = track_folder / "drums.wav"
56
  bass = track_folder / "bass.wav"
57
+ guitar = track_folder / "guitar.wav"
58
+ piano = track_folder / "piano.wav"
59
  vocals = track_folder / "vocals.wav"
60
+ other = track_folder / "other.wav"
61
 
62
  # Return paths to the UI and the BPM
63
+ return str(drums), str(bass), str(guitar), str(piano), str(other), str(vocals), bpm, str(track_folder)
64
 
65
  except Exception as e:
66
  raise gr.Error(f"Separation Failed: {str(e)}")
67
 
68
  def package_and_export(track_folder_str, bpm, start_offset_sec, cover_art):
69
+ """Phase 2: Chop Loops, Generate Video, Zip"""
70
  try:
71
  track_folder = Path(track_folder_str)
72
+ # Re-map paths
73
+ stems = {
74
+ "Drums": track_folder / "drums.wav",
75
+ "Bass": track_folder / "bass.wav",
76
+ "Guitar": track_folder / "guitar.wav",
77
+ "Piano": track_folder / "piano.wav",
78
+ "Synths": track_folder / "other.wav",
79
+ "Vocals": track_folder / "vocals.wav"
80
+ }
81
+
82
+ # 1. Save Full Stems (Copy to Stems folder)
83
+ for name, path in stems.items():
84
+ if path.exists():
85
+ shutil.copy(path, OUTPUT_DIR / "Stems" / f"{bpm}BPM_Full_{name}.wav")
86
 
87
  # 2. Create Loops (With User Offset)
88
  ms_per_beat = (60 / bpm) * 1000
89
  eight_bars_ms = ms_per_beat * 4 * 8
90
  start_ms = start_offset_sec * 1000
91
 
92
+ created_loops = {}
93
 
94
  def make_loop(src, name):
95
  if not src.exists(): return None
96
  audio = AudioSegment.from_wav(str(src))
97
 
 
98
  end_ms = start_ms + eight_bars_ms
99
+ if len(audio) < end_ms:
100
  s = 0
101
  e = min(len(audio), eight_bars_ms)
102
  else:
 
107
  loop.export(out_path, format="wav")
108
  return out_path
109
 
110
+ # Generate loops for all 6 stems
111
+ created_loops['melody'] = make_loop(stems['Synths'], "SynthLoop") # Primary logic for video
112
+ make_loop(stems['Drums'], "DrumLoop")
113
+ make_loop(stems['Bass'], "BassLoop")
114
+ make_loop(stems['Guitar'], "GuitarLoop")
115
+ make_loop(stems['Piano'], "PianoLoop")
116
+ make_loop(stems['Vocals'], "VocalChop")
117
 
118
+ # 3. Generate Video (Using the Synth/Other loop as the audio base)
119
  video_path = None
120
  if cover_art and created_loops['melody']:
121
  print("Rendering Dynamic Video...")
 
123
  audio_clip = AudioFileClip(str(created_loops['melody']))
124
  duration = audio_clip.duration
125
 
126
+ # Load and Resize Image
127
  img = ImageClip(cover_art).resize(width=1080)
128
 
129
+ # Simple Zoom Animation (Resize from 1.0 to 1.05)
130
+ # We use a lambda function for the resize filter
131
+ img = img.resize(lambda t : 1 + 0.02*t)
 
 
 
 
 
 
 
 
 
 
132
  img = img.set_position(('center', 'center'))
133
  img = img.set_duration(duration)
134
  img = img.set_audio(audio_clip)
135
 
 
 
 
 
 
136
  final_clip = CompositeVideoClip([img], size=(1080, 1920))
137
  final_clip.duration = duration
138
  final_clip.audio = audio_clip
 
158
 
159
  # --- GUI (Blocks) ---
160
 
161
+ with gr.Blocks(title="Night Pulse | Command Center (6-Stem)", theme=gr.themes.Base()) as app:
162
+ gr.Markdown("# 🎛️ Night Pulse | 6-Stem Command Center")
163
+ gr.Markdown("Deconstruct audio into 6 stems: Drums, Bass, Guitar, Piano, Vocals, Synths.")
164
 
165
  # State storage
166
  stored_folder = gr.State()
 
170
  with gr.Column(scale=1):
171
  input_audio = gr.Audio(type="filepath", label="1. Upload Master Track")
172
  input_art = gr.Image(type="filepath", label="Cover Art (9:16)")
173
+ btn_analyze = gr.Button("🔍 Phase 1: Separate (6 Stems)", variant="primary")
174
 
175
  with gr.Column(scale=1):
176
  gr.Markdown("### 2. Stem Preview")
177
+ with gr.Row():
178
+ p_drums = gr.Audio(label="Drums")
179
+ p_bass = gr.Audio(label="Bass")
180
+ with gr.Row():
181
+ p_guitar = gr.Audio(label="Guitar")
182
+ p_piano = gr.Audio(label="Piano")
183
+ with gr.Row():
184
+ p_other = gr.Audio(label="Synths/Other")
185
+ p_vocals = gr.Audio(label="Vocals")
186
 
187
  gr.Markdown("---")
188
 
189
  with gr.Row():
190
  with gr.Column():
191
+ gr.Markdown("### 3. Loop Logic")
192
+ slider_start = gr.Slider(minimum=0, maximum=120, value=15, label="Loop Start Time (Seconds)", info="Select the start point for the 8-bar loop cut.")
193
  btn_package = gr.Button("📦 Phase 2: Package & Export", variant="primary")
194
 
195
  with gr.Column():
196
  gr.Markdown("### 4. Final Output")
197
  out_zip = gr.File(label="Download Pack (ZIP)")
198
+ out_video = gr.Video(label="Promo Video")
199
 
200
  # Events
201
  btn_analyze.click(
202
  fn=analyze_and_separate,
203
  inputs=[input_audio],
204
+ outputs=[p_drums, p_bass, p_guitar, p_piano, p_other, p_vocals, stored_bpm, stored_folder]
205
  )
206
 
207
  btn_package.click(