SaltProphet committed on
Commit
7706e3a
·
verified ·
1 Parent(s): 4a8682f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -82
app.py CHANGED
@@ -11,6 +11,7 @@ import matplotlib
11
  import matplotlib.pyplot as plt
12
  from scipy import signal
13
  from typing import Tuple, List, Any
 
14
 
15
  # Use a non-interactive backend for Matplotlib
16
  matplotlib.use('Agg')
@@ -61,7 +62,7 @@ def write_midi_file(notes_list: List[Tuple[int, float, float]], bpm: float, outp
61
 
62
  # Build MIDI file
63
  header = b'MThd' + (6).to_bytes(4, 'big') + (1).to_bytes(2, 'big') + (1).to_bytes(2, 'big') + division.to_bytes(2, 'big')
64
-
65
  track_data = b''
66
  for delta, event in midi_events:
67
  # Encode delta time
@@ -76,13 +77,13 @@ def write_midi_file(notes_list: List[Tuple[int, float, float]], bpm: float, outp
76
  track_data += bytes([delta_bytes[i] | 0x80])
77
  else:
78
  track_data += bytes([delta_bytes[i]])
79
-
80
  # Add event
81
  track_data += bytes(event)
82
-
83
  # End of track
84
  track_data += b'\x00\xFF\x2F\x00'
85
-
86
  track_chunk = b'MTrk' + len(track_data).to_bytes(4, 'big') + track_data
87
  midi_data = header + track_chunk
88
 
@@ -100,11 +101,11 @@ def get_harmonic_recommendations(key_str: str) -> str:
100
  "G Min": "6A", "D Min": "7A",
101
  "Gb Maj": "2B", "Cb Maj": "7B", "A# Min": "3A", "D# Maj": "11B", "G# Maj": "3B"
102
  }
103
-
104
  code = KEY_TO_CAMELOT.get(key_str, "N/A")
105
  if code == "N/A":
106
  return "N/A (Key not recognized or 'Unknown Key' detected.)"
107
-
108
  try:
109
  num = int(code[:-1])
110
  mode = code[-1]
@@ -265,7 +266,7 @@ def separate_stems(audio_file_path: str) -> Tuple[str, str, str, str, str, str,
265
  temp_dir = tempfile.mkdtemp()
266
  stems = {}
267
  stem_names = ["vocals", "drums", "bass", "other", "guitar", "piano"]
268
-
269
  for name in stem_names:
270
  stem_path = os.path.join(temp_dir, f"{name}.wav")
271
  # Create mock audio (just a portion of the original)
@@ -273,7 +274,7 @@ def separate_stems(audio_file_path: str) -> Tuple[str, str, str, str, str, str,
273
  stems[name] = stem_path
274
 
275
  return (
276
- stems["vocals"], stems["drums"], stems["bass"], stems["other"],
277
  stems["guitar"], stems["piano"], float(detected_bpm), detected_key
278
  )
279
  except Exception as e:
@@ -282,7 +283,7 @@ def separate_stems(audio_file_path: str) -> Tuple[str, str, str, str, str, str,
282
  def generate_waveform_preview(y: np.ndarray, sr: int, stem_name: str, temp_dir: str) -> str:
283
  """Generates a Matplotlib image showing the waveform."""
284
  img_path = os.path.join(temp_dir, f"{stem_name}_preview.png")
285
-
286
  plt.figure(figsize=(10, 3))
287
  y_display = librosa.to_mono(y.T) if y.ndim > 1 else y
288
  librosa.display.waveshow(y_display, sr=sr, x_axis='time', color="#4a7098")
@@ -290,27 +291,27 @@ def generate_waveform_preview(y: np.ndarray, sr: int, stem_name: str, temp_dir:
290
  plt.tight_layout()
291
  plt.savefig(img_path)
292
  plt.close()
293
-
294
  return img_path
295
 
296
  def slice_stem_real(
297
- stem_audio_path: str,
298
- loop_choice: str,
299
- sensitivity: float,
300
  stem_name: str,
301
- manual_bpm: float,
302
- time_signature: str,
303
- crossfade_ms: int,
304
- transpose_semitones: int,
305
  detected_key: str,
306
- pan_depth: float,
307
- level_depth: float,
308
- modulation_rate: str,
309
  target_dbfs: float,
310
- attack_gain: float,
311
- sustain_gain: float,
312
- filter_type: str,
313
- filter_freq: float,
314
  filter_depth: float
315
  ) -> Tuple[List[Tuple[str, str]], str]:
316
  """Slices a single stem and applies transformations."""
@@ -319,9 +320,15 @@ def slice_stem_real(
319
 
320
  try:
321
  # Load audio
322
- sample_rate, y_int = stem_audio_path
323
- y = librosa.util.buf_to_float(y_int, dtype=np.float32)
324
-
 
 
 
 
 
 
325
  if y.ndim == 0:
326
  return [], ""
327
 
@@ -428,7 +435,7 @@ def slice_stem_real(
428
  # Simple slicing at regular intervals for demo
429
  slice_length = int(sample_rate * 0.5) # 0.5 second slices
430
  num_slices = len(y) // slice_length
431
-
432
  for i in range(min(num_slices, 20)): # Limit to 20 slices
433
  start_sample = i * slice_length
434
  end_sample = min(start_sample + slice_length, len(y))
@@ -440,34 +447,34 @@ def slice_stem_real(
440
 
441
  # --- 8. VISUALIZATION GENERATION ---
442
  img_path = generate_waveform_preview(y, sample_rate, stem_name, loops_dir)
443
-
444
  return output_files, img_path
445
 
446
  except Exception as e:
447
  raise gr.Error(f"Error processing stem: {str(e)}")
448
 
449
  def slice_all_and_zip(
450
- vocals: Tuple[int, np.ndarray],
451
- drums: Tuple[int, np.ndarray],
452
- bass: Tuple[int, np.ndarray],
453
- other: Tuple[int, np.ndarray],
454
- guitar: Tuple[int, np.ndarray],
455
- piano: Tuple[int, np.ndarray],
456
- loop_choice: str,
457
- sensitivity: float,
458
- manual_bpm: float,
459
- time_signature: str,
460
- crossfade_ms: int,
461
- transpose_semitones: int,
462
  detected_key: str,
463
- pan_depth: float,
464
- level_depth: float,
465
- modulation_rate: str,
466
  target_dbfs: float,
467
- attack_gain: float,
468
- sustain_gain: float,
469
- filter_type: str,
470
- filter_freq: float,
471
  filter_depth: float
472
  ) -> str:
473
  """Slices all available stems and packages them into a ZIP file."""
@@ -476,10 +483,10 @@ def slice_all_and_zip(
476
  "vocals": vocals, "drums": drums, "bass": bass,
477
  "other": other, "guitar": guitar, "piano": piano
478
  }
479
-
480
  # Filter out None stems
481
  valid_stems = {name: data for name, data in stems_to_process.items() if data is not None}
482
-
483
  if not valid_stems:
484
  raise gr.Error("No stems to process! Please separate stems first.")
485
 
@@ -490,25 +497,28 @@ def slice_all_and_zip(
490
  with zipfile.ZipFile(zip_path, 'w') as zf:
491
  for name, data in valid_stems.items():
492
  # Create temporary file for this stem
493
- stem_temp_dir = tempfile.mkdtemp()
494
- stem_path = os.path.join(stem_temp_dir, f"{name}.wav")
495
- sf.write(stem_path, data[1], data[0])
496
-
 
497
  # Process stem
498
  sliced_files, _ = slice_stem_real(
499
- (data[0], data[1]), loop_choice, sensitivity, name,
500
  manual_bpm, time_signature, crossfade_ms, transpose_semitones, detected_key,
501
  pan_depth, level_depth, modulation_rate, target_dbfs,
502
  attack_gain, sustain_gain, filter_type, filter_freq, filter_depth
503
  )
504
-
505
  # Add files to ZIP
506
  for file_path, file_type in sliced_files:
507
  arcname = os.path.join(file_type, os.path.basename(file_path))
508
  zf.write(file_path, arcname)
509
-
510
  # Clean up stem temp files
511
- shutil.rmtree(stem_temp_dir)
 
 
512
 
513
  return zip_path
514
 
@@ -531,7 +541,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
531
  gr.Markdown("### 1. Separate Stems")
532
  audio_input = gr.Audio(type="filepath", label="Upload a Track")
533
  separate_btn = gr.Button("Separate & Analyze Stems", variant="primary")
534
-
535
  # Outputs for separated stems
536
  vocals_output = gr.Audio(label="Vocals", visible=False)
537
  drums_output = gr.Audio(label="Drums", visible=False)
@@ -539,13 +549,13 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
539
  other_output = gr.Audio(label="Other / Instrumental", visible=False)
540
  guitar_output = gr.Audio(label="Guitar", visible=False)
541
  piano_output = gr.Audio(label="Piano", visible=False)
542
-
543
  # Analysis results
544
  with gr.Group():
545
  gr.Markdown("### 2. Analysis & Transform")
546
  detected_bpm_key = gr.Textbox(label="Detected Tempo & Key", value="", interactive=False)
547
  harmonic_recs = gr.Textbox(label="Harmonic Mixing Recommendations", value="", interactive=False)
548
-
549
  transpose_slider = gr.Slider(
550
  minimum=-12, maximum=12, value=0, step=1,
551
  label="Transpose Loops (Semitones)",
@@ -654,32 +664,32 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
654
  gr.Markdown("### Separated Stems")
655
  with gr.Row():
656
  with gr.Column():
657
- vocals_output.render()
658
  slice_vocals_btn = gr.Button("Slice Vocals")
659
  with gr.Column():
660
- drums_output.render()
661
  slice_drums_btn = gr.Button("Slice Drums")
662
  with gr.Row():
663
  with gr.Column():
664
- bass_output.render()
665
  slice_bass_btn = gr.Button("Slice Bass")
666
  with gr.Column():
667
- other_output.render()
668
  slice_other_btn = gr.Button("Slice Other")
669
  with gr.Row():
670
  with gr.Column():
671
- guitar_output.render()
672
  slice_guitar_btn = gr.Button("Slice Guitar")
673
  with gr.Column():
674
- piano_output.render()
675
  slice_piano_btn = gr.Button("Slice Piano")
676
-
677
  # Gallery for previews
678
  gr.Markdown("### Sliced Loops Preview")
679
  loop_gallery = gr.Gallery(
680
  label="Generated Loops",
681
- columns=4,
682
- object_fit="contain",
683
  height="auto"
684
  )
685
 
@@ -709,13 +719,13 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
709
  )
710
 
711
  # Individual stem slicing
712
- def slice_and_display(stem_data, loop_choice, sensitivity, stem_name, manual_bpm, time_signature,
713
- crossfade_ms, transpose_semitones, detected_key, pan_depth, level_depth,
714
- modulation_rate, target_dbfs, attack_gain, sustain_gain, filter_type,
715
  filter_freq, filter_depth):
716
  if stem_data is None:
717
  return [], "No stem data available"
718
-
719
  try:
720
  files, img_path = slice_stem_real(
721
  stem_data, loop_choice, sensitivity, stem_name,
@@ -723,7 +733,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
723
  pan_depth, level_depth, modulation_rate, target_dbfs,
724
  attack_gain, sustain_gain, filter_type, filter_freq, filter_depth
725
  )
726
-
727
  # Return only WAV files for gallery display
728
  wav_files = [f[0] for f in files if f[1] == "WAV"]
729
  return wav_files + [img_path], f"Generated {len(wav_files)} slices for {stem_name}"
@@ -738,7 +748,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
738
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
739
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
740
  ],
741
- outputs=[loop_gallery, gr.Textbox(label="Status")]
742
  )
743
 
744
  slice_drums_btn.click(
@@ -749,7 +759,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
749
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
750
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
751
  ],
752
- outputs=[loop_gallery, gr.Textbox(label="Status")]
753
  )
754
 
755
  slice_bass_btn.click(
@@ -760,7 +770,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
760
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
761
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
762
  ],
763
- outputs=[loop_gallery, gr.Textbox(label="Status")]
764
  )
765
 
766
  slice_other_btn.click(
@@ -771,7 +781,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
771
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
772
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
773
  ],
774
- outputs=[loop_gallery, gr.Textbox(label="Status")]
775
  )
776
 
777
  slice_guitar_btn.click(
@@ -782,7 +792,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
782
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
783
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
784
  ],
785
- outputs=[loop_gallery, gr.Textbox(label="Status")]
786
  )
787
 
788
  slice_piano_btn.click(
@@ -793,9 +803,10 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
793
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
794
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
795
  ],
796
- outputs=[loop_gallery, gr.Textbox(label="Status")]
797
  )
798
 
 
799
  # Slice all stems and create ZIP
800
  slice_all_btn.click(
801
  fn=slice_all_and_zip,
@@ -816,4 +827,4 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red"))
816
 
817
  # Launch the app
818
  if __name__ == "__main__":
819
- demo.launch()
 
11
  import matplotlib.pyplot as plt
12
  from scipy import signal
13
  from typing import Tuple, List, Any
14
+ import shutil # Import shutil for directory cleanup
15
 
16
  # Use a non-interactive backend for Matplotlib
17
  matplotlib.use('Agg')
 
62
 
63
  # Build MIDI file
64
  header = b'MThd' + (6).to_bytes(4, 'big') + (1).to_bytes(2, 'big') + (1).to_bytes(2, 'big') + division.to_bytes(2, 'big')
65
+
66
  track_data = b''
67
  for delta, event in midi_events:
68
  # Encode delta time
 
77
  track_data += bytes([delta_bytes[i] | 0x80])
78
  else:
79
  track_data += bytes([delta_bytes[i]])
80
+
81
  # Add event
82
  track_data += bytes(event)
83
+
84
  # End of track
85
  track_data += b'\x00\xFF\x2F\x00'
86
+
87
  track_chunk = b'MTrk' + len(track_data).to_bytes(4, 'big') + track_data
88
  midi_data = header + track_chunk
89
 
 
101
  "G Min": "6A", "D Min": "7A",
102
  "Gb Maj": "2B", "Cb Maj": "7B", "A# Min": "3A", "D# Maj": "11B", "G# Maj": "3B"
103
  }
104
+
105
  code = KEY_TO_CAMELOT.get(key_str, "N/A")
106
  if code == "N/A":
107
  return "N/A (Key not recognized or 'Unknown Key' detected.)"
108
+
109
  try:
110
  num = int(code[:-1])
111
  mode = code[-1]
 
266
  temp_dir = tempfile.mkdtemp()
267
  stems = {}
268
  stem_names = ["vocals", "drums", "bass", "other", "guitar", "piano"]
269
+
270
  for name in stem_names:
271
  stem_path = os.path.join(temp_dir, f"{name}.wav")
272
  # Create mock audio (just a portion of the original)
 
274
  stems[name] = stem_path
275
 
276
  return (
277
+ stems["vocals"], stems["drums"], stems["bass"], stems["other"],
278
  stems["guitar"], stems["piano"], float(detected_bpm), detected_key
279
  )
280
  except Exception as e:
 
283
  def generate_waveform_preview(y: np.ndarray, sr: int, stem_name: str, temp_dir: str) -> str:
284
  """Generates a Matplotlib image showing the waveform."""
285
  img_path = os.path.join(temp_dir, f"{stem_name}_preview.png")
286
+
287
  plt.figure(figsize=(10, 3))
288
  y_display = librosa.to_mono(y.T) if y.ndim > 1 else y
289
  librosa.display.waveshow(y_display, sr=sr, x_axis='time', color="#4a7098")
 
291
  plt.tight_layout()
292
  plt.savefig(img_path)
293
  plt.close()
294
+
295
  return img_path
296
 
297
  def slice_stem_real(
298
+ stem_audio_path: str,
299
+ loop_choice: str,
300
+ sensitivity: float,
301
  stem_name: str,
302
+ manual_bpm: float,
303
+ time_signature: str,
304
+ crossfade_ms: int,
305
+ transpose_semitones: int,
306
  detected_key: str,
307
+ pan_depth: float,
308
+ level_depth: float,
309
+ modulation_rate: str,
310
  target_dbfs: float,
311
+ attack_gain: float,
312
+ sustain_gain: float,
313
+ filter_type: str,
314
+ filter_freq: float,
315
  filter_depth: float
316
  ) -> Tuple[List[Tuple[str, str]], str]:
317
  """Slices a single stem and applies transformations."""
 
320
 
321
  try:
322
  # Load audio
323
+ # Assuming stem_audio_path is a tuple (sample_rate, audio_array) from Gradio
324
+ if isinstance(stem_audio_path, tuple) and len(stem_audio_path) == 2:
325
+ sample_rate, y_int = stem_audio_path
326
+ y = librosa.util.buf_to_float(y_int, dtype=np.float32)
327
+ else:
328
+ # Handle case where it's a filepath (from separate_stems)
329
+ y, sample_rate = librosa.load(stem_audio_path, sr=None)
330
+
331
+
332
  if y.ndim == 0:
333
  return [], ""
334
 
 
435
  # Simple slicing at regular intervals for demo
436
  slice_length = int(sample_rate * 0.5) # 0.5 second slices
437
  num_slices = len(y) // slice_length
438
+
439
  for i in range(min(num_slices, 20)): # Limit to 20 slices
440
  start_sample = i * slice_length
441
  end_sample = min(start_sample + slice_length, len(y))
 
447
 
448
  # --- 8. VISUALIZATION GENERATION ---
449
  img_path = generate_waveform_preview(y, sample_rate, stem_name, loops_dir)
450
+
451
  return output_files, img_path
452
 
453
  except Exception as e:
454
  raise gr.Error(f"Error processing stem: {str(e)}")
455
 
456
  def slice_all_and_zip(
457
+ vocals: Tuple[int, np.ndarray],
458
+ drums: Tuple[int, np.ndarray],
459
+ bass: Tuple[int, np.ndarray],
460
+ other: Tuple[int, np.ndarray],
461
+ guitar: Tuple[int, np.ndarray],
462
+ piano: Tuple[int, np.ndarray],
463
+ loop_choice: str,
464
+ sensitivity: float,
465
+ manual_bpm: float,
466
+ time_signature: str,
467
+ crossfade_ms: int,
468
+ transpose_semitones: int,
469
  detected_key: str,
470
+ pan_depth: float,
471
+ level_depth: float,
472
+ modulation_rate: str,
473
  target_dbfs: float,
474
+ attack_gain: float,
475
+ sustain_gain: float,
476
+ filter_type: str,
477
+ filter_freq: float,
478
  filter_depth: float
479
  ) -> str:
480
  """Slices all available stems and packages them into a ZIP file."""
 
483
  "vocals": vocals, "drums": drums, "bass": bass,
484
  "other": other, "guitar": guitar, "piano": piano
485
  }
486
+
487
  # Filter out None stems
488
  valid_stems = {name: data for name, data in stems_to_process.items() if data is not None}
489
+
490
  if not valid_stems:
491
  raise gr.Error("No stems to process! Please separate stems first.")
492
 
 
497
  with zipfile.ZipFile(zip_path, 'w') as zf:
498
  for name, data in valid_stems.items():
499
  # Create temporary file for this stem
500
+ # No need to save to a temp file here, can pass the tuple directly
501
+ # stem_temp_dir = tempfile.mkdtemp()
502
+ # stem_path = os.path.join(stem_temp_dir, f"{name}.wav")
503
+ # sf.write(stem_path, data[1], data[0])
504
+
505
  # Process stem
506
  sliced_files, _ = slice_stem_real(
507
+ data, loop_choice, sensitivity, name,
508
  manual_bpm, time_signature, crossfade_ms, transpose_semitones, detected_key,
509
  pan_depth, level_depth, modulation_rate, target_dbfs,
510
  attack_gain, sustain_gain, filter_type, filter_freq, filter_depth
511
  )
512
+
513
  # Add files to ZIP
514
  for file_path, file_type in sliced_files:
515
  arcname = os.path.join(file_type, os.path.basename(file_path))
516
  zf.write(file_path, arcname)
517
+
518
  # Clean up stem temp files
519
+ # shutil.rmtree(stem_temp_dir) # No temp dir created here anymore
520
+
521
+ # Note: The main temp_dir containing the zip file will be cleaned up by Gradio
522
 
523
  return zip_path
524
 
 
541
  gr.Markdown("### 1. Separate Stems")
542
  audio_input = gr.Audio(type="filepath", label="Upload a Track")
543
  separate_btn = gr.Button("Separate & Analyze Stems", variant="primary")
544
+
545
  # Outputs for separated stems
546
  vocals_output = gr.Audio(label="Vocals", visible=False)
547
  drums_output = gr.Audio(label="Drums", visible=False)
 
549
  other_output = gr.Audio(label="Other / Instrumental", visible=False)
550
  guitar_output = gr.Audio(label="Guitar", visible=False)
551
  piano_output = gr.Audio(label="Piano", visible=False)
552
+
553
  # Analysis results
554
  with gr.Group():
555
  gr.Markdown("### 2. Analysis & Transform")
556
  detected_bpm_key = gr.Textbox(label="Detected Tempo & Key", value="", interactive=False)
557
  harmonic_recs = gr.Textbox(label="Harmonic Mixing Recommendations", value="", interactive=False)
558
+
559
  transpose_slider = gr.Slider(
560
  minimum=-12, maximum=12, value=0, step=1,
561
  label="Transpose Loops (Semitones)",
 
664
  gr.Markdown("### Separated Stems")
665
  with gr.Row():
666
  with gr.Column():
667
+ # vocals_output.render() # Removed redundant render call
668
  slice_vocals_btn = gr.Button("Slice Vocals")
669
  with gr.Column():
670
+ # drums_output.render() # Removed redundant render call
671
  slice_drums_btn = gr.Button("Slice Drums")
672
  with gr.Row():
673
  with gr.Column():
674
+ # bass_output.render() # Removed redundant render call
675
  slice_bass_btn = gr.Button("Slice Bass")
676
  with gr.Column():
677
+ # other_output.render() # Removed redundant render call
678
  slice_other_btn = gr.Button("Slice Other")
679
  with gr.Row():
680
  with gr.Column():
681
+ # guitar_output.render() # Removed redundant render call
682
  slice_guitar_btn = gr.Button("Slice Guitar")
683
  with gr.Column():
684
+ # piano_output.render() # Removed redundant render call
685
  slice_piano_btn = gr.Button("Slice Piano")
686
+
687
  # Gallery for previews
688
  gr.Markdown("### Sliced Loops Preview")
689
  loop_gallery = gr.Gallery(
690
  label="Generated Loops",
691
+ columns=4,
692
+ object_fit="contain",
693
  height="auto"
694
  )
695
 
 
719
  )
720
 
721
  # Individual stem slicing
722
+ def slice_and_display(stem_data, loop_choice, sensitivity, stem_name, manual_bpm, time_signature,
723
+ crossfade_ms, transpose_semitones, detected_key, pan_depth, level_depth,
724
+ modulation_rate, target_dbfs, attack_gain, sustain_gain, filter_type,
725
  filter_freq, filter_depth):
726
  if stem_data is None:
727
  return [], "No stem data available"
728
+
729
  try:
730
  files, img_path = slice_stem_real(
731
  stem_data, loop_choice, sensitivity, stem_name,
 
733
  pan_depth, level_depth, modulation_rate, target_dbfs,
734
  attack_gain, sustain_gain, filter_type, filter_freq, filter_depth
735
  )
736
+
737
  # Return only WAV files for gallery display
738
  wav_files = [f[0] for f in files if f[1] == "WAV"]
739
  return wav_files + [img_path], f"Generated {len(wav_files)} slices for {stem_name}"
 
748
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
749
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
750
  ],
751
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
752
  )
753
 
754
  slice_drums_btn.click(
 
759
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
760
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
761
  ],
762
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
763
  )
764
 
765
  slice_bass_btn.click(
 
770
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
771
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
772
  ],
773
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
774
  )
775
 
776
  slice_other_btn.click(
 
781
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
782
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
783
  ],
784
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
785
  )
786
 
787
  slice_guitar_btn.click(
 
792
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
793
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
794
  ],
795
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
796
  )
797
 
798
  slice_piano_btn.click(
 
803
  pan_depth_slider, level_depth_slider, modulation_rate_radio, lufs_target_slider,
804
  attack_gain_slider, sustain_gain_slider, filter_type_radio, filter_freq_slider, filter_depth_slider
805
  ],
806
+ outputs=[loop_gallery, gr.Textbox(label="Status", visible=True)] # Added status textbox here
807
  )
808
 
809
+
810
  # Slice all stems and create ZIP
811
  slice_all_btn.click(
812
  fn=slice_all_and_zip,
 
827
 
828
  # Launch the app
829
  if __name__ == "__main__":
830
+ demo.launch()