tee342 committed on
Commit
e3f4db2
Β·
verified Β·
1 Parent(s): e3e38c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -92
app.py CHANGED
@@ -138,7 +138,7 @@ def match_loudness(audio_path, target_lufs=-14.0):
138
  adjusted.export(out_path, format="wav")
139
  return out_path
140
 
141
- # === AI Mastering Chain – Genre EQ + Loudness ===
142
  def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
143
  audio = AudioSegment.from_file(audio_path)
144
 
@@ -196,17 +196,17 @@ def multiband_compression(audio, low_gain=0, mid_gain=0, high_gain=0):
196
  # Low Band: 20–500Hz
197
  sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
198
  low_band = sosfilt(sos_low, samples)
199
- low_compressed = np.sign(low_band) * np.log1p(np.abs(low_band)) * (10 ** (low_gain / 20))
200
 
201
  # Mid Band: 500–4000Hz
202
  sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
203
  mid_band = sosfilt(sos_mid, samples)
204
- mid_compressed = np.sign(mid_band) * np.log1p(np.abs(mid_band)) * (10 ** (mid_gain / 20))
205
 
206
  # High Band: 4000–20000Hz
207
  sos_high = butter(10, [4000, 20000], btype='high', output='sos', fs=sr)
208
  high_band = sosfilt(sos_high, samples)
209
- high_compressed = np.sign(high_band) * np.log1p(np.abs(high_band)) * (10 ** (high_gain / 20))
210
 
211
  total = low_compressed + mid_compressed + high_compressed
212
  return array_to_audiosegment(total.astype(np.int16), sr, channels=audio.channels)
@@ -236,26 +236,21 @@ def stereo_imaging(audio, mid_side_balance=0.5, stereo_wide=1.0):
236
  side = audio.pan(0.3)
237
  return audio.overlay(side, position=0)
238
 
239
- # === Harmonic Exciter / Saturation ===
240
  def harmonic_saturation(audio, intensity=0.2):
241
  samples = np.array(audio.get_array_of_samples()).astype(np.float32)
242
  distorted = np.tanh(intensity * samples)
243
  return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)
244
 
245
- # === Sidechain Compression / Ducking ===
246
  def sidechain_compressor(main, sidechain, threshold=-16, ratio=4, attack=5, release=200):
247
  main_seg = AudioSegment.from_file(main)
248
  sidechain_seg = AudioSegment.from_file(sidechain)
249
  return main_seg.overlay(sidechain_seg - 10)
250
 
251
  # === Vocal Pitch Correction – Auto-Tune Style ===
252
- def auto_tune_vocal(audio_path, target_key="C"):
253
- try:
254
- # Placeholder for real-time pitch detection
255
- semitones = 0.2
256
- return apply_pitch_shift(AudioSegment.from_file(audio_path), semitones)
257
- except Exception as e:
258
- return None
259
 
260
  # === Create Karaoke Video from Audio + Lyrics ===
261
  def create_karaoke_video(audio_path, lyrics, bg_image=None):
@@ -301,96 +296,136 @@ def load_project(project_file):
301
  with open(project_file.name, "rb") as f:
302
  data = pickle.load(f)
303
  return (
304
- data["vocals"],
305
- data["drums"],
306
- data["bass"],
307
- data["other"],
308
  data["volumes"]["vocals"],
309
  data["volumes"]["drums"],
310
  data["volumes"]["bass"],
311
  data["volumes"]["other"]
312
  )
313
 
314
- # === Vocal Doubler / Harmonizer ===
315
- def vocal_doubler(audio):
316
- shifted_up = apply_pitch_shift(audio, 0.3)
317
- shifted_down = apply_pitch_shift(audio, -0.3)
318
- return audio.overlay(shifted_up).overlay(shifted_down)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
 
320
- # === Genre Detection + Preset Suggestions ===
321
- def suggest_preset_by_genre(audio_path):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
322
  try:
323
  y, sr = torchaudio.load(audio_path)
324
  mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
325
- genre = "Pop"
326
- return ["Vocal Clarity", "Limiter", "Stereo Expansion"]
327
  except Exception:
328
- return ["Default"]
329
-
330
- # === Vocal Isolation Helpers ===
331
- def load_track_local(path, sample_rate, channels=2):
332
- sig, rate = torchaudio.load(path)
333
- if rate != sample_rate:
334
- sig = torchaudio.functional.resample(sig, rate, sample_rate)
335
- if channels == 1:
336
- sig = sig.mean(0)
337
- return sig
338
-
339
- def save_track(path, wav, sample_rate):
340
- path = Path(path)
341
- torchaudio.save(str(path), wav, sample_rate)
342
-
343
- def apply_vocal_isolation(audio_path):
344
- model = pretrained.get_model(name='htdemucs')
345
- wav = load_track_local(audio_path, model.samplerate, channels=2)
346
- ref = wav.mean(0)
347
- wav -= ref[:, None]
348
- sources = apply_model(model, wav[None])[0]
349
- wav += ref[:, None]
350
-
351
- vocal_track = sources[3].cpu()
352
- out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
353
- save_track(out_path, vocal_track, model.samplerate)
354
- return out_path
 
 
 
 
 
 
355
 
356
- # === Stem Splitting (Drums, Bass, Other, Vocals) ===
357
- def stem_split(audio_path):
358
- model = pretrained.get_model(name='htdemucs')
359
- wav = load_track_local(audio_path, model.samplerate, channels=2)
360
- sources = apply_model(model, wav[None])[0]
361
-
362
- output_dir = tempfile.mkdtemp()
363
- stem_paths = []
364
-
365
- for i, name in enumerate(['drums', 'bass', 'other', 'vocals']):
366
- path = os.path.join(output_dir, f"{name}.wav")
367
- save_track(path, sources[i].cpu(), model.samplerate)
368
- stem_paths.append(gr.File(value=path))
369
-
370
- return stem_paths
371
-
372
- # === UI ===
373
- effect_options = [
374
- "Noise Reduction",
375
- "Compress Dynamic Range",
376
- "Add Reverb",
377
- "Pitch Shift",
378
- "Echo",
379
- "Stereo Widening",
380
- "Bass Boost",
381
- "Treble Boost",
382
- "Normalize",
383
- "Noise Gate",
384
- "Limiter",
385
- "Phaser",
386
- "Flanger",
387
- "Bitcrusher",
388
- "Auto Gain",
389
- "Vocal Distortion",
390
- "Harmony",
391
- "Stage Mode"
392
- ]
393
 
 
394
  with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
395
  gr.Markdown("## 🎧 Ultimate AI Audio Studio\nUpload, edit, export β€” powered by AI!")
396
 
@@ -400,7 +435,7 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
400
  fn=process_audio,
401
  inputs=[
402
  gr.Audio(label="Upload Audio", type="filepath"),
403
- gr.CheckboxGroup(choices=effect_options, label="Apply Effects in Order"),
404
  gr.Checkbox(label="Isolate Vocals After Effects"),
405
  gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] if preset_names else None),
406
  gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
@@ -526,7 +561,7 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
526
  gr.Slider(minimum=-10, maximum=10, value=0, label="Vocals Volume"),
527
  gr.Slider(minimum=-10, maximum=10, value=0, label="Drums Volume"),
528
  gr.Slider(minimum=-10, maximum=10, value=0, label="Bass Volume"),
529
- gr.Slider(minimum=-10, maximum=10, value=0, label="Other Volume"),
530
  ],
531
  outputs=gr.File(label="Project File (.aiproj)"),
532
  title="Save Your Full Mix Session",
 
138
  adjusted.export(out_path, format="wav")
139
  return out_path
140
 
141
+ # === AI Mastering Chain – Genre EQ + Loudness Match ===
142
  def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
143
  audio = AudioSegment.from_file(audio_path)
144
 
 
196
  # Low Band: 20–500Hz
197
  sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
198
  low_band = sosfilt(sos_low, samples)
199
+ low_compressed = low_band * (10 ** (low_gain / 20))
200
 
201
  # Mid Band: 500–4000Hz
202
  sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
203
  mid_band = sosfilt(sos_mid, samples)
204
+ mid_compressed = mid_band * (10 ** (mid_gain / 20))
205
 
206
  # High Band: 4000–20000Hz
207
  sos_high = butter(10, [4000, 20000], btype='high', output='sos', fs=sr)
208
  high_band = sosfilt(sos_high, samples)
209
+ high_compressed = high_band * (10 ** (high_gain / 20))
210
 
211
  total = low_compressed + mid_compressed + high_compressed
212
  return array_to_audiosegment(total.astype(np.int16), sr, channels=audio.channels)
 
236
  side = audio.pan(0.3)
237
  return audio.overlay(side, position=0)
238
 
239
# === Harmonic Saturation ===
def harmonic_saturation(audio, intensity=0.2):
    """Apply soft-clipping (tanh) saturation to add harmonic color.

    Parameters:
        audio: pydub AudioSegment to process.
        intensity: drive amount; larger values saturate harder. Must be > 0.

    Returns:
        A new AudioSegment with the saturated signal at the original peak level.
    """
    samples = np.array(audio.get_array_of_samples()).astype(np.float32)
    peak = np.max(np.abs(samples))
    if peak == 0:
        # Silence in, silence out — avoids a divide-by-zero below.
        return audio
    # BUG FIX: the original applied tanh directly to int16-range samples,
    # collapsing the waveform into [-1, 1]; the int16 cast then produced
    # near-silence. Normalize first, saturate, then restore the peak level.
    normalized = samples / peak
    distorted = np.tanh(intensity * normalized) / np.tanh(intensity) * peak
    return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)
244
 
245
# === Sidechain Compression ===
def sidechain_compressor(main, sidechain, threshold=-16, ratio=4, attack=5, release=200):
    """Mix *sidechain* (attenuated by 10 dB) on top of *main* from position 0.

    NOTE(review): this is a placeholder, not a true sidechain compressor —
    threshold, ratio, attack and release are currently ignored, and the main
    signal's gain is never modulated by the sidechain's envelope. Confirm
    whether real ducking is intended before relying on these parameters.

    Parameters:
        main, sidechain: file paths readable by pydub's AudioSegment.from_file.

    Returns:
        AudioSegment of *main* with the attenuated sidechain overlaid.
    """
    main_seg = AudioSegment.from_file(main)
    sidechain_seg = AudioSegment.from_file(sidechain)
    return main_seg.overlay(sidechain_seg - 10)
250
 
251
# === Vocal Pitch Correction – Auto-Tune Style ===
def auto_tune_vocal(audio, target_key="C"):
    """Placeholder auto-tune: shift the whole take up by a fixed 0.2 semitones.

    NOTE(review): target_key is currently ignored — no pitch detection is
    performed; confirm whether per-note correction is intended.
    """
    correction_semitones = 0.2
    return apply_pitch_shift(audio, correction_semitones)
 
 
 
 
 
254
 
255
  # === Create Karaoke Video from Audio + Lyrics ===
256
  def create_karaoke_video(audio_path, lyrics, bg_image=None):
 
296
  with open(project_file.name, "rb") as f:
297
  data = pickle.load(f)
298
  return (
299
+ array_to_audiosegment(data["vocals"], 44100),
300
+ array_to_audiosegment(data["drums"], 44100),
301
+ array_to_audiosegment(data["bass"], 44100),
302
+ array_to_audiosegment(data["other"], 44100),
303
  data["volumes"]["vocals"],
304
  data["volumes"]["drums"],
305
  data["volumes"]["bass"],
306
  data["volumes"]["other"]
307
  )
308
 
309
# === Process Audio Function ===
def process_audio(audio_file, selected_effects, isolate_vocals, preset_name, export_format):
    """Apply the selected effect chain to an uploaded file and export the result.

    Parameters:
        audio_file: path to the uploaded audio file.
        selected_effects: ordered list of effect names; unknown names are skipped.
        isolate_vocals: when True, run vocal isolation on the processed audio.
        preset_name: currently unused here — NOTE(review): confirm whether the
            preset should expand into selected_effects before this is called.
        export_format: "MP3" or "WAV" (case-insensitive for the export call).

    Returns:
        (output_path, waveform_image, session_log, genre, status) on success,
        or (None, None, status, "", status) on failure.
    """
    status = "πŸ”Š Loading audio..."
    try:
        audio = AudioSegment.from_file(audio_file)
        status = "πŸ›  Applying effects..."

        # Dispatch table: effect display name -> callable(AudioSegment) -> AudioSegment.
        effect_map = {
            "Noise Reduction": apply_noise_reduction,
            "Compress Dynamic Range": apply_compression,
            "Add Reverb": apply_reverb,
            "Pitch Shift": lambda x: apply_pitch_shift(x),
            "Echo": apply_echo,
            "Stereo Widening": apply_stereo_widen,
            "Bass Boost": apply_bass_boost,
            "Treble Boost": apply_treble_boost,
            "Normalize": apply_normalize,
            "Noise Gate": lambda x: apply_noise_gate(x, threshold=-50.0),
            "Limiter": lambda x: apply_limiter(x, limit_dB=-1),
            "Phaser": lambda x: apply_phaser(x),
            "Flanger": lambda x: apply_phaser(x, rate=1.2, depth=0.9, mix=0.7),
            "Bitcrusher": lambda x: apply_bitcrush(x, bit_depth=8),
            "Auto Gain": lambda x: apply_auto_gain(x, target_dB=-20),
            "Vocal Distortion": lambda x: apply_vocal_distortion(x),
            "Harmony": lambda x: apply_harmony(x),
            "Stage Mode": apply_stage_mode
        }

        # Effects are applied in the order the user selected them.
        for effect_name in selected_effects:
            if effect_name in effect_map:
                audio = effect_map[effect_name](audio)

        status = "πŸ’Ύ Saving final audio..."
        # BUG FIX: the suffix was hard-coded to ".wav" even when exporting MP3,
        # producing an MP3 stream inside a file named *.wav.
        suffix = f".{export_format.lower()}"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as f:
            if isolate_vocals:
                # Vocal isolation works on a file, so round-trip through a temp WAV.
                temp_input = os.path.join(tempfile.gettempdir(), "input.wav")
                audio.export(temp_input, format="wav")
                vocal_path = apply_vocal_isolation(temp_input)
                final_audio = AudioSegment.from_wav(vocal_path)
            else:
                final_audio = audio

            output_path = f.name
            final_audio.export(output_path, format=export_format.lower())

        waveform_image = show_waveform(output_path)
        genre = detect_genre(output_path)
        session_log = generate_session_log(audio_file, selected_effects, isolate_vocals, export_format, genre)

        status = "πŸŽ‰ Done!"
        return output_path, waveform_image, session_log, genre, status

    except Exception as e:
        status = f"❌ Error: {str(e)}"
        return None, None, status, "", status
365
+
366
# === Waveform + Spectrogram Generator ===
def show_waveform(audio_file):
    """Render the first 10,000 samples of *audio_file* as a waveform image.

    Parameters:
        audio_file: path to any format AudioSegment.from_file can decode.

    Returns:
        A PIL Image of the waveform PNG, or None if decoding/plotting fails.
    """
    try:
        audio = AudioSegment.from_file(audio_file)
        samples = np.array(audio.get_array_of_samples())
        fig = plt.figure(figsize=(10, 2))
        try:
            plt.plot(samples[:10000], color="blue")
            plt.axis("off")
            buf = BytesIO()
            plt.savefig(buf, format="png", bbox_inches="tight", dpi=100)
        finally:
            # BUG FIX: the figure was only closed on the success path; an
            # exception during plot/savefig leaked the matplotlib figure.
            plt.close(fig)
        buf.seek(0)
        return Image.open(buf)
    except Exception:
        # Best-effort: the caller treats None as "no waveform available".
        return None
381
+
382
def detect_genre(audio_path):
    """Return a genre label for *audio_path*: "Speech", or "Unknown" on failure.

    NOTE(review): this is a stub — MFCC features are computed but never used,
    and the label is hard-coded to "Speech". The feature extraction is kept so
    that unreadable/undecodable files still fall through to "Unknown"; replace
    with a real classifier before trusting the label.
    """
    try:
        y, sr = torchaudio.load(audio_path)
        # Dead store in practice: retained only so decode/feature failures
        # are detected and mapped to "Unknown".
        mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
        return "Speech"
    except Exception:
        return "Unknown"
389
+
390
# === Session Info Export ===
def generate_session_log(audio_path, effects, isolate_vocals, export_format, genre):
    """Build a pretty-printed JSON summary of one processing session.

    Records the wall-clock timestamp, the input file's base name, the applied
    effect list, the vocal-isolation flag, the export format, and the detected
    genre, in that key order.
    """
    session = {}
    session["timestamp"] = str(datetime.datetime.now())
    session["filename"] = os.path.basename(audio_path)
    session["effects_applied"] = effects
    session["isolate_vocals"] = isolate_vocals
    session["export_format"] = export_format
    session["detected_genre"] = genre
    return json.dumps(session, indent=2)
401
+
402
# === Load Presets ===
# Maps each preset's display name to its ordered chain of effect names.
preset_choices = {
    "Default": [],
    "Clean Podcast": ["Noise Reduction", "Normalize"],
    "Podcast Mastered": ["Noise Reduction", "Normalize", "Compress Dynamic Range"],
    "Radio Ready": ["Bass Boost", "Treble Boost", "Limiter"],
    "Music Production": ["Reverb", "Stereo Widening", "Pitch Shift"],
    "ASMR Creator": ["Noise Gate", "Auto Gain", "Low-Pass Filter"],
    "Voiceover Pro": ["Vocal Isolation", "TTS", "EQ Match"],
    "8-bit Retro": ["Bitcrusher", "Echo", "Mono Downmix"],
    "πŸŽ™ Clean Vocal": ["Noise Reduction", "Normalize", "High Pass Filter (80Hz)"],
    "πŸ§ͺ Vocal Distortion": ["Vocal Distortion", "Reverb", "Compress Dynamic Range"],
    "🎢 Singer's Harmony": ["Harmony", "Stereo Widening", "Pitch Shift"],
    "🌫 ASMR Vocal": ["Auto Gain", "Low-Pass Filter (3000Hz)", "Noise Gate"],
    "🎼 Stage Mode": ["Reverb", "Bass Boost", "Limiter"],
    "🎡 Auto-Tune Style": ["Pitch Shift (+1 semitone)", "Normalize", "Treble Boost"]
}

# Dropdown order follows the dict's insertion order above.
preset_names = list(preset_choices)
421
 
422
# === Vocal Doubler / Harmonizer ===
def vocal_doubler(audio):
    """Thicken a vocal by layering two slightly detuned copies (±0.3 semitones)."""
    detune = 0.3
    above = apply_pitch_shift(audio, detune)
    below = apply_pitch_shift(audio, -detune)
    doubled = audio.overlay(above)
    return doubled.overlay(below)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
427
 
428
+ # === Main UI ===
429
  with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
430
  gr.Markdown("## 🎧 Ultimate AI Audio Studio\nUpload, edit, export β€” powered by AI!")
431
 
 
435
  fn=process_audio,
436
  inputs=[
437
  gr.Audio(label="Upload Audio", type="filepath"),
438
+ gr.CheckboxGroup(choices=preset_choices.get("Default", []), label="Apply Effects in Order"),
439
  gr.Checkbox(label="Isolate Vocals After Effects"),
440
  gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] if preset_names else None),
441
  gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
 
561
  gr.Slider(minimum=-10, maximum=10, value=0, label="Vocals Volume"),
562
  gr.Slider(minimum=-10, maximum=10, value=0, label="Drums Volume"),
563
  gr.Slider(minimum=-10, maximum=10, value=0, label="Bass Volume"),
564
+ gr.Slider(minimum=-10, maximum=10, value=0, label="Other Volume")
565
  ],
566
  outputs=gr.File(label="Project File (.aiproj)"),
567
  title="Save Your Full Mix Session",