# Gradio demo: voice-over denoising via a remote HuggingFace Space model,
# with an adjustable blend between the original and denoised audio.
import gradio as gr
import requests
import soundfile as sf  # NOTE(review): not used below — confirm before removing
import io
import torch  # NOTE(review): not used directly; torchaudio returns torch tensors
import torchaudio

# Remote HuggingFace Space endpoint that performs the actual denoising.
API_URL = "https://hf.space/embed/akhaliq/denoise-audio/+/api/predict"
def enhance_vo(file, denoise_strength):
    """Denoise a voice-over recording remotely and blend it with the original.

    Args:
        file: Path to the input audio file (MP3 or WAV).
        denoise_strength: Blend percentage in [0, 100]; 100 keeps only the
            denoised audio, 0 keeps the original unchanged.

    Returns:
        Path to the enhanced WAV file written to the working directory.

    Raises:
        RuntimeError: If the remote denoising service returns a non-200 status.
        requests.HTTPError: If downloading the denoised audio fails.
    """
    # Step 1: Send the raw audio to the remote denoising model.
    # timeout keeps a hung service from blocking the UI forever.
    with open(file, "rb") as f:
        response = requests.post(API_URL, files={"data": f}, timeout=120)
    if response.status_code != 200:
        # Specific exception type so callers need not catch bare Exception.
        raise RuntimeError("Denoising model failed to process audio.")

    # Step 2: Download the denoised audio referenced in the JSON response.
    response_data = response.json()
    url = response_data["data"][0]["url"]
    audio_response = requests.get(url, timeout=120)
    audio_response.raise_for_status()  # surface download failures early
    denoised_bytes = io.BytesIO(audio_response.content)

    # Step 3: Load both original and denoised audio.
    orig_waveform, sr = torchaudio.load(file)
    denoised_waveform, _ = torchaudio.load(denoised_bytes)

    # The remote model may return a slightly different sample count
    # (e.g. codec padding); trim both to the common length before blending.
    n_samples = min(orig_waveform.shape[-1], denoised_waveform.shape[-1])
    orig_waveform = orig_waveform[..., :n_samples]
    denoised_waveform = denoised_waveform[..., :n_samples]

    # Step 4: Linear blend controlled by the slider (0-100 %).
    blend_ratio = denoise_strength / 100.0
    output = (1 - blend_ratio) * orig_waveform + blend_ratio * denoised_waveform

    # Peak-normalize, guarding against division by zero on silent audio.
    peak = output.abs().max()
    if peak > 0:
        output = output / peak

    # Step 5: Save to file and return the path for Gradio to serve.
    output_path = "enhanced_output.wav"
    torchaudio.save(output_path, output, sr)
    return output_path
# Gradio app UI: upload audio, choose denoise strength, download the result.
interface = gr.Interface(
    fn=enhance_vo,
    inputs=[
        gr.Audio(type="filepath", label="Upload MP3 or WAV"),
        gr.Slider(0, 100, value=100, label="Noise Reduction Strength (%)"),
    ],
    outputs=gr.Audio(type="filepath", label="Enhanced Audio (WAV)"),
    title="Adobe-style VO Enhancer (Online Model)",
    description="Upload VO audio (MP3/WAV), adjust slider, and download cleaned WAV file.",
)

# Launch only when run as a script, so importing this module has no side effects.
if __name__ == "__main__":
    interface.launch()