import gradio as gr
import torch
from diffusers import AudioLDM2Pipeline

import ast
import copy
import csv
import inspect
import os
import shutil
import subprocess
import tempfile
import warnings
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Iterable, Literal, Optional, Sequence

import numpy as np
import PIL
import PIL.Image
from gradio_client import utils as client_utils
from gradio_client.documentation import document

from gradio import components, oauth, processing_utils, routes, utils, wasm_utils
from gradio.context import Context, LocalContext, get_blocks_context
from gradio.data_classes import GradioModel, GradioRootModel
from gradio.events import Dependency, EventData
from gradio.exceptions import Error
from gradio.flagging import CSVLogger
from gradio.utils import UnhashableKeyDict

# make Space compatible with CPU duplicates
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.float16
else:
    device = "cpu"
    torch_dtype = torch.float32

# load the diffusers pipeline
repo_id = "cvssp/audioldm2"
pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch_dtype).to(device)
# pipe.unet = torch.compile(pipe.unet)
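# (Uncommenting the torch.compile line above can speed up repeated UNet inference on
# PyTorch 2.x, at the cost of a slower first generation; it is left disabled here.)
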
# set the generator for reproducibility
generator = torch.Generator(device)


def make_waveform(
    audio: str | tuple[int, np.ndarray],
    *,
    bg_color: str = "#f3f4f6",
    bg_image: str | None = None,
    fg_alpha: float = 0.75,
    bars_color: str | tuple[str, str] = ("#fbbf24", "#ea580c"),
    bar_count: int = 50,
    bar_width: float = 0.6,
    animate: bool = False,
) -> str:
    """
    Generates a waveform video from an audio file. Useful for creating an easy-to-share audio visualization. The output should be passed into a `gr.Video` component.
    Parameters:
        audio: Audio file path or tuple of (sample_rate, audio_data)
        bg_color: Background color of waveform (ignored if bg_image is provided)
        bg_image: Background image of waveform
        fg_alpha: Opacity of foreground waveform
        bars_color: Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) for a gradient
        bar_count: Number of bars in waveform
        bar_width: Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.
        animate: If true, the audio waveform overlay will be animated; if false, it will be static.
    Returns:
        A filepath to the output video in mp4 format.
    """
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation

    if isinstance(audio, str):
        audio_file = audio
        audio = processing_utils.audio_from_file(audio)
    else:
        tmp_wav = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        processing_utils.audio_to_file(audio[0], audio[1], tmp_wav.name, format="wav")
        audio_file = tmp_wav.name

    if not os.path.isfile(audio_file):
        raise ValueError("Audio file not found.")
    ffmpeg = shutil.which("ffmpeg")
    if not ffmpeg:
        raise RuntimeError("ffmpeg not found.")

    duration = round(len(audio[1]) / audio[0], 4)

    # Helper methods to create waveform
    def hex_to_rgb(hex_str):
        return [int(hex_str[i : i + 2], 16) for i in range(1, 6, 2)]

    def get_color_gradient(c1, c2, n):
        if n < 1:
            raise ValueError("Must have at least one stop in gradient")
        c1_rgb = np.array(hex_to_rgb(c1)) / 255
        c2_rgb = np.array(hex_to_rgb(c2)) / 255
        mix_pcts = [x / (n - 1) for x in range(n)]
        rgb_colors = [((1 - mix) * c1_rgb + (mix * c2_rgb)) for mix in mix_pcts]
        return [
            "#" + "".join(f"{int(round(val * 255)):02x}" for val in item)
            for item in rgb_colors
        ]

    # Reshape audio to have a fixed number of bars
    samples = audio[1]
    if len(samples.shape) > 1:
        samples = np.mean(samples, 1)
    bins_to_pad = bar_count - (len(samples) % bar_count)
    samples = np.pad(samples, [(0, bins_to_pad)])
    samples = np.reshape(samples, (bar_count, -1))
    samples = np.abs(samples)
    samples = np.max(samples, 1)
    with utils.MatplotlibBackendMananger():
        plt.clf()

        # Plot waveform
        color = (
            bars_color
            if isinstance(bars_color, str)
            else get_color_gradient(bars_color[0], bars_color[1], bar_count)
        )

        if animate:
            fig = plt.figure(figsize=(5, 1), dpi=200, frameon=False)
            fig.subplots_adjust(left=0, bottom=0, right=1, top=1)

        plt.axis("off")
        plt.margins(x=0)

        bar_alpha = fg_alpha if animate else 1.0
        barcollection = plt.bar(
            np.arange(0, bar_count),
            samples * 2,
            bottom=(-1 * samples),
            width=bar_width,
            color=color,
            alpha=bar_alpha,
        )

        tmp_img = tempfile.NamedTemporaryFile(suffix=".png", delete=False)

        savefig_kwargs: dict[str, Any] = {"bbox_inches": "tight"}
        if bg_image is not None:
            savefig_kwargs["transparent"] = True
            if animate:
                savefig_kwargs["facecolor"] = "none"
        else:
            savefig_kwargs["facecolor"] = bg_color
        plt.savefig(tmp_img.name, **savefig_kwargs)

        if not animate:
            waveform_img = PIL.Image.open(tmp_img.name)
            waveform_img = waveform_img.resize((1000, 400))

            # Composite waveform with background image
            if bg_image is not None:
                waveform_array = np.array(waveform_img)
                waveform_array[:, :, 3] = waveform_array[:, :, 3] * fg_alpha
                waveform_img = PIL.Image.fromarray(waveform_array)

                bg_img = PIL.Image.open(bg_image)
                waveform_width, waveform_height = waveform_img.size
                bg_width, bg_height = bg_img.size
                if waveform_width != bg_width:
                    bg_img = bg_img.resize(
                        (
                            waveform_width,
                            2 * int(bg_height * waveform_width / bg_width / 2),
                        )
                    )
                    bg_width, bg_height = bg_img.size
                composite_height = max(bg_height, waveform_height)
                composite = PIL.Image.new(
                    "RGBA", (waveform_width, composite_height), "#FFFFFF"
                )
                composite.paste(bg_img, (0, composite_height - bg_height))
                composite.paste(
                    waveform_img, (0, composite_height - waveform_height), waveform_img
                )
                composite.save(tmp_img.name)
                img_width, img_height = composite.size
            else:
                img_width, img_height = waveform_img.size
                waveform_img.save(tmp_img.name)
        else:

            def _animate(_):
                for idx, b in enumerate(barcollection):
                    rand_height = np.random.uniform(0.8, 1.2)
                    b.set_height(samples[idx] * rand_height * 2)
                    b.set_y((-rand_height * samples)[idx])

            frames = int(duration * 10)
            anim = FuncAnimation(
                fig,  # type: ignore
                _animate,  # type: ignore
                repeat=False,
                blit=False,
                frames=frames,
                interval=100,
            )
            anim.save(
                tmp_img.name,
                writer="pillow",
                fps=10,
                codec="png",
                savefig_kwargs=savefig_kwargs,
            )
    # Convert waveform to video with ffmpeg
    output_mp4 = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)

    if animate and bg_image is not None:
        # Animated waveform overlaid on the (looped) background image
        ffmpeg_cmd = [
            ffmpeg,
            "-loop",
            "1",
            "-i",
            bg_image,
            "-i",
            tmp_img.name,
            "-i",
            audio_file,
            "-filter_complex",
            "[0:v]scale=w=trunc(iw/2)*2:h=trunc(ih/2)*2[bg];[1:v]format=rgba,colorchannelmixer=aa=1.0[ov];[bg][ov]overlay=(main_w-overlay_w*0.9)/2:main_h-overlay_h*0.9/2[output]",
            "-t",
            str(duration),
            "-map",
            "[output]",
            "-map",
            "2:a",
            "-c:v",
            "libx264",
            "-c:a",
            "aac",
            "-shortest",
            "-y",
            output_mp4.name,
        ]
    elif animate and bg_image is None:
        # Animated waveform only, scaled to 1000x400
        ffmpeg_cmd = [
            ffmpeg,
            "-i",
            tmp_img.name,
            "-i",
            audio_file,
            "-filter_complex",
            "[0:v][1:a]concat=n=1:v=1:a=1[v];[v]scale=1000:400,format=yuv420p[v_scaled]",
            "-map",
            "[v_scaled]",
            "-map",
            "1:a",
            "-c:v",
            "libx264",
            "-c:a",
            "aac",
            "-shortest",
            "-y",
            output_mp4.name,
        ]
    else:
        # Static waveform image with a progress overlay sliding across it
        ffmpeg_cmd = [
            ffmpeg,
            "-loop",
            "1",
            "-i",
            tmp_img.name,
            "-i",
            audio_file,
            "-vf",
            f"color=c=#FFFFFF77:s={img_width}x{img_height}[bar];[0][bar]overlay=-w+(w/{duration})*t:H-h:shortest=1",  # type: ignore
            "-t",
            str(duration),
            "-y",
            output_mp4.name,
        ]

    subprocess.check_call(ffmpeg_cmd)
    return output_mp4.name
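
# Illustrative usage (not executed here): make_waveform can also be called directly with a
# (sample_rate, samples) tuple. `wav` is a hypothetical 16 kHz mono numpy array; the returned
# mp4 path can be passed straight to a gr.Video component.
#     video_path = make_waveform((16000, wav), bg_image="bg.png")
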
def text2audio(text, negative_prompt, duration, guidance_scale, random_seed, n_candidates):
    if text is None:
        raise gr.Error("Please provide a text input.")
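
    # With num_waveforms_per_prompt > 1, the AudioLDM 2 pipeline scores the candidate
    # waveforms against the prompt and returns them ranked best-first, so waveforms[0]
    # below is the highest-scoring candidate.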
    waveforms = pipe(
        text,
        audio_length_in_s=duration,
        guidance_scale=guidance_scale,
        num_inference_steps=200,
        negative_prompt=negative_prompt,
        num_waveforms_per_prompt=n_candidates if n_candidates else 1,
        generator=generator.manual_seed(int(random_seed)),
    )["audios"]

    return make_waveform((16000, waveforms[0]), bg_image="bg.png")
    # return gr.Audio(sources=["microphone"], type="filepath")

iface = gr.Blocks()

with iface:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div
                style="
                    display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                "
            >
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                    AudioLDM 2: A General Framework for Audio, Music, and Speech Generation
                </h1>
            </div>
            <p style="margin-bottom: 10px; font-size: 94%">
                <a href="https://arxiv.org/abs/2308.05734">[Paper]</a>
                <a href="https://audioldm.github.io/audioldm2">[Project page]</a>
                <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2">[🧨 Diffusers]</a>
            </p>
        </div>
        """
    )
    gr.HTML(
        """This is the demo for AudioLDM 2, powered by 🧨 Diffusers. The demo uses the checkpoint <a
        href="https://huggingface.co/cvssp/audioldm2">AudioLDM 2 base</a>. For faster inference without waiting
        in the queue, you can duplicate this Space and upgrade to a GPU in the settings."""
    )
    gr.DuplicateButton()

    with gr.Group():
        textbox = gr.Textbox(
            value="The vibrant beat of Brazilian samba drums.",
            max_lines=1,
            label="Input text",
            info="The input text strongly affects the audio quality. Make it descriptive by using more adjectives.",
            elem_id="prompt-in",
        )
        negative_textbox = gr.Textbox(
            value="Low quality.",
            max_lines=1,
            label="Negative prompt",
            info="Enter a negative prompt describing what the generated audio should avoid. Choosing an appropriate negative prompt can improve the audio quality significantly.",
            elem_id="prompt-in",
        )
        with gr.Accordion("Click to modify detailed configurations", open=False):
            seed = gr.Number(
                value=45,
                label="Seed",
                info="Changing this value (any integer) will lead to a different generation result.",
            )
            duration = gr.Slider(5, 15, value=10, step=2.5, label="Duration (seconds)")
            guidance_scale = gr.Slider(
                0,
                7,
                value=3.5,
                step=0.5,
                label="Guidance scale",
                info="Larger => better quality and relevance to the text; smaller => greater diversity.",
            )
            n_candidates = gr.Slider(
                1,
                5,
                value=3,
                step=1,
                label="Number of waveforms to generate",
                info="Automatic quality control: this sets the number of candidates (e.g., generate three audios and show you the best one). A larger value usually leads to better quality at the cost of heavier computation.",
            )
        outputs = gr.Video(label="Output", elem_id="output-video")
        btn = gr.Button("Submit")

    btn.click(
        text2audio,
        inputs=[textbox, negative_textbox, duration, guidance_scale, seed, n_candidates],
        # inputs=[textbox, negative_textbox, 10, guidance_scale, seed, n_candidates],
        outputs=[outputs],
    )

    gr.HTML(
        """
        <div class="footer" style="text-align: center">
            <p>Share your generations with the community by clicking the share icon at the top right of the generated audio!</p>
            <p>Follow the latest updates on AudioLDM 2 on our <a href="https://audioldm.github.io/audioldm2"
                style="text-decoration: underline;" target="_blank">project page</a>.</p>
            <p>Model by <a href="https://twitter.com/LiuHaohe" style="text-decoration: underline;"
                target="_blank">Haohe Liu</a>. Code and demo by 🤗 Hugging Face.</p>
        </div>
        """
    )

    gr.Examples(
        [
            ["A hammer is hitting a wooden surface.", "Low quality.", 10, 3.5, 45, 3],
            ["A cat is meowing for attention.", "Low quality.", 10, 3.5, 45, 3],
            ["An excited crowd cheering at a sports game.", "Low quality.", 10, 3.5, 45, 3],
            ["Birds singing sweetly in a blooming garden.", "Low quality.", 10, 3.5, 45, 3],
            ["A modern synthesizer creating futuristic soundscapes.", "Low quality.", 10, 3.5, 45, 3],
            ["The vibrant beat of Brazilian samba drums.", "Low quality.", 10, 3.5, 45, 3],
        ],
        fn=text2audio,
        inputs=[textbox, negative_textbox, duration, guidance_scale, seed, n_candidates],
        outputs=[outputs],
        cache_examples=True,
    )
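    # Note: cache_examples=True runs text2audio on every example above when the demo is built,
    # which can take a long time (especially on CPU hardware).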

    gr.HTML(
        """
        <div class="acknowledgements">
            <p>Essential tricks for enhancing the quality of your generated audio:</p>
            <p>1. Try using more adjectives to describe your sound. For example, "A man is speaking
                clearly and slowly in a large room" is better than "A man is speaking".</p>
            <p>2. Try different random seeds, which can significantly affect the quality of the generated
                output.</p>
            <p>3. It's better to use general terms like 'man' or 'woman' instead of specific names of individuals or
                abstract objects that the model may not be familiar with.</p>
            <p>4. A negative prompt that steers the diffusion process away from undesired qualities can improve the
                audio quality significantly. Try negative prompts like 'low quality'.</p>
        </div>
        """
    )

    with gr.Accordion("Additional information", open=False):
        gr.HTML(
            """
            <div class="acknowledgments">
                <p>We built the model with data from <a href="http://research.google.com/audioset/">AudioSet</a>,
                    <a href="https://freesound.org/">Freesound</a> and the <a
                    href="https://sound-effects.bbcrewind.co.uk/">BBC Sound Effects library</a>. We share this demo
                    based on the <a
                    href="https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/375954/Research.pdf">UK
                    copyright exception</a> for the use of data in academic research.
                </p>
            </div>
            """
        )

iface.queue(max_size=20).launch()