# Sound-AI-SFX / app.py
# (Removed Hugging Face web-page residue — "Imosu's picture / Update app.py /
# 5bd40e7 verified" — which was copied from the Space file viewer and is not
# valid Python.)
import spaces
import gradio as gr
import json
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from pydub import AudioSegment
from gradio import Markdown
import uuid
import torch
from diffusers import DiffusionPipeline,AudioPipelineOutput
from transformers import CLIPTextModel, T5EncoderModel, AutoModel, T5Tokenizer, T5TokenizerFast
from typing import Union
from diffusers.utils.torch_utils import randn_tensor
from tqdm import tqdm
from TangoFlux import TangoFluxInference
import torchaudio
# Define the description text shown at the top of the Gradio interface
# (rendered as Markdown below the title).
description_text = """
# TangoFlux Text-to-Audio Generation
Generate high-quality audio from text descriptions using TangoFlux.
## Instructions:
1. Enter your text description in the prompt box
2. Adjust the generation parameters if desired
3. Click submit to generate audio
## Parameters:
- Steps: Higher values give better quality but take longer
- Guidance Scale: Controls how closely the generation follows the prompt
- Duration: Length of the generated audio in seconds
"""

# Load the TangoFlux model once at module import time (downloads weights from
# the Hugging Face Hub on first run); reused by every generation request.
tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")
@spaces.GPU(duration=15)
def gradio_generate(prompt, steps, guidance, duration):
    """Generate audio from a text prompt with TangoFlux and save it as a WAV file.

    Args:
        prompt: Text description of the desired audio.
        steps: Number of denoising steps (higher = better quality, slower).
        guidance: Classifier-free guidance scale.
        duration: Target clip length in seconds; defaults to 10 when None.

    Returns:
        Path (str) to the generated 44.1 kHz WAV file, suitable for a
        gr.Audio(type="filepath") output component.
    """
    # Ensure duration has a default value if None (e.g. cleared slider).
    if duration is None:
        duration = 10
    output = tangoflux.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)
    # Unique filename per request: the previous hard-coded 'temp.wav' raced
    # when concurrent Gradio sessions generated audio at the same time, so one
    # user's result could overwrite another's before it was served.
    filename = f"{uuid.uuid4().hex}.wav"
    # Trim to the requested duration; model output is sampled at 44.1 kHz.
    output = output[:, :int(duration * 44100)]
    torchaudio.save(filename, output, 44100)
    return filename
# Create custom interface with HTML badges
with gr.Blocks(theme="soft") as gr_interface:
    # Add HTML badges at the top (external links to related services).
    gr.HTML(
        """
        <div class='container' style='display:flex; justify-content:center; gap:12px;'>
            <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
                <img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
            </a>
            <a href="https://discord.gg/openfreeai" target="_blank">
                <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
            </a>
        </div>
        """
    )

    # Title and description
    gr.Markdown("# TangoFlux: Super Fast and Faithful Text to Audio Generation with Flow Matching and Clap-Ranked Preference Optimization")
    gr.Markdown(description_text)

    # Input components: prompt + three sliders matching gradio_generate's
    # (prompt, steps, guidance, duration) signature; output is a file-path audio player.
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(lines=2, label="Prompt")
            with gr.Row():
                denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
                guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
                duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration", interactive=True)
            submit_btn = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            output_audio = gr.Audio(label="Generated Audio", type="filepath")

    # Clickable example prompts; "lazy" caching generates each example's audio
    # only the first time it is clicked, then serves the cached result.
    gr.Examples(
        examples=[
            # [prompt, steps, guidance, duration]
            ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence", 25, 4.5, 10],
            ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing", 25, 4.5, 10]
        ],
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio,
        fn=gradio_generate,
        cache_examples="lazy",
    )

    # Connect the button click to the generation function
    submit_btn.click(
        fn=gradio_generate,
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio
    )

# Launch the interface with request queueing enabled and the MCP server exposed.
# NOTE(review): queue(15) passes 15 positionally; what that first parameter
# means depends on the installed Gradio version (it was `concurrency_count` in
# Gradio 3.x but that parameter was removed in 4.x) — confirm against the
# Space's pinned gradio version and prefer an explicit keyword argument.
gr_interface.queue(15).launch(mcp_server=True)