# TangoFlux Gradio Space (scraped HF Spaces page header removed)
import spaces
import gradio as gr
import json
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from pydub import AudioSegment
from gradio import Markdown
import uuid
import torch
from diffusers import DiffusionPipeline,AudioPipelineOutput
from transformers import CLIPTextModel, T5EncoderModel, AutoModel, T5Tokenizer, T5TokenizerFast
from typing import Union
from diffusers.utils.torch_utils import randn_tensor
from tqdm import tqdm
from TangoFlux import TangoFluxInference
import torchaudio
# Markdown shown near the top of the Gradio UI: usage instructions plus a
# short explanation of each generation parameter (rendered via gr.Markdown).
description_text = """
# TangoFlux Text-to-Audio Generation
Generate high-quality audio from text descriptions using TangoFlux.
## Instructions:
1. Enter your text description in the prompt box
2. Adjust the generation parameters if desired
3. Click submit to generate audio
## Parameters:
- Steps: Higher values give better quality but take longer
- Guidance Scale: Controls how closely the generation follows the prompt
- Duration: Length of the generated audio in seconds
"""
# Load the TangoFlux model once at import time so every request reuses it.
# NOTE(review): presumably fetches weights from the HF Hub on first run — confirm.
tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")
@spaces.GPU(duration=15)
def gradio_generate(prompt, steps, guidance, duration):
    """Generate a 44.1 kHz WAV clip from a text prompt with TangoFlux.

    Args:
        prompt: Text description of the desired audio.
        steps: Number of denoising steps (higher = better quality, slower).
        guidance: Classifier-free guidance scale.
        duration: Target clip length in seconds; ``None`` falls back to 10.

    Returns:
        Path to the generated WAV file (consumed by gr.Audio as a filepath).
    """
    # Fall back to the UI default when the slider delivers None.
    if duration is None:
        duration = 10
    output = tangoflux.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)
    # Trim to the requested duration in samples (model may emit extra frames).
    output = output[:, :int(duration * 44100)]
    # BUG FIX: the original wrote every request to a shared 'temp.wav', so
    # concurrent sessions clobbered each other's output. Use a unique name.
    filename = f"tangoflux_{uuid.uuid4().hex}.wav"
    torchaudio.save(filename, output, 44100)
    return filename
# ---------------------------------------------------------------------------
# Gradio UI: prompt + sliders on the left, generated audio on the right.
# ---------------------------------------------------------------------------
with gr.Blocks(theme="soft") as gr_interface:
    # Badge links rendered above the title.
    gr.HTML(
        """
<div class='container' style='display:flex; justify-content:center; gap:12px;'>
<a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
<img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
</a>
<a href="https://discord.gg/openfreeai" target="_blank">
<img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
</a>
</div>
"""
    )
    # Title and usage notes.
    gr.Markdown("# TangoFlux: Super Fast and Faithful Text to Audio Generation with Flow Matching and Clap-Ranked Preference Optimization")
    gr.Markdown(description_text)
    # Input components (left) and the audio player output (right).
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(lines=2, label="Prompt")
            with gr.Row():
                denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
                guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
                duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration", interactive=True)
            submit_btn = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            output_audio = gr.Audio(label="Generated Audio", type="filepath")
    # Clickable example prompts; "lazy" caching generates each result on first use.
    gr.Examples(
        examples=[
            # [prompt, steps, guidance, duration]
            ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence", 25, 4.5, 10],
            ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing", 25, 4.5, 10]
        ],
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio,
        fn=gradio_generate,
        cache_examples="lazy",
    )
    # Wire the submit button to the generation function.
    submit_btn.click(
        fn=gradio_generate,
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio
    )

# Queue up to 15 concurrent requests and launch (also exposes an MCP server).
# BUG FIX: removed the stray trailing "|" after launch() — leftover scrape
# residue that made this line a syntax error.
gr_interface.queue(15).launch(mcp_server=True)