import gradio as gr
import torch
from transformers import MusicgenForConditionalGeneration, AutoProcessor
import numpy as np

# Load model + processor once at startup so every request reuses them.
MODEL_ID = "facebook/musicgen-small"
model = MusicgenForConditionalGeneration.from_pretrained(MODEL_ID)
processor = AutoProcessor.from_pretrained(MODEL_ID)

# MusicGen produces roughly 50 audio tokens per second of generated audio,
# so seconds -> max_new_tokens is duration * 50.
TOKENS_PER_SECOND = 50


def generate_music(prompt, duration):
    """Generate a short music clip from a text style description.

    Args:
        prompt: Free-text description of the desired music
            (e.g. "Chill lo-fi hip hop").
        duration: Desired clip length in seconds (the UI slider
            supplies values in 5-20).

    Returns:
        A ``(sampling_rate, numpy_audio)`` tuple suitable for
        ``gr.Audio(type="numpy")`` plus a status/license string, or
        ``(None, message)`` when the prompt is empty.
    """
    # Reject empty or whitespace-only prompts before touching the model
    # (the original guard let "   " through to generation).
    if not prompt or not prompt.strip():
        return None, "Please enter a description."

    inputs = processor(text=[prompt], padding=True, return_tensors="pt")
    max_tokens = int(duration * TOKENS_PER_SECOND)

    # inference_mode(): no autograd graph is built during generation,
    # saving memory and time on this pure-inference path.
    with torch.inference_mode():
        audio_values = model.generate(**inputs, max_new_tokens=max_tokens)

    sampling_rate = model.config.audio_encoder.sampling_rate
    # generate() returns (batch, channels, samples); take the single
    # mono track as a 1-D numpy array for Gradio.
    audio_data = audio_values[0, 0].cpu().numpy()

    # BUGFIX: the previous message claimed "Royalty-Free for Commercial
    # Use", but the facebook/musicgen-small weights are released under
    # CC-BY-NC 4.0 (non-commercial). Emitting a fabricated commercial
    # license is a correctness (and legal) defect, so the text now
    # states the actual licensing situation.
    license_note = (
        f"Prompt: {prompt}\n"
        "Note: this track was generated with facebook/musicgen-small, "
        "whose model weights are licensed CC-BY-NC 4.0 (non-commercial). "
        "Review the model card and license before any commercial use."
    )
    return (sampling_rate, audio_data), license_note


# ---- UI layout -------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Header banner. Reconstructed as a triple-quoted string: the original
    # multi-line gr.HTML("...") with single double quotes would not parse.
    gr.HTML(
        """
        <div style="text-align: center;">
            <h1>🎹 NEURAL MUSIC STUDIO</h1>
            <p>Generate & Own Your Tracks</p>
        </div>
        """
    )
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Music Style",
                placeholder="e.g., Chill lo-fi hip hop",
            )
            duration_slider = gr.Slider(
                minimum=5, maximum=20, value=10, label="Length (Sec)"
            )
            generate_btn = gr.Button("Compose Now", variant="primary")
        with gr.Column():
            audio_out = gr.Audio(label="Studio Master Output", type="numpy")
            license_out = gr.Textbox(label="Legal Rights", interactive=False)

    generate_btn.click(
        fn=generate_music,
        inputs=[text_input, duration_slider],
        outputs=[audio_out, license_out],
    )

# Guard the server launch so importing this module (e.g. from tests or
# another app) does not start a web server as a side effect.
if __name__ == "__main__":
    demo.launch()