from transformers import AutoProcessor, MusicgenForConditionalGeneration

# Load the pretrained processor and model once at import time.
# NOTE(review): "facebook/musicgen-large" is a multi-GB checkpoint; loading it
# at module import blocks startup — consider lazy-loading if this becomes a library.
processor = AutoProcessor.from_pretrained("facebook/musicgen-large")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-large")


def generate_music(prompt, guidance_scale=3, max_new_tokens=256):
    """Generate audio from a text prompt with the MusicGen model.

    Args:
        prompt: Text description of the desired music.
        guidance_scale: Classifier-free guidance weight passed to
            ``model.generate``; higher values follow the prompt more closely.
        max_new_tokens: Number of audio tokens to generate, which controls
            clip length (presumably ~256 tokens is a few seconds of audio at
            the model's frame rate — confirm against the MusicGen docs).

    Returns:
        The generated audio values as returned by ``model.generate``
        (a torch tensor of waveform samples).
    """
    # Tokenize the prompt; padding matters when batching multiple prompts.
    inputs = processor(
        text=[prompt],
        padding=True,
        return_tensors="pt",
    )
    # Sampling (do_sample=True) is required for MusicGen's guided generation.
    audio_values = model.generate(
        **inputs,
        do_sample=True,
        guidance_scale=guidance_scale,
        max_new_tokens=max_new_tokens,
    )
    return audio_values