# NOTE(review): scraped from a Hugging Face Space whose page reported "Runtime error".
import os

import torch
import torchaudio
from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

# Load the MusicGen text-to-music checkpoint once at import time so that
# repeated calls to generate_music() reuse the same processor and model
# instead of re-downloading / re-initializing them per call.
processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
def generate_music(prompt, filename="instrumental.wav", max_new_tokens=256):
    """Generate an instrumental audio clip from a text prompt and save it as WAV.

    Args:
        prompt: Natural-language description of the music to generate.
        filename: Output WAV path (default "instrumental.wav").
        max_new_tokens: Number of audio tokens to generate; controls clip
            length (256 tokens is roughly 5 seconds for MusicGen — TODO confirm).

    Returns:
        The filename the audio was written to.
    """
    inputs = processor(
        text=[prompt],
        padding=True,
        return_tensors="pt",
    )
    # Inference only — skip autograd graph construction.
    with torch.no_grad():
        audio_values = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # The audio-encoder (EnCodec) config defines the decoded sampling rate.
    sampling_rate = model.config.audio_encoder.sampling_rate
    # generate() returns a (batch, channels, samples) tensor; select the single
    # batch item so torchaudio.save gets the 2-D (channels, samples) tensor it
    # expects, and move it to CPU in case the model ran on an accelerator.
    torchaudio.save(filename, audio_values[0].cpu(), sampling_rate)
    return filename