Update app.py
Browse files
app.py
CHANGED
|
@@ -5,74 +5,64 @@ import requests
|
|
| 5 |
from pydub import AudioSegment
|
| 6 |
import os
|
| 7 |
|
| 8 |
-
#
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
| 15 |
|
| 16 |
-
#
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
with open("audio.mp3", "wb") as f:
|
| 20 |
-
f.write(audio_data)
|
| 21 |
-
st.audio(audio_data, format="audio/mp3")
|
| 22 |
-
st.success("AI voice sample loaded successfully!")
|
| 23 |
-
except Exception as e:
|
| 24 |
-
st.error(f"Failed to load audio: {e}")
|
| 25 |
|
| 26 |
-
# Step
|
| 27 |
-
st.subheader("Step
|
| 28 |
-
lyrics = st.text_area("Enter
|
|
|
|
| 29 |
|
| 30 |
-
#
|
| 31 |
-
st.subheader("Step
|
| 32 |
-
|
| 33 |
-
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
|
|
|
|
|
|
| 37 |
model = MusicGen.get_pretrained("facebook/musicgen-small")
|
| 38 |
model.set_generation_params(duration=10)
|
| 39 |
output = model.generate([prompt])
|
| 40 |
-
output_path = "musicgen_output.wav"
|
| 41 |
torchaudio.save(output_path, output[0].cpu(), 32000)
|
| 42 |
st.audio(output_path, format="audio/wav")
|
| 43 |
-
st.success("Music
|
| 44 |
|
| 45 |
-
# Step 4:
|
| 46 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
|
|
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
beat = beat - 2 # Reduce the beat volume
|
| 61 |
-
|
| 62 |
-
# Combine the voice and instrumental
|
| 63 |
-
combined = beat.overlay(voice, loop=False)
|
| 64 |
-
combined_path = "combined_output.mp3"
|
| 65 |
-
combined.export(combined_path, format="mp3")
|
| 66 |
-
|
| 67 |
-
st.audio(combined_path, format="audio/mp3")
|
| 68 |
-
st.success("Combined output ready!")
|
| 69 |
-
else:
|
| 70 |
-
st.error("Voice sample file (audio.mp3) is missing. Please ensure it is downloaded properly.")
|
| 71 |
-
except Exception as e:
|
| 72 |
-
st.error(f"Error while combining: {e}")
|
| 73 |
-
else:
|
| 74 |
-
st.info("Generate the music first to combine it with the voice.")
|
| 75 |
-
|
| 76 |
-
# Step 5: Show Lyrics Input (Optional for View)
|
| 77 |
-
if lyrics:
|
| 78 |
-
st.write(f"**Lyrics Entered**:\n{lyrics}")
|
|
|
|
| 5 |
from pydub import AudioSegment
|
| 6 |
import os
|
| 7 |
|
| 8 |
+
# Function to generate voice with Resemble AI
def generate_resemble_audio(api_key, project_uuid, text, voice_uuid):
    """Request a TTS clip from Resemble AI and return the clip's audio URL.

    Args:
        api_key: Resemble API token, sent in the Authorization header.
        project_uuid: UUID of the Resemble project that owns the voice.
        text: Text to synthesize (the lyrics).
        voice_uuid: UUID of the Resemble voice to render with.

    Returns:
        The "audio_src" URL string from the created clip.

    Raises:
        Exception: if the API responds with a non-200 status code.
    """
    url = f"https://app.resemble.ai/api/v2/projects/{project_uuid}/clips"
    # BUG FIX: the original interpolated a bare identifier (a hard-coded,
    # leaked API token) into the f-string, which raises NameError at call
    # time and exposes the secret in source control. The api_key parameter
    # was accepted but never used — use it here instead.
    headers = {"Authorization": f"Token {api_key}"}
    data = {"voice_uuid": voice_uuid, "text": text, "is_active": True}
    response = requests.post(url, headers=headers, json=data)

    if response.status_code == 200:
        clip = response.json()
        return clip["audio_src"]
    else:
        raise Exception(f"Failed to generate audio: {response.text}")
|
| 20 |
|
| 21 |
+
# Streamlit UI
st.set_page_config(page_title="Suno-like Music App with Resemble AI", layout="centered")
st.title("Suno-like AI Music Generator with Resemble AI")

# Step 1: Input lyrics and music prompt
st.subheader("Step 1: Input Lyrics and Music Prompt")
lyrics = st.text_area("Enter lyrics:", "Life is good, the sky is blue, moving fast in brand new shoes")
prompt = st.text_input("Enter music style:", "moody trap instrumental with 808 bass")

# Step 2: Resemble AI credentials
st.subheader("Step 2: Resemble AI Settings")
# SECURITY FIX: the original used a leaked API token as this widget's label,
# displaying the secret in the page and committing it to source. Use a
# descriptive label; the user supplies their own key, masked by the
# password input type. The leaked token should be revoked.
api_key = st.text_input("Resemble API Key", type="password")
project_id = st.text_input("Resemble Project UUID")
voice_id = st.text_input("Resemble Voice UUID")
|
| 35 |
|
| 36 |
+
# Step 3: Generate Instrumental
output_path = "musicgen_output.wav"
if st.button("Generate Instrumental"):
    with st.spinner("Generating music with MusicGen..."):
        # Load the small MusicGen checkpoint and render a 10-second clip
        # conditioned on the user's style prompt.
        musicgen = MusicGen.get_pretrained("facebook/musicgen-small")
        musicgen.set_generation_params(duration=10)
        generated = musicgen.generate([prompt])
        # MusicGen emits 32 kHz audio; persist it so Step 4 can reuse it.
        torchaudio.save(output_path, generated[0].cpu(), 32000)
        st.audio(output_path, format="audio/wav")
        st.success("Music generated successfully!")
|
| 46 |
|
| 47 |
+
# Step 4: Generate Voice with Resemble and Combine
if st.button("Generate Voice and Combine"):
    # ROBUSTNESS FIX: the previous revision guarded on the audio file
    # existing before mixing; this one dropped the guard, so clicking
    # before Step 3 surfaced a raw decoder error. Restore the guard.
    if not os.path.exists(output_path):
        st.info("Generate the music first to combine it with the voice.")
    else:
        try:
            # Generate the vocal track via Resemble, then download it.
            audio_url = generate_resemble_audio(api_key, project_id, lyrics, voice_id)
            voice_file = "resemble_voice.mp3"
            with open(voice_file, "wb") as f:
                f.write(requests.get(audio_url).content)

            # Combine voice with music. pydub's "- n" lowers gain by n dB.
            beat = AudioSegment.from_wav(output_path)
            voice = AudioSegment.from_mp3(voice_file)
            voice = voice - 4  # soften the vocal by 4 dB
            beat = beat - 2    # duck the instrumental by 2 dB
            combined = beat.overlay(voice, loop=False)

            combined_path = "final_mix.mp3"
            combined.export(combined_path, format="mp3")
            st.audio(combined_path, format="audio/mp3")
            st.success("Final mix is ready!")
        except Exception as e:
            # UI boundary: show the failure instead of crashing the app.
            st.error(str(e))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|