|
|
import streamlit as st |
|
|
import torch |
|
|
import sys |
|
|
import os |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
from peft import PeftModel |
|
|
|
|
|
|
|
|
# Make the project's src/ package importable when this file is executed as a
# script (Streamlit runs it directly, so lyricloop isn't installed as a package).
_THIS_FILE = os.path.abspath(__file__)
PROJECT_ROOT = os.path.dirname(_THIS_FILE)
sys.path.append(os.path.join(PROJECT_ROOT, "src"))
|
|
|
|
|
from lyricloop.config import MODEL_ID, RANDOM_STATE |
|
|
from lyricloop.data import build_inference_prompt, format_lyrics |
|
|
from lyricloop.metrics import execute_generation |
|
|
from lyricloop.environment import set_seed |
|
|
|
|
|
|
|
|
# Page chrome — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="LyricLoop v2.0",
    page_icon="🎤",
    layout="wide",
)
|
|
|
|
|
|
|
|
@st.cache_resource
def load_studio_engine():
    """Initializes the Gemma-2b engine for Hugging Face Spaces (CPU).

    Loads the base model referenced by ``MODEL_ID``, attaches the project's
    LoRA adapter from the Hub, and returns ``(model, tokenizer)``.
    Decorated with ``st.cache_resource`` so the heavyweight load happens
    once per server process, not on every rerun.
    """
    set_seed(RANDOM_STATE)

    # Prefer Streamlit secrets, but fall back to the environment so the app
    # also runs outside Spaces (plain `streamlit run` with HF_TOKEN exported).
    # The original `st.secrets["HF_TOKEN"]` raised KeyError when unset.
    hf_token = st.secrets.get("HF_TOKEN", os.environ.get("HF_TOKEN"))

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=hf_token)
    # Gemma ships without a dedicated pad token; reuse EOS so padding works.
    tokenizer.pad_token = tokenizer.eos_token

    # CPU-only deployment target: float32 is the safe dtype on CPU.
    device = "cpu"
    dtype = torch.float32

    base_model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        dtype=dtype,
        device_map=device,
        token=hf_token,
    )

    # LoRA adapter fine-tuned for lyric generation, hosted on the Hub.
    adapter_repo = "lxtung95/lyricloop"

    model = PeftModel.from_pretrained(
        base_model,
        adapter_repo,
        token=hf_token,
    )
    # Inference-only app: disable dropout / training-mode layers.
    model.eval()

    return model, tokenizer
|
|
|
|
|
|
|
|
# ---- Page header -----------------------------------------------------------
st.title("LyricLoop v2.0")
st.caption("Professional AI Songwriting Framework | Powered by Gemma-2b")
st.markdown("---")

# ---- Sidebar: generation knobs ---------------------------------------------
sidebar = st.sidebar
sidebar.header("Studio Controls")
# Sampling temperature (0.5–1.2, default 0.85) and output length cap.
creativity = sidebar.slider("Creativity (Temperature)", 0.5, 1.2, 0.85)
token_limit = sidebar.number_input("Max Tokens", 100, 500, 300)

# ---- Main layout: inputs on the left, generated lyrics on the right --------
column_weights = [1, 1]
col1, col2 = st.columns(column_weights)
|
|
|
|
|
# Closed set of genres the prompt builder understands.
_GENRES = ["Pop", "Rock", "Hip-hop", "Electronic", "R&B", "Country"]

with col1:
    st.subheader("Composition Details")
    genre = st.selectbox("Target Genre", _GENRES)
    artist = st.text_input("Artist Aesthetic", placeholder="e.g., Taylor Swift")
    title = st.text_input("Song Title", placeholder="Enter your track title...")

    generate_btn = st.button("Compose Lyrics", type="primary", use_container_width=True)
|
|
|
|
|
with col2:
    st.subheader("Output")

    # Reserved slot the generation handler below fills with the final draft.
    # NOTE(review): assumed to live inside the right column so results render
    # there — confirm against the original layout.
    output_placeholder = st.empty()
|
|
|
|
|
if generate_btn:
    with st.spinner("Model is writing..."):
        # Cached resource: cheap after the first call in this process.
        model, tokenizer = load_studio_engine()

        prompt = build_inference_prompt(genre, artist, title)

        raw_output = execute_generation(
            model, tokenizer, prompt,
            max_tokens=token_limit,
            temperature=creativity,
            do_sample=True
        )

        clean_lyrics = format_lyrics(raw_output)

        output_placeholder.text_area(
            "Final Draft",
            clean_lyrics,
            height=400,
            key="lyrics_output"
        )

        # Sanitize the user-supplied title so the exported file name is valid
        # on all platforms (no path separators etc.); fall back to "untitled"
        # when the title is blank. Previously a raw f"{title}_lyrics.txt"
        # could yield "_lyrics.txt" or names containing "/".
        safe_title = "".join(
            c for c in title if c.isalnum() or c in " -_"
        ).strip() or "untitled"

        st.download_button(
            "Export as TXT",
            clean_lyrics,
            file_name=f"{safe_title}_lyrics.txt"
        )