""" Suno Prompting App Converts natural language song ideas into structured Suno AI prompts. Supports OpenRouter and ElectronHub APIs with auto-detection. """ import json import os import random import re import tempfile import gradio as gr import requests from dotenv import load_dotenv from openai import OpenAI load_dotenv(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".env")) from knowledge_base import build_system_prompt # ───────────────────────────────────────────── # CONFIG # ───────────────────────────────────────────── MODELS = { "Google Gemini 3 Pro": "google/gemini-3-pro-preview", "Google Gemini 3 Flash": "google/gemini-3-flash-preview", "Anthropic Claude Sonnet 4.6": "anthropic/claude-sonnet-4.6", "OpenAI GPT-5.2": "openai/gpt-5.2", "xAI Grok 4": "x-ai/grok-4", "Custom": "custom", } FREE_MODELS = [ "glm-4.5-air", "qwen3-coder-480b-a35b-instruct:free", "llama-4-maverick-17b-128e-instruct", "claude-3-haiku-20240307", "kimi-k2.5:free", "gemini-2.5-flash", ] # ElectronHub uses different model IDs for some models. # Most just drop the provider prefix, but these two need explicit overrides. 
# OpenRouter model ID -> ElectronHub model ID, for the models whose ElectronHub
# name is not just "drop the provider prefix" (see _resolve_model_id).
ELECTRONHUB_MODEL_OVERRIDES = {
    "anthropic/claude-sonnet-4.6": "claude-sonnet-4-6",
    "x-ai/grok-4": "grok-4-0709",
}

# Dark orange-on-gray Gradio theme; every color is set for both light and
# dark variants so the app looks identical regardless of browser preference.
THEME = gr.themes.Base(
    primary_hue=gr.themes.colors.orange,
    secondary_hue=gr.themes.colors.neutral,
    neutral_hue=gr.themes.colors.gray,
    font=gr.themes.GoogleFont("Inter"),
).set(
    body_background_fill="#1a1a1a",
    body_background_fill_dark="#1a1a1a",
    body_text_color="#e0e0e0",
    body_text_color_dark="#e0e0e0",
    block_background_fill="#2a2a2a",
    block_background_fill_dark="#2a2a2a",
    block_border_color="#444",
    block_border_color_dark="#444",
    block_label_text_color="#ccc",
    block_label_text_color_dark="#ccc",
    block_title_text_color="#fff",
    block_title_text_color_dark="#fff",
    input_background_fill="#333",
    input_background_fill_dark="#333",
    input_border_color="#555",
    input_border_color_dark="#555",
    button_primary_background_fill="#e67e22",
    button_primary_background_fill_dark="#e67e22",
    button_primary_background_fill_hover="#d35400",
    button_primary_background_fill_hover_dark="#d35400",
    button_primary_text_color="#fff",
    button_primary_text_color_dark="#fff",
)

# ─────────────────────────────────────────────
# CORE LOGIC
# ─────────────────────────────────────────────

def _get_client(api_key: str = "", free_mode: bool = False) -> OpenAI:
    """Create an OpenAI-compatible client, auto-detecting the provider.

    Free mode ignores *api_key* and uses the ELECTRONHUB_API_KEY from the
    environment. Otherwise the provider is inferred from the key prefix:
    keys starting with "ek-" go to ElectronHub, everything else to OpenRouter.

    Raises:
        ValueError: if no usable key is available for the chosen mode.
    """
    if free_mode:
        key = os.getenv("ELECTRONHUB_API_KEY", "")
        if not key:
            raise ValueError(
                "Free Mode is unavailable. Enter your own API key and uncheck Free Mode."
            )
        base_url = "https://api.electronhub.ai/v1"
    else:
        key = api_key.strip()
        if not key:
            raise ValueError("No API key provided. Enter your OpenRouter or ElectronHub API key above.")
        # Auto-detect provider from key prefix
        if key.startswith("ek-"):
            base_url = "https://api.electronhub.ai/v1"
        else:
            base_url = "https://openrouter.ai/api/v1"
    return OpenAI(base_url=base_url, api_key=key, timeout=90.0)


def _resolve_model_id(model_id: str) -> str:
    """Translate OpenRouter model ID to ElectronHub format.

    Most models just drop the provider prefix (google/, anthropic/, etc.).
    A few need explicit overrides (different naming conventions) — those are
    listed in ELECTRONHUB_MODEL_OVERRIDES and checked first.
    """
    if model_id in ELECTRONHUB_MODEL_OVERRIDES:
        return ELECTRONHUB_MODEL_OVERRIDES[model_id]
    if "/" in model_id:
        # Keep everything after the first "/" (the model ID may itself
        # contain further slashes).
        return model_id.split("/", 1)[1]
    return model_id


def _fix_json_newlines(text: str) -> str:
    """Replace literal newlines inside JSON string values with \\n.

    Free models often put real line breaks in string fields (especially
    lyrics) instead of \\n escape sequences, which makes json.loads() fail.

    Implemented as a single character scan that tracks whether we are inside
    a double-quoted string, honoring backslash escapes so an escaped quote
    does not toggle the in-string state.
    """
    result = []
    in_string = False
    escape_next = False
    for char in text:
        if escape_next:
            # Previous char was a backslash: pass this char through untouched.
            result.append(char)
            escape_next = False
            continue
        if char == '\\':
            result.append(char)
            escape_next = True
            continue
        if char == '"':
            # Unescaped quote: entering or leaving a JSON string value.
            in_string = not in_string
            result.append(char)
            continue
        if char == '\n' and in_string:
            # The actual fix: escape the raw newline so json.loads accepts it.
            result.append('\\n')
            continue
        result.append(char)
    return ''.join(result)


def _extract_json(raw: str) -> dict:
    """Extract JSON object from model response, even if wrapped in extra text.

    Handles: clean JSON, markdown code fences, preamble/trailing text, and
    literal newlines inside string values (common with free models).

    Raises:
        json.JSONDecodeError: if no parseable JSON object can be found.
    """
    # Strip markdown code fences
    cleaned = re.sub(r'^```(?:json)?\s*\n?', '', raw.strip(), flags=re.MULTILINE)
    cleaned = re.sub(r'\n?```\s*$', '', cleaned.strip(), flags=re.MULTILINE)
    # Try parsing cleaned text directly
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        pass
    # Fix literal newlines inside string values, then try again
    try:
        return json.loads(_fix_json_newlines(cleaned))
    except json.JSONDecodeError:
        pass
    # Find the first { ... } block (handles preamble/trailing text)
    match = re.search(r'\{[\s\S]*\}', cleaned)
    if match:
        block = match.group()
        try:
            return json.loads(block)
        except json.JSONDecodeError:
            pass
        try:
            return json.loads(_fix_json_newlines(block))
        except json.JSONDecodeError:
            pass
    raise json.JSONDecodeError("No valid JSON found in response", raw, 0)


def _generate_cover_image(prompt: str):
    """Generate cover art via ElectronHub SDXL. Returns file path or None on failure.

    Best-effort by design: any missing key, HTTP error, or download failure
    yields None rather than an exception, so cover art can never block the
    main text outputs.
    """
    if not prompt:
        return None
    eh_key = os.getenv("ELECTRONHUB_API_KEY", "")
    if not eh_key:
        return None  # Silently skip — image gen is a bonus, not critical
    try:
        resp = requests.post(
            "https://api.electronhub.ai/v1/images/generations",
            headers={
                "Authorization": f"Bearer {eh_key}",
                "Content-Type": "application/json",
            },
            json={
                "model": "sdxl",
                "prompt": prompt,
                "n": 1,
                "size": "1024x1024",
            },
            timeout=60,
        )
        resp.raise_for_status()
        data = resp.json()
        image_url = data["data"][0]["url"]
        # Download image to temp file for Gradio.
        # delete=False is intentional: Gradio needs the file to outlive this
        # function so it can serve it to the browser.
        img_resp = requests.get(image_url, timeout=30)
        img_resp.raise_for_status()
        tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
        tmp.write(img_resp.content)
        tmp.close()
        return tmp.name
    except Exception:
        return None  # Image gen failure should never block the main output


def generate_prompt(
    api_key: str,
    song_idea: str,
    model_choice: str,
    custom_model: str,
    weirdness: int,
    free_mode: bool,
):
    """Call LLM API, parse structured response, and generate cover art image.

    Returns a 6-tuple matching the UI outputs:
    (title, style, lyrics, settings, cover_art_image, cover_art_text).
    On any error the second element carries the error/help message and the
    remaining fields are empty — the function never raises to the UI.
    """
    # 6 outputs: title, style, lyrics, settings, cover_art_image, cover_art_text
    if not song_idea.strip():
        return "", "Please enter a song idea.", "", "", None, ""
    # Resolve model ID
    if free_mode:
        model_id = None  # Set during cascade
    elif model_choice == "Custom":
        model_id = custom_model.strip()
        if not model_id:
            return "", "Please enter a custom model ID.", "", "", None, ""
    else:
        model_id = MODELS.get(model_choice, "google/gemini-3-flash-preview")
    system_prompt = build_system_prompt(weirdness)
    try:
        client = _get_client(api_key, free_mode=free_mode)
    except ValueError as e:
        return "", str(e), "", "", None, ""
    # Translate model ID for ElectronHub (different naming convention)
    if not free_mode and model_id:
        key = api_key.strip()
        if key.startswith("ek-"):
            model_id = _resolve_model_id(model_id)
    # --- LLM call ---
    raw = None
    if free_mode:
        # Shuffle and cascade through free models until one works
        models_to_try = FREE_MODELS[:]
        random.shuffle(models_to_try)
        last_error = None
        for model_id in models_to_try:
            try:
                response = client.chat.completions.create(
                    model=model_id,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": song_idea},
                    ],
                    temperature=0.9,
                    max_tokens=4096,
                    timeout=90.0,
                )
                raw = response.choices[0].message.content.strip()
                print(f"Free Mode: {model_id} succeeded")
                break
            except Exception as e:
                # Any failure (rate limit, model offline, bad response) just
                # moves the cascade on to the next candidate.
                last_error = e
                print(f"Free Mode: {model_id} failed ({e}), trying next...")
                continue
        if raw is None:
            return "", f"All free models failed. Last error: {last_error}", "", "", None, ""
    else:
        try:
            response = client.chat.completions.create(
                model=model_id,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": song_idea},
                ],
                temperature=0.9,
                max_tokens=4096,
                timeout=90.0,
            )
            raw = response.choices[0].message.content.strip()
        except Exception as e:
            return "", f"Error: {e}", "", "", None, ""
    # --- Parse JSON response ---
    try:
        data = _extract_json(raw)
        song_title = data.get("song_title", "Untitled")
        style_prompt = data.get("style_prompt", "")
        lyrics = data.get("lyrics", "")
        w = data.get("weirdness", "N/A")
        w_reason = data.get("weirdness_reasoning", "")
        si = data.get("style_influence", "N/A")
        si_reason = data.get("style_influence_reasoning", "")
        settings = f"Weirdness: {w}/100\n{w_reason}\n\nStyle Influence: {si}/100\n{si_reason}"
        cover_art_text = data.get("cover_art_prompt", "")
        # Generate cover art image (failure is silent, never blocks text outputs)
        cover_art_image = _generate_cover_image(cover_art_text)
        return song_title, style_prompt, lyrics, settings, cover_art_image, cover_art_text
    except json.JSONDecodeError:
        # Surface the raw model output so the user can salvage it manually.
        return (
            "",
            f"[JSON parse error - raw response below]\n\n{raw}",
            "",
            "",
            None,
            "",
        )
    except Exception as e:
        return "", f"Error: {e}", "", "", None, ""


def toggle_custom_visibility(choice):
    """Show/hide custom model text field."""
    return gr.update(visible=(choice == "Custom"))


def toggle_free_mode(free_mode: bool):
    """When Free Mode is checked, gray out API key and model dropdown."""
    return (
        gr.update(interactive=not free_mode),  # api_key_input
        gr.update(interactive=not free_mode),  # model_dropdown
    )

# ─────────────────────────────────────────────
# APP BUILDER
# ─────────────────────────────────────────────

def create_app():
    """Build and return the Gradio Blocks app and theme."""
    with gr.Blocks(title="Suno Prompt Generator") as demo:
        gr.HTML(
            '
by AnimalMonk | ' 'Join us on Discord
' '