# Chronicle/modules/ui_layout.py
# fix: added some padding to textedit field so they look better in firefox.
# commit: cef8efc
import gradio as gr
from .config import FEATURE_SEQUENCE, SECTIONS, HF_TEXT_MODELS, HF_IMAGE_MODELS, GEMINI_API_KEY
from .core_logic import (
features_data, generate_prompt, handle_regeneration,
save_character, load_character, get_example_list, load_example_character
)
from .integrations import (
get_ollama_models, check_comfy_availability, refine_master, generate_image_master
)
from .name_generator import generate_fantasy_name
# Custom CSS for the Gradio app. Only takes effect if this constant is
# passed to gr.Blocks(css=UI_CSS) when the interface is built.
# The @-moz-document url-prefix() at-rule is Gecko-only, so the inner
# rules apply exclusively in Firefox (extra textarea/label padding).
UI_CSS = """
.container span {
line-height: 1.6 !important;
margin-bottom: 8px !important;
display: inline-block !important;
}
.container textarea {
min-height: 42px !important;
line-height: 1.5 !important;
overflow-y: visible !important;
padding-top: 8px !important;
padding-bottom: 8px !important;
}
/* Firefox specific fixes */
@-moz-document url-prefix() {
.container textarea {
padding-top: 10px !important;
padding-bottom: 10px !important;
}
.container span {
padding-bottom: 2px !important;
}
}
"""
def build_ui():
    """Construct the Chronicle Portrait Studio Gradio interface.

    Builds the full Blocks layout — header banner, feature tabs
    (identity, appearance, equipment, environment, style), AI backend
    configuration, and the prompt/portrait output column — and wires
    every event handler.

    Returns:
        gr.Blocks: the assembled app, ready for ``.launch()``.
    """
    # FIX: UI_CSS was defined at module level but never handed to
    # gr.Blocks, so the textarea padding rules (incl. the Firefox-only
    # @-moz-document block) were never injected into the page.
    with gr.Blocks(title="Chronicle Portrait Studio", css=UI_CSS) as demo:
        # --- Header banner: inline SVG logo ---
        with gr.Row(variant="compact"):
            gr.HTML('''
<div style="display: flex; justify-content: center; background: #121212; padding: 10px; border-radius: 10px;">
<svg width="600" height="120" viewBox="0 0 600 120" xmlns="http://www.w3.org/2000/svg">
<defs>
<linearGradient id="goldGrad" x1="0%" y1="0%" x2="0%" y2="100%">
<stop offset="0%" style="stop-color:#FFD700;stop-opacity:1" />
<stop offset="100%" style="stop-color:#B8860B;stop-opacity:1" />
</linearGradient>
<filter id="shadow" x="-20%" y="-20%" width="140%" height="140%">
<feGaussianBlur in="SourceAlpha" stdDeviation="2" />
<feOffset dx="2" dy="2" result="offsetblur" />
<feComponentTransfer>
<feFuncA type="linear" slope="0.5" />
</feComponentTransfer>
<feMerge>
<feMergeNode />
<feMergeNode in="SourceGraphic" />
</feMerge>
</filter>
</defs>
<rect width="600" height="120" fill="#121212" rx="10" />
<g transform="translate(60, 60)" filter="url(#shadow)">
<path d="M0 -35 L25 -25 L25 5 Q25 25 0 35 Q-25 25 -25 5 L-25 -25 Z" fill="url(#goldGrad)" opacity="0.9" />
<path d="M0 -30 L20 -22 L20 5 Q20 20 0 30 Q-20 20 -20 5 L-20 -22 Z" fill="#121212" />
<text x="0" y="10" font-family="serif" font-size="28" fill="url(#goldGrad)" text-anchor="middle" font-weight="bold">C</text>
</g>
<text x="340" y="62" font-family="'Georgia', serif" font-size="42" fill="url(#goldGrad)" style="filter:url(#shadow)" font-weight="bold" text-anchor="middle">CHRONICLE</text>
<text x="340" y="98" font-family="'Georgia', serif" font-size="20" fill="#bdc3c7" letter-spacing="2" opacity="0.8" text-anchor="middle">PORTRAIT STUDIO</text>
<rect x="5" y="5" width="590" height="110" fill="none" stroke="#B8860B" stroke-width="1" rx="8" stroke-dasharray="15,10" opacity="0.3" />
</svg>
</div>
''')
        gr.Markdown("# ⚔️ Chronicle Portrait Studio")
        gr.Markdown("Craft legendary AI-powered portraits for your RPG adventures.")

        # Component registries, filled by create_feature_ui in call order.
        # NOTE: core_logic handlers (generate_prompt, handle_regeneration)
        # receive these positionally, so insertion order is a contract.
        dropdowns = []
        checkboxes = []
        extra_texts = []

        def create_feature_ui(category, subcategory, label, default_value):
            """Render one feature row (dropdown + 🎲 randomize checkbox).

            Choices come from features_data[category][subcategory]; the
            created components are appended to the shared registries.
            Returns (dropdown, checkbox) for callers that need handles.
            """
            choices = list(features_data[category][subcategory].keys())
            gr.Markdown(f"**{label}**")
            with gr.Row(equal_height=True):
                dd = gr.Dropdown(choices=choices, value=default_value, scale=12, show_label=False)
                # Checkbox flags this feature for randomization by handle_regeneration.
                cb = gr.Checkbox(label="🎲", value=False, scale=1, min_width=50, container=False)
            dropdowns.append(dd)
            checkboxes.append(cb)
            return dd, cb

        with gr.Row():
            # --- Left column: character feature editors ---
            with gr.Column(scale=2):
                with gr.Tabs():
                    with gr.TabItem("👤 Identity & Expression"):
                        gr.Markdown("### 👤 Identity")
                        with gr.Row():
                            character_name = gr.Textbox(label="Character Name", placeholder="Enter name...", value="Unnamed Hero", scale=9)
                            name_gen_btn = gr.Button("🎲", scale=1, variant="secondary")
                        # Keep the race dropdown handle: the name generator uses it.
                        race_dd, _ = create_feature_ui('identity', 'race', "Race", "Human")
                        create_feature_ui('identity', 'class', "Class", "Fighter")
                        create_feature_ui('identity', 'gender', "Gender", "Male")
                        create_feature_ui('identity', 'age', "Age", "Young Adult")
                        extra_id = gr.Textbox(placeholder="Extra Identity details (e.g. lineage, title)", label="Additional Identity Info")
                        extra_texts.append(extra_id)
                        gr.Markdown("### 🎭 Expression & Pose")
                        create_feature_ui('expression_pose', 'expression', "Expression", "Determined")
                        create_feature_ui('expression_pose', 'pose', "Pose", "Standing")
                    with gr.TabItem("🎨 Appearance"):
                        gr.Markdown("### 🎨 Appearance")
                        create_feature_ui('appearance', 'hair_color', "Hair Color", "Brown")
                        create_feature_ui('appearance', 'hair_style', "Hair Style", "Short")
                        create_feature_ui('appearance', 'eye_color', "Eye Color", "Brown")
                        create_feature_ui('appearance', 'build', "Build", "Average")
                        create_feature_ui('appearance', 'skin_tone', "Skin Tone", "Fair")
                        create_feature_ui('appearance', 'distinguishing_feature', "Distinguishing Feature", "None")
                        extra_app = gr.Textbox(placeholder="Extra Appearance details (e.g. tattoos, scars)", label="Additional Appearance Info")
                        extra_texts.append(extra_app)
                    with gr.TabItem("⚔️ Equipment"):
                        gr.Markdown("### ⚔️ Equipment")
                        create_feature_ui('equipment', 'armor', "Armor/Clothing", "Travelers Clothes")
                        create_feature_ui('equipment', 'weapon', "Primary Weapon", "Longsword")
                        # Same subcategory used twice on purpose: two accessory slots.
                        create_feature_ui('equipment', 'accessory', "Accessory 1", "None")
                        create_feature_ui('equipment', 'accessory', "Accessory 2", "None")
                        create_feature_ui('equipment', 'material', "Material Detail", "Weathered")
                        extra_eq = gr.Textbox(placeholder="Extra Equipment details (e.g. weapon enchantments)", label="Additional Equipment Info")
                        extra_texts.append(extra_eq)
                    with gr.TabItem("🌍 Environment"):
                        gr.Markdown("### 🌍 Environment")
                        create_feature_ui('environment', 'background', "Background", "Forest")
                        create_feature_ui('environment', 'lighting', "Lighting", "Natural Sunlight")
                        create_feature_ui('environment', 'atmosphere', "Atmosphere", "Clear")
                        extra_env = gr.Textbox(placeholder="Extra Environment details (e.g. time of day, weather)", label="Additional Environment Info")
                        extra_texts.append(extra_env)
                    with gr.TabItem("🪄 Style & Technical"):
                        gr.Markdown("### 🪄 Style & Effects")
                        create_feature_ui('vfx_style', 'vfx', "Special Effects", "None")
                        create_feature_ui('vfx_style', 'style', "Art Style", "Digital Illustration")
                        create_feature_ui('vfx_style', 'mood', "Mood", "Heroic")
                        create_feature_ui('vfx_style', 'camera', "Camera Angle", "Bust")
                        extra_sty = gr.Textbox(placeholder="Extra Style details (e.g. specific artists, colors)", label="Additional Style Info")
                        extra_texts.append(extra_sty)
                        gr.Markdown("### ⚙️ Technical")
                        # Aspect ratio is the LAST dropdown appended — generate_image_master
                        # below relies on dropdowns[-1] being this component.
                        create_feature_ui('technical', 'aspect_ratio', "Aspect Ratio", "1:1")
                gr.Markdown("---")
                # --- Save/load controls ---
                with gr.Row():
                    example_dropdown = gr.Dropdown(choices=get_example_list(), label="Example Characters", scale=3)
                    load_example_btn = gr.Button("📂 Load Example", variant="secondary", scale=1)
                    save_btn = gr.Button("💾 Save Character", variant="secondary", scale=1)
                    load_btn = gr.UploadButton("📂 Load Character", file_types=[".json"], variant="secondary", scale=1)
                # --- AI backend configuration ---
                with gr.Group():
                    gr.Markdown("### ⚙️ AI Backend Configuration")
                    with gr.Row():
                        # Probe local backends once at build time; choices reflect
                        # what is actually reachable when the UI is constructed.
                        ollama_models = get_ollama_models()
                        ollama_active = len(ollama_models) > 0
                        comfy_active = check_comfy_availability()
                        refinement_choices = ["Hugging Face (Cloud)"]
                        if GEMINI_API_KEY:
                            # Prefer Gemini as the default when a key is configured.
                            refinement_choices.insert(0, "Gemini (Cloud)")
                        if ollama_active:
                            refinement_choices.append("Ollama (Local)")
                        refinement_backend = gr.Radio(
                            choices=refinement_choices,
                            value=refinement_choices[0],
                            label="Prompt Refinement Backend",
                            scale=2
                        )
                        with gr.Column(scale=1):
                            ollama_model_dropdown = gr.Dropdown(
                                choices=ollama_models,
                                value=ollama_models[0] if ollama_active else None,
                                # FIX: label typo "Olama Model" -> "Ollama Model".
                                label="Ollama Model",
                                visible=False
                            )
                            # HF controls start visible only when HF is the default backend.
                            hf_text_model_dropdown = gr.Dropdown(
                                choices=HF_TEXT_MODELS,
                                value=HF_TEXT_MODELS[0],
                                label="HF Text Model",
                                visible=("Hugging Face (Cloud)" in refinement_choices and refinement_choices[0] == "Hugging Face (Cloud)")
                            )
                            hf_text_provider_input = gr.Textbox(
                                value="auto",
                                placeholder="Optional: e.g. fal-ai",
                                label="HF Text Provider",
                                visible=("Hugging Face (Cloud)" in refinement_choices and refinement_choices[0] == "Hugging Face (Cloud)")
                            )
                    with gr.Row():
                        img_choices = ["Hugging Face (Cloud)"]
                        if GEMINI_API_KEY:
                            img_choices.insert(0, "Gemini (Cloud)")
                        if comfy_active:
                            img_choices.append("ComfyUI (Local)")
                        backend_selector = gr.Radio(
                            choices=img_choices,
                            value=img_choices[0],
                            label="Image Generation Backend",
                            scale=2
                        )
                        with gr.Column(scale=1):
                            hf_image_model_dropdown = gr.Dropdown(
                                choices=HF_IMAGE_MODELS,
                                value=HF_IMAGE_MODELS[0],
                                label="HF Image Model",
                                visible=("Hugging Face (Cloud)" in img_choices and img_choices[0] == "Hugging Face (Cloud)")
                            )
                            hf_image_provider_input = gr.Textbox(
                                value="auto",
                                placeholder="Optional: e.g. fal-ai",
                                label="HF Image Provider",
                                visible=("Hugging Face (Cloud)" in img_choices and img_choices[0] == "Hugging Face (Cloud)")
                            )
                    with gr.Row():
                        with gr.Column(scale=1):
                            # Handle not needed: the button manages its own OAuth flow.
                            gr.LoginButton()
                            gr.Markdown("*Note: HF Login is disabled in 'shared' mode. Use an individual token for remote access.*")
                        with gr.Column(scale=2):
                            hf_token_input = gr.Textbox(
                                label="Individual Hugging Face Token (Optional)",
                                placeholder="Paste your hf_... token here for custom rate limits",
                                type="password"
                            )
            # --- Right column: prompts and generated portrait ---
            with gr.Column(scale=1):
                gr.Markdown("### 📝 Prompts & Output")
                prompt_output = gr.Textbox(label="Generated Technical Prompt", lines=4, interactive=False, buttons=["copy"])
                refine_btn = gr.Button("🧠 Refine Prompt", variant="primary")
                regenerate_btn = gr.Button("✨ Randomize Features", variant="secondary")
                refined_output = gr.Textbox(label="Refined Artistic Prompt", lines=6, interactive=True, buttons=["copy", "paste", "clear"])
                gr.Markdown("---")
                image_output = gr.Image(label="Portrait", show_label=False)
                gen_img_btn = gr.Button("🖼️ Generate Image", variant="primary", scale=1)
                download_img_btn = gr.DownloadButton("📥 Download Portrait (PNG)", variant="secondary", visible=False)
                status_msg = gr.Markdown("")
                download_file = gr.File(label="Saved Character JSON", visible=False)

        # --- Event wiring ---
        # Positional contract with core_logic: [name, *dropdowns, *checkboxes, *extras].
        all_input_components = [character_name] + dropdowns + checkboxes + extra_texts

        # Regenerate the technical prompt live on any input change.
        for comp in all_input_components:
            comp.change(fn=generate_prompt, inputs=all_input_components, outputs=prompt_output)

        # Randomize flagged features, rebuild the prompt, drop the now-stale refinement.
        regenerate_btn.click(
            fn=handle_regeneration,
            inputs=dropdowns + checkboxes,
            outputs=dropdowns
        ).then(
            fn=generate_prompt,
            inputs=all_input_components,
            outputs=prompt_output
        ).then(
            fn=lambda: "",
            outputs=refined_output
        )
        # Name generator is race-aware; pass the function directly
        # (the original wrapped it in a redundant pass-through lambda).
        name_gen_btn.click(
            fn=generate_fantasy_name,
            inputs=[race_dd],
            outputs=character_name
        )
        refine_btn.click(
            fn=refine_master,
            inputs=[prompt_output, refinement_backend, ollama_model_dropdown, hf_text_model_dropdown, hf_text_provider_input, hf_token_input, character_name],
            outputs=[refined_output, status_msg]
        )
        # dropdowns[-1] is the aspect-ratio dropdown (last feature registered).
        gen_img_btn.click(
            fn=generate_image_master,
            inputs=[refined_output, prompt_output, dropdowns[-1], backend_selector, hf_image_model_dropdown, hf_image_provider_input, hf_token_input, character_name],
            outputs=[image_output, download_img_btn, status_msg]
        ).then(
            # Show the download button only when a file value was produced.
            fn=lambda x: gr.update(value=x, visible=True) if x else gr.update(visible=False),
            inputs=download_img_btn,
            outputs=download_img_btn
        )
        # Toggle backend-specific controls when the refinement backend changes.
        refinement_backend.change(
            fn=lambda b: (
                gr.update(visible=(b == "Ollama (Local)")),
                gr.update(visible=(b == "Hugging Face (Cloud)")),
                gr.update(visible=(b == "Hugging Face (Cloud)"))
            ),
            inputs=refinement_backend,
            outputs=[ollama_model_dropdown, hf_text_model_dropdown, hf_text_provider_input]
        )
        backend_selector.change(
            fn=lambda b: (
                gr.update(visible=(b == "Hugging Face (Cloud)")),
                gr.update(visible=(b == "Hugging Face (Cloud)"))
            ),
            inputs=backend_selector,
            outputs=[hf_image_model_dropdown, hf_image_provider_input]
        )
        # Save produces a JSON file, then reveals the hidden file component.
        save_btn.click(
            fn=save_character,
            inputs=all_input_components,
            outputs=download_file
        ).then(
            fn=lambda: gr.update(visible=True),
            outputs=download_file
        )
        # Loading a character (upload or example) repopulates every input,
        # rebuilds the prompt, and clears the stale refined prompt.
        load_btn.upload(
            fn=load_character,
            inputs=load_btn,
            outputs=all_input_components
        ).then(
            fn=generate_prompt,
            inputs=all_input_components,
            outputs=prompt_output
        ).then(
            fn=lambda: "",
            outputs=refined_output
        )
        load_example_btn.click(
            fn=load_example_character,
            inputs=example_dropdown,
            outputs=all_input_components
        ).then(
            fn=generate_prompt,
            inputs=all_input_components,
            outputs=prompt_output
        ).then(
            fn=lambda: "",
            outputs=refined_output
        )
        # Populate the technical prompt from the default values on page load.
        demo.load(fn=generate_prompt, inputs=all_input_components, outputs=prompt_output)
    return demo