Spaces:
Sleeping
Sleeping
File size: 13,537 Bytes
adfd61d db0fb8b 8fc430a 8d00cc9 adfd61d dd35031 adfd61d de40a85 764f61d a6956eb 764f61d 8fc430a ae00a18 8d00cc9 adfd61d ca6bc6f adfd61d 764f61d db0fb8b 8fc430a a6956eb 8fc430a a6956eb 8fc430a dd35031 8fc430a adfd61d 8fc430a adfd61d de40a85 adfd61d 8fc430a adfd61d dd35031 adfd61d 8fc430a dd35031 adfd61d 8fc430a a6956eb 8fc430a e286e0f a6956eb dd35031 de40a85 dd35031 a6956eb adfd61d 8fc430a 764f61d 61024a5 a6956eb de40a85 adfd61d de40a85 adfd61d 61024a5 adfd61d a6956eb adfd61d dd35031 a6956eb de40a85 adfd61d a6956eb 764f61d a6956eb 764f61d a6956eb 764f61d a6956eb 8d00cc9 de40a85 a6956eb 8fc430a a6956eb 8fc430a a6956eb 8fc430a 8d00cc9 a6956eb 8d00cc9 a6956eb 8d00cc9 8fc430a a6956eb 8fc430a a6956eb 8d00cc9 a6956eb 8d00cc9 a6956eb 8d00cc9 adfd61d dd35031 adfd61d a6956eb adfd61d a6956eb adfd61d de40a85 8fc430a a6956eb 8fc430a a6956eb 8fc430a a6956eb 8fc430a a6956eb 8fc430a de40a85 a6956eb de40a85 8fc430a a6956eb 8d00cc9 a6956eb adfd61d a6956eb 8fc430a a6956eb adfd61d a6956eb 8fc430a adfd61d a6956eb 8fc430a adfd61d a6956eb adfd61d a6956eb adfd61d db0fb8b 8d00cc9 a6956eb de40a85 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 
255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 | import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
from PIL import Image
import io
import requests
import os
from datetime import datetime
import re
import tempfile
import time
import base64
import json
from typing import Dict, List, Tuple
# External OCI API URL used when the local OCI connector is unavailable.
OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"

# Try to import the project's OCI connector for direct object-storage access;
# fall back to the external HTTP API endpoint when running outside that environment.
try:
    from app import oci_connector
    DIRECT_OCI_ACCESS = True
    print("✅ Direct OCI access available - using existing OCI connector")
except ImportError:
    DIRECT_OCI_ACCESS = False
    print("⚠️ Direct OCI access not available - using external API endpoint")
# HIGH-QUALITY MODEL SELECTION
# Friendly name -> Hugging Face model id for the checkpoints offered in the UI.
MODEL_CHOICES = {
    "dreamshaper-8": "lykon/dreamshaper-8",
    "realistic-vision": "SG161222/Realistic_Vision_V5.1",
    "anything-v5": "andite/anything-v5.0",
    "openjourney": "prompthero/openjourney",
    "sd-2.1": "stabilityai/stable-diffusion-2-1",
}
# Story tracking for sequence numbering.
# In-memory, per-process counter (resets on restart); keyed by the cleaned
# story title produced by get_next_sequence_number().
story_registry: Dict[str, int] = {}  # {story_title: current_sequence}
# Initialize the HIGH-QUALITY Stable Diffusion model
def load_model(model_name="dreamshaper-8"):
    """Load and return a Stable Diffusion pipeline for *model_name*.

    Unknown names fall back to the dreamshaper-8 checkpoint; any load failure
    falls back to runwayml/stable-diffusion-v1-5 so the app always starts with
    a usable pipeline. Runs on CPU with float32 weights.

    Args:
        model_name: Key into MODEL_CHOICES selecting the checkpoint.

    Returns:
        A StableDiffusionPipeline moved to CPU.
    """
    print(f"🚀 Loading {model_name} model...")
    try:
        model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float32,
            # Safety checker disabled deliberately in the original code.
            safety_checker=None,
            requires_safety_checker=False,
        )
        # Euler-Ancestral scheduler; pairs with the 30-step budget used at
        # generation time.
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cpu")
        print(f"✅ {model_name} loaded successfully!")
        return pipe
    except Exception as e:
        print(f"❌ Model loading failed: {e}")
        # Last-resort fallback checkpoint known to be broadly available.
        return StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            torch_dtype=torch.float32,
            safety_checker=None,
            requires_safety_checker=False,
        ).to("cpu")
# Load the model once at startup.
# Module-level pipeline shared by all requests; generate_storybook_page may
# replace it (via `global pipe`) when a non-default model is selected.
pipe = load_model()
# PROFESSIONAL PROMPT ENGINEERING
def enhance_prompt(prompt, style="childrens_book"):
    """Return (enhanced_prompt, negative_prompt) for a basic scene prompt.

    The enhanced prompt is the first template line for the requested style,
    followed by the user's text and two randomly sampled quality boosters.
    Unknown styles fall back to the children's-book templates. The negative
    prompt is a fixed anti-artifact string.
    """
    style_templates = {
        "childrens_book": [
            "masterpiece, best quality, 4K, ultra detailed, children's book illustration",
            "watercolor painting, whimsical, cute, charming, storybook style",
            "vibrant colors, soft lighting, magical, enchanting, dreamlike",
            "Pixar style, Disney animation, high detail, professional artwork",
        ],
        "realistic": [
            "photorealistic, 8K, ultra detailed, professional photography",
            "sharp focus, studio lighting, high resolution, intricate details",
            "realistic textures, natural lighting, cinematic quality",
        ],
        "fantasy": [
            "epic fantasy art, digital painting, concept art, trending on artstation",
            "magical, mystical, ethereal, otherworldly, fantasy illustration",
            "dynamic composition, dramatic lighting, highly detailed",
        ],
        "anime": [
            "anime style, Japanese animation, high quality, detailed artwork",
            "beautiful anime illustration, vibrant colors, clean lines",
            "studio ghibli style, makoto shinkai, professional anime art",
        ],
    }

    quality_boosters = [
        "intricate details", "beautiful composition", "perfect lighting",
        "professional artwork", "award winning", "trending on artstation",
    ]

    import random

    chosen_templates = style_templates.get(style, style_templates["childrens_book"])
    picked = random.sample(quality_boosters, 2)
    # Equivalent to prefixing the style line and appending the two boosters.
    enhanced = ", ".join([chosen_templates[0], prompt, *picked])

    negative_prompt = (
        "blurry, low quality, low resolution, ugly, deformed, poorly drawn, "
        "bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, "
        "disconnected limbs, mutation, mutated, ugly, disgusting, bad art, "
        "beginner, amateur, distorted, watermark, signature, text, username"
    )
    return enhanced, negative_prompt
def get_next_sequence_number(story_title: str) -> Tuple[int, str]:
    """Return (next_sequence, clean_title) for *story_title*.

    The title is normalized to a filesystem-safe identifier (spaces become
    underscores; any character outside [a-zA-Z0-9_-] is stripped), then the
    per-title counter in the module-level ``story_registry`` is incremented.

    Note: the annotation previously claimed ``-> int``, but this function has
    always returned a (sequence, clean_title) tuple — callers unpack both.
    """
    # Clean the story title for use in filenames.
    clean_title = re.sub(r'[^a-zA-Z0-9_\-]', '', story_title.strip().replace(' ', '_'))
    story_registry[clean_title] = story_registry.get(clean_title, 0) + 1
    return story_registry[clean_title], clean_title
def save_to_oci_via_api(image, prompt, story_title, sequence_number):
    """Upload *image* as a PNG to the storybook bucket via the external API.

    Files are named ``page_<seq:03d>_<clean_title>.png`` and uploaded into
    ``stories/<clean_title>/``. Never raises: every failure mode is reported
    in the returned human-readable status string.

    Args:
        image: PIL image to upload.
        prompt: Original scene description (currently unused in the payload).
        story_title: Raw story title; cleaned for the folder/filename.
        sequence_number: Explicit page number, or None to auto-assign.

    Returns:
        Status string prefixed with ✅ on success or ❌ on failure.
    """
    try:
        # Serialize the PIL image to PNG bytes for the multipart upload.
        img_bytes = io.BytesIO()
        image.save(img_bytes, format='PNG')
        img_data = img_bytes.getvalue()

        # Auto-assign the next page number; an explicit sequence_number
        # (batch generation) overrides it. NOTE(review): the registry is
        # incremented even when overridden — confirm that is intended.
        seq_num, clean_title = get_next_sequence_number(story_title)
        if sequence_number is not None:
            seq_num = sequence_number

        # Organized filename with zero-padded sequence for correct sorting.
        filename = f"page_{seq_num:03d}_{clean_title}.png"

        api_url = f"{OCI_API_BASE_URL}/api/upload"
        print(f"📚 Saving to: stories/{clean_title}/{filename}")

        # Multipart form data expected by the upload endpoint.
        files = {'file': (filename, img_data, 'image/png')}
        data = {
            'project_id': 'storybook-library',
            'subfolder': f'stories/{clean_title}',
        }
        response = requests.post(api_url, files=files, data=data, timeout=30)

        if response.status_code == 200:
            result = response.json()
            if result['status'] == 'success':
                return f"✅ Saved: {filename} | Story: {clean_title}"
            return f"❌ API Error: {result.get('message', 'Unknown error')}"
        return f"❌ HTTP Error: {response.status_code}"
    except Exception as e:
        return f"❌ Upload failed: {str(e)}"
def generate_storybook_page(prompt, story_title, sequence_number=None, model_choice="dreamshaper-8", style="childrens_book"):
    """Generate one storybook page image and upload it to storage.

    Args:
        prompt: Scene description for this page.
        story_title: Story the page belongs to (drives folder/filename).
        sequence_number: Explicit page number, or None to auto-assign.
        model_choice: Key into MODEL_CHOICES; non-default choices reload the
            module-level pipeline.
        style: Art-style key passed to enhance_prompt.

    Returns:
        (image, status_message); image is None when validation or generation
        fails, with the reason in status_message.
    """
    try:
        # Validate inputs before touching the (expensive) pipeline.
        if not prompt or not prompt.strip():
            return None, "❌ Please enter a scene description"
        if not story_title or not story_title.strip():
            return None, "❌ Please enter a story title"

        # Swap the shared pipeline when a non-default model is requested.
        global pipe
        if model_choice != "dreamshaper-8":
            pipe = load_model(model_choice)

        # Professional prompt enhancement.
        enhanced_prompt, negative_prompt = enhance_prompt(prompt, style)
        print(f"📖 Generating page for: {story_title}")
        print(f"🎨 Prompt: {enhanced_prompt}")

        # Time-based seed keeps successive pages visually distinct.
        image = pipe(
            prompt=enhanced_prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
            guidance_scale=8.5,
            width=768,
            height=768,
            generator=torch.Generator(device="cpu").manual_seed(int(time.time())),
        ).images[0]
        print("✅ Page generated successfully!")

        # Save with organized structure and surface the upload status.
        save_status = save_to_oci_via_api(image, prompt, story_title, sequence_number)
        print(f"💾 {save_status}")
        return image, save_status
    except Exception as e:
        error_msg = f"❌ Generation failed: {str(e)}"
        print(error_msg)
        return None, error_msg
def batch_generate_storybook(story_title, scenes_text, model_choice="dreamshaper-8", style="childrens_book"):
    """Generate one page per non-empty line of *scenes_text*.

    Args:
        story_title: Title shared by every generated page.
        scenes_text: Newline-separated scene descriptions (one per page).
        model_choice: Key into MODEL_CHOICES, forwarded to page generation.
        style: Art-style key, forwarded to page generation.

    Returns:
        (gallery_items, status_text): gallery_items is a list of
        (caption, image) pairs for the Gradio Gallery; status_text joins
        per-page status lines.
    """
    if not story_title or not scenes_text:
        return [], "❌ Please provide story title and scenes"

    scenes = [scene.strip() for scene in scenes_text.split('\n') if scene.strip()]
    results = []
    status_messages = []
    for i, scene in enumerate(scenes, 1):
        print(f"📖 Generating page {i}/{len(scenes)} for: {story_title}")
        # Pass the explicit page index so filenames stay in story order.
        image, status = generate_storybook_page(
            scene, story_title, i, model_choice, style
        )
        # Explicit None check — do not rely on image-object truthiness.
        if image is not None:
            results.append((f"Page {i}: {scene}", image))
        status_messages.append(f"Page {i}: {status}")
    return results, "\n".join(status_messages)
# Create the enhanced Gradio interface
# Build the Gradio UI. Layout: settings column (title, model, style) next to a
# single-page generation column; below that, batch generation plus two
# reference accordions; event wiring at the end.
# NOTE(review): several label/markdown strings contain mojibake characters
# (e.g. "π", "β") from a past encoding corruption — left byte-identical here.
with gr.Blocks(title="Professional Storybook Generator", theme="soft") as demo:
    gr.Markdown("# π Professional Storybook Generator")
    gr.Markdown("Create organized storybooks with sequential page numbering")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π Story Information")
            # Shared by both the single-page and batch flows.
            story_title = gr.Textbox(
                label="Story Title",
                placeholder="Enter your story title...\nExample: The Dragon's Reading Adventure",
                lines=1
            )
            gr.Markdown("### π― Quality Settings")
            model_choice = gr.Dropdown(
                label="AI Model",
                choices=list(MODEL_CHOICES.keys()),
                value="dreamshaper-8"
            )
            style_choice = gr.Dropdown(
                label="Art Style",
                choices=["childrens_book", "realistic", "fantasy", "anime"],
                value="childrens_book"
            )
        with gr.Column(scale=2):
            gr.Markdown("### π¨ Single Page Generation")
            prompt_input = gr.Textbox(
                label="Page Description",
                placeholder="Describe this page's scene...\nExample: The dragon discovers a magical library in the forest",
                lines=2
            )
            generate_btn = gr.Button("β¨ Generate Single Page", variant="primary")
            image_output = gr.Image(label="Generated Page", height=400, show_download_button=True)
            status_output = gr.Textbox(label="Status", interactive=False, lines=2)
    with gr.Row():
        gr.Markdown("### π Complete Storybook Generation")
    with gr.Row():
        with gr.Column():
            # One scene per line; each line becomes one generated page.
            scenes_input = gr.Textbox(
                label="Story Scenes (One per line = One per page)",
                placeholder="Enter each page's scene on a separate line...\nExample:\nThe dragon finds a mysterious book\nHe learns to read with owl friend\nThey discover hidden treasure map\nFriends celebrate with magical feast",
                lines=6
            )
            batch_btn = gr.Button("π Generate Complete Storybook", variant="primary")
        with gr.Column():
            batch_status = gr.Textbox(label="Generation Status", interactive=False, lines=8)
            batch_gallery = gr.Gallery(label="Storybook Pages", columns=2, height=600)
    with gr.Accordion("π Folder Structure Preview", open=True):
        gr.Markdown("""
**Your story will be organized as:**
```
storybook-library/
βββ stories/
βββ The_Dragons_Reading_Adventure/
βββ page_001_The_Dragons_Reading_Adventure.png
βββ page_002_The_Dragons_Reading_Adventure.png
βββ page_003_The_Dragons_Reading_Adventure.png
βββ page_004_The_Dragons_Reading_Adventure.png
```
**Perfect for:**
- β
Easy PDF compilation later
- β
Automatic sequencing
- β
Organized by story title
- β
Ready for n8n automation
""")
    with gr.Accordion("π‘ API Usage for n8n", open=False):
        gr.Markdown("""
**For n8n automation, use this JSON format:**
```json
{
"story_title": "The Dragon's Reading Adventure",
"scenes": [
"The dragon finds a mysterious old book in the forest",
"He meets an owl who teaches him to read",
"They discover a hidden treasure map in the book",
"All animal friends celebrate with a magical feast"
],
"model_choice": "dreamshaper-8",
"style": "childrens_book"
}
```
""")
    # Connect buttons to functions. The invisible gr.Number fills the
    # sequence_number parameter slot so single pages auto-number.
    generate_btn.click(
        fn=generate_storybook_page,
        inputs=[prompt_input, story_title, gr.Number(visible=False), model_choice, style_choice],
        outputs=[image_output, status_output]
    )
    batch_btn.click(
        fn=batch_generate_storybook,
        inputs=[story_title, scenes_input, model_choice, style_choice],
        outputs=[batch_gallery, batch_status]
    )
def get_app():
    """Return the Gradio Blocks app (entry point for external mounting/hosting)."""
    return demo
if __name__ == "__main__":
    print("🚀 Starting Professional Storybook Generator...")
    print("📚 Feature: Organized story sequencing enabled")
    # Bind on all interfaces on the standard Hugging Face Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)