Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -48,14 +48,18 @@ class JobStatus(str, Enum):
|
|
| 48 |
COMPLETED = "completed"
|
| 49 |
FAILED = "failed"
|
| 50 |
|
| 51 |
-
# Story scene model
|
| 52 |
class StoryScene(BaseModel):
|
| 53 |
visual: str
|
| 54 |
text: str
|
|
|
|
|
|
|
| 55 |
|
| 56 |
class CharacterDescription(BaseModel):
|
| 57 |
name: str
|
| 58 |
description: str
|
|
|
|
|
|
|
| 59 |
|
| 60 |
class StorybookRequest(BaseModel):
|
| 61 |
story_title: str
|
|
@@ -64,6 +68,7 @@ class StorybookRequest(BaseModel):
|
|
| 64 |
model_choice: str = "dreamshaper-8"
|
| 65 |
style: str = "childrens_book"
|
| 66 |
callback_url: Optional[str] = None
|
|
|
|
| 67 |
|
| 68 |
class JobStatusResponse(BaseModel):
|
| 69 |
job_id: str
|
|
@@ -83,6 +88,22 @@ MODEL_CHOICES = {
|
|
| 83 |
"sd-2.1": "stabilityai/stable-diffusion-2-1",
|
| 84 |
}
|
| 85 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
# GLOBAL STORAGE
|
| 87 |
job_storage = {}
|
| 88 |
model_cache = {}
|
|
@@ -135,79 +156,259 @@ print("π Initializing Storybook Generator API...")
|
|
| 135 |
load_model("dreamshaper-8")
|
| 136 |
print("β
Model loaded and ready!")
|
| 137 |
|
| 138 |
-
#
|
| 139 |
-
def
|
| 140 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 141 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
style_templates = {
|
| 143 |
-
"childrens_book":
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
"Pixar style, Disney animation, high detail, professional artwork"
|
| 148 |
-
],
|
| 149 |
-
"realistic": [
|
| 150 |
-
"photorealistic, 8K, ultra detailed, professional photography",
|
| 151 |
-
"sharp focus, studio lighting, high resolution, intricate details",
|
| 152 |
-
"realistic textures, natural lighting, cinematic quality"
|
| 153 |
-
],
|
| 154 |
-
"fantasy": [
|
| 155 |
-
"epic fantasy art, digital painting, concept art, trending on artstation",
|
| 156 |
-
"magical, mystical, ethereal, otherworldly, fantasy illustration",
|
| 157 |
-
"dynamic composition, dramatic lighting, highly detailed"
|
| 158 |
-
],
|
| 159 |
-
"anime": [
|
| 160 |
-
"anime style, Japanese animation, high quality, detailed artwork",
|
| 161 |
-
"beautiful anime illustration, vibrant colors, clean lines",
|
| 162 |
-
"studio ghibli style, makoto shinkai, professional anime art"
|
| 163 |
-
]
|
| 164 |
}
|
| 165 |
|
| 166 |
-
|
| 167 |
-
|
|
|
|
|
|
|
|
|
|
| 168 |
|
| 169 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
|
|
|
|
| 171 |
quality_boosters = [
|
| 172 |
-
"
|
| 173 |
-
"
|
|
|
|
|
|
|
| 174 |
]
|
| 175 |
|
| 176 |
-
|
| 177 |
-
enhanced += ", " + ", ".join(boosters)
|
| 178 |
|
|
|
|
| 179 |
negative_prompt = (
|
| 180 |
-
"
|
| 181 |
-
"
|
| 182 |
-
"
|
| 183 |
-
"
|
| 184 |
)
|
| 185 |
|
| 186 |
-
return
|
| 187 |
|
| 188 |
-
def
|
| 189 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 190 |
try:
|
| 191 |
pipe = load_model(model_choice)
|
| 192 |
|
| 193 |
image = pipe(
|
| 194 |
-
prompt=
|
| 195 |
negative_prompt=negative_prompt,
|
| 196 |
-
num_inference_steps=
|
| 197 |
-
guidance_scale=
|
| 198 |
width=768,
|
| 199 |
height=768,
|
| 200 |
-
generator=torch.Generator(device="cpu").manual_seed(
|
| 201 |
).images[0]
|
| 202 |
|
| 203 |
-
print("β
|
|
|
|
|
|
|
|
|
|
| 204 |
return image
|
| 205 |
|
| 206 |
except Exception as e:
|
| 207 |
-
print(f"β
|
| 208 |
raise
|
| 209 |
|
| 210 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
def save_image_to_local(image, prompt, style="test"):
|
| 212 |
"""Save image to local persistent storage"""
|
| 213 |
try:
|
|
@@ -287,7 +488,7 @@ def refresh_local_images():
|
|
| 287 |
print(f"Error refreshing local images: {e}")
|
| 288 |
return []
|
| 289 |
|
| 290 |
-
# OCI BUCKET FUNCTIONS (
|
| 291 |
def save_to_oci_bucket(image, text_content, story_title, page_number, file_type="image"):
|
| 292 |
"""Save both images and text to OCI bucket via your OCI API"""
|
| 293 |
try:
|
|
@@ -328,10 +529,14 @@ def save_to_oci_bucket(image, text_content, story_title, page_number, file_type=
|
|
| 328 |
except Exception as e:
|
| 329 |
raise Exception(f"OCI upload failed: {str(e)}")
|
| 330 |
|
| 331 |
-
# JOB MANAGEMENT FUNCTIONS
|
| 332 |
def create_job(story_request: StorybookRequest) -> str:
|
| 333 |
job_id = str(uuid.uuid4())
|
| 334 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 335 |
job_storage[job_id] = {
|
| 336 |
"status": JobStatus.PENDING,
|
| 337 |
"progress": 0,
|
|
@@ -340,10 +545,14 @@ def create_job(story_request: StorybookRequest) -> str:
|
|
| 340 |
"result": None,
|
| 341 |
"created_at": time.time(),
|
| 342 |
"updated_at": time.time(),
|
| 343 |
-
"pages": []
|
|
|
|
|
|
|
| 344 |
}
|
| 345 |
|
| 346 |
print(f"π Created job {job_id} for story: {story_request.story_title}")
|
|
|
|
|
|
|
| 347 |
return job_id
|
| 348 |
|
| 349 |
def update_job_status(job_id: str, status: JobStatus, progress: int, message: str, result=None):
|
|
@@ -368,7 +577,7 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
|
|
| 368 |
try:
|
| 369 |
callback_url = request_data["callback_url"]
|
| 370 |
|
| 371 |
-
# Enhanced callback data
|
| 372 |
callback_data = {
|
| 373 |
"job_id": job_id,
|
| 374 |
"status": status.value,
|
|
@@ -376,6 +585,7 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
|
|
| 376 |
"message": message,
|
| 377 |
"story_title": request_data["story_title"],
|
| 378 |
"total_scenes": len(request_data["scenes"]),
|
|
|
|
| 379 |
"timestamp": time.time(),
|
| 380 |
"source": "huggingface-storybook-generator",
|
| 381 |
"estimated_time_remaining": calculate_remaining_time(job_id, progress)
|
|
@@ -387,7 +597,8 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
|
|
| 387 |
"total_pages": result.get("total_pages", 0),
|
| 388 |
"generation_time": result.get("generation_time", 0),
|
| 389 |
"oci_bucket_url": result.get("oci_bucket_url", ""),
|
| 390 |
-
"pages_generated": result.get("generated_pages", 0)
|
|
|
|
| 391 |
}
|
| 392 |
|
| 393 |
# Add current scene info for processing jobs
|
|
@@ -395,7 +606,13 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
|
|
| 395 |
current_scene = progress // (100 // len(request_data["scenes"])) + 1
|
| 396 |
callback_data["current_scene"] = current_scene
|
| 397 |
callback_data["total_scenes"] = len(request_data["scenes"])
|
| 398 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 399 |
|
| 400 |
headers = {
|
| 401 |
'Content-Type': 'application/json',
|
|
@@ -433,19 +650,26 @@ def calculate_remaining_time(job_id, progress):
|
|
| 433 |
|
| 434 |
return "Unknown"
|
| 435 |
|
| 436 |
-
# BACKGROUND TASK
|
| 437 |
def generate_storybook_background(job_id: str):
|
| 438 |
-
"""Background task to generate complete storybook with
|
| 439 |
try:
|
| 440 |
job_data = job_storage[job_id]
|
| 441 |
story_request_data = job_data["request"]
|
| 442 |
story_request = StorybookRequest(**story_request_data)
|
|
|
|
| 443 |
|
| 444 |
-
print(f"π¬ Starting
|
| 445 |
print(f"π Story: {story_request.story_title}")
|
| 446 |
-
print(f"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 447 |
|
| 448 |
-
update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation...")
|
| 449 |
|
| 450 |
total_scenes = len(story_request.scenes)
|
| 451 |
generated_pages = []
|
|
@@ -453,25 +677,35 @@ def generate_storybook_background(job_id: str):
|
|
| 453 |
|
| 454 |
for i, scene in enumerate(story_request.scenes):
|
| 455 |
progress = 5 + int((i / total_scenes) * 90)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 456 |
update_job_status(
|
| 457 |
job_id,
|
| 458 |
JobStatus.PROCESSING,
|
| 459 |
progress,
|
| 460 |
-
f"Generating page {i+1}/{total_scenes}: {scene.visual[:50]}..."
|
| 461 |
)
|
| 462 |
|
| 463 |
try:
|
| 464 |
-
print(f"πΌοΈ Generating page {i+1}: {
|
| 465 |
|
| 466 |
-
#
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
# Generate high-quality image
|
| 470 |
-
image = generate_high_quality_image(
|
| 471 |
-
enhanced_prompt,
|
| 472 |
story_request.model_choice,
|
| 473 |
story_request.style,
|
| 474 |
-
|
|
|
|
|
|
|
|
|
|
| 475 |
)
|
| 476 |
|
| 477 |
# Save IMAGE to OCI bucket
|
|
@@ -498,12 +732,13 @@ def generate_storybook_background(job_id: str):
|
|
| 498 |
"image_url": image_url,
|
| 499 |
"text_url": text_url,
|
| 500 |
"text_content": scene.text,
|
| 501 |
-
"
|
| 502 |
-
"
|
|
|
|
| 503 |
}
|
| 504 |
generated_pages.append(page_data)
|
| 505 |
|
| 506 |
-
print(f"β
Page {i+1} completed -
|
| 507 |
|
| 508 |
except Exception as e:
|
| 509 |
error_msg = f"Failed to generate page {i+1}: {str(e)}"
|
|
@@ -522,6 +757,8 @@ def generate_storybook_background(job_id: str):
|
|
| 522 |
"generation_time": round(generation_time, 2),
|
| 523 |
"folder_path": f"stories/{story_request.story_title}",
|
| 524 |
"oci_bucket_url": f"https://oci.com/stories/{story_request.story_title}",
|
|
|
|
|
|
|
| 525 |
"pages": generated_pages,
|
| 526 |
"file_structure": {
|
| 527 |
"images": [f"page_{i+1:03d}.png" for i in range(total_scenes)],
|
|
@@ -533,25 +770,40 @@ def generate_storybook_background(job_id: str):
|
|
| 533 |
job_id,
|
| 534 |
JobStatus.COMPLETED,
|
| 535 |
100,
|
| 536 |
-
f"π Storybook completed! {len(generated_pages)} pages created in {generation_time:.2f}s.
|
| 537 |
result
|
| 538 |
)
|
| 539 |
|
| 540 |
-
print(f"π
|
| 541 |
print(f"π Saved to: stories/{story_request.story_title} in OCI bucket")
|
|
|
|
| 542 |
|
| 543 |
except Exception as e:
|
| 544 |
-
error_msg = f"
|
| 545 |
print(f"β {error_msg}")
|
| 546 |
update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
|
| 547 |
|
| 548 |
# FASTAPI ENDPOINTS (for n8n)
|
| 549 |
@app.post("/api/generate-storybook")
|
| 550 |
async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
|
| 551 |
-
"""Main endpoint for n8n integration - generates complete storybook"""
|
| 552 |
try:
|
| 553 |
print(f"π₯ Received n8n request for story: {request.get('story_title', 'Unknown')}")
|
| 554 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 555 |
# Convert to Pydantic model
|
| 556 |
story_request = StorybookRequest(**request)
|
| 557 |
|
|
@@ -568,16 +820,19 @@ async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
|
|
| 568 |
# Immediate response for n8n
|
| 569 |
response_data = {
|
| 570 |
"status": "success",
|
| 571 |
-
"message": "Storybook generation started successfully",
|
| 572 |
"job_id": job_id,
|
| 573 |
"story_title": story_request.story_title,
|
| 574 |
"total_scenes": len(story_request.scenes),
|
|
|
|
|
|
|
|
|
|
| 575 |
"callback_url": story_request.callback_url,
|
| 576 |
-
"estimated_time_seconds": len(story_request.scenes) *
|
| 577 |
"timestamp": datetime.now().isoformat()
|
| 578 |
}
|
| 579 |
|
| 580 |
-
print(f"β
Job {job_id} started for: {story_request.story_title}")
|
| 581 |
|
| 582 |
return response_data
|
| 583 |
|
|
@@ -612,6 +867,7 @@ async def api_health():
|
|
| 612 |
"timestamp": datetime.now().isoformat(),
|
| 613 |
"active_jobs": len(job_storage),
|
| 614 |
"models_loaded": list(model_cache.keys()),
|
|
|
|
| 615 |
"oci_api_connected": OCI_API_BASE_URL
|
| 616 |
}
|
| 617 |
|
|
@@ -631,145 +887,56 @@ async def delete_local_image_api(filename: str):
|
|
| 631 |
except Exception as e:
|
| 632 |
return {"status": "error", "message": str(e)}
|
| 633 |
|
| 634 |
-
#
|
| 635 |
-
|
| 636 |
-
|
| 637 |
-
gr.Markdown("Generate **studio-quality** storybook images with professional results")
|
| 638 |
-
|
| 639 |
-
# Storage info display
|
| 640 |
-
storage_info = gr.Textbox(
|
| 641 |
-
label="π Local Storage Information",
|
| 642 |
-
interactive=False,
|
| 643 |
-
lines=2
|
| 644 |
-
)
|
| 645 |
-
|
| 646 |
-
def update_storage_info():
|
| 647 |
-
info = get_local_storage_info()
|
| 648 |
-
if "error" not in info:
|
| 649 |
-
return f"π Local Storage: {info['total_files']} images, {info['total_size_mb']} MB used"
|
| 650 |
-
return "π Local Storage: Unable to calculate"
|
| 651 |
-
|
| 652 |
-
with gr.Row():
|
| 653 |
-
with gr.Column(scale=1):
|
| 654 |
-
gr.Markdown("### π― Quality Settings")
|
| 655 |
-
|
| 656 |
-
model_choice = gr.Dropdown(
|
| 657 |
-
label="AI Model",
|
| 658 |
-
choices=list(MODEL_CHOICES.keys()),
|
| 659 |
-
value="dreamshaper-8",
|
| 660 |
-
info="Choose the best model for your style"
|
| 661 |
-
)
|
| 662 |
-
|
| 663 |
-
style_choice = gr.Dropdown(
|
| 664 |
-
label="Art Style",
|
| 665 |
-
choices=["childrens_book", "realistic", "fantasy", "anime"],
|
| 666 |
-
value="childrens_book",
|
| 667 |
-
info="Select the artistic style"
|
| 668 |
-
)
|
| 669 |
-
|
| 670 |
-
prompt_input = gr.Textbox(
|
| 671 |
-
label="Scene Description",
|
| 672 |
-
placeholder="Describe your scene in detail...\nExample: A friendly dragon reading a giant book under a magical tree with glowing fairies",
|
| 673 |
-
lines=3
|
| 674 |
-
)
|
| 675 |
-
|
| 676 |
-
generate_btn = gr.Button("β¨ Generate Premium Image", variant="primary")
|
| 677 |
-
|
| 678 |
-
# Current image management
|
| 679 |
-
current_file_path = gr.State()
|
| 680 |
-
delete_btn = gr.Button("ποΈ Delete This Image", variant="stop")
|
| 681 |
-
delete_status = gr.Textbox(label="Delete Status", interactive=False, lines=2)
|
| 682 |
-
|
| 683 |
-
gr.Markdown("### π API Usage for n8n")
|
| 684 |
-
gr.Markdown("""
|
| 685 |
-
**For complete storybooks (OCI bucket):**
|
| 686 |
-
- Endpoint: `POST /api/generate-storybook`
|
| 687 |
-
- Input: `story_title`, `scenes[]`, `characters[]`
|
| 688 |
-
- Output: Saves to OCI bucket automatically
|
| 689 |
-
""")
|
| 690 |
-
|
| 691 |
-
with gr.Column(scale=2):
|
| 692 |
-
image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
|
| 693 |
-
status_output = gr.Textbox(label="Status", interactive=False, lines=4)
|
| 694 |
-
|
| 695 |
-
# Examples section
|
| 696 |
-
with gr.Accordion("π‘ Prompt Examples & Tips", open=False):
|
| 697 |
-
gr.Markdown("""
|
| 698 |
-
## π¨ Professional Prompt Examples:
|
| 699 |
-
|
| 700 |
-
**Best Results:**
|
| 701 |
-
- "A cute baby dragon learning to read with an old wise turtle in a magical forest"
|
| 702 |
-
- "Group of animal friends having a picnic under a giant glowing mushroom"
|
| 703 |
-
- "Little girl exploring a castle made of candy with talking animals"
|
| 704 |
-
- "Space adventure with friendly robots and colorful aliens"
|
| 705 |
-
|
| 706 |
-
## π« Avoid:
|
| 707 |
-
- "dragon" β **"friendly cartoon dragon reading book"**
|
| 708 |
-
- "cat" β **"cute kitten playing with yarn ball"**
|
| 709 |
-
- "tree" β **"magical tree with glowing fairies"**
|
| 710 |
-
|
| 711 |
-
## β‘ Pro Tips:
|
| 712 |
-
1. **Be descriptive** - Add colors, emotions, settings
|
| 713 |
-
2. **Specify style** - "watercolor", "cartoon", "realistic"
|
| 714 |
-
3. **Add details** - "with glowing effects", "in a magical forest"
|
| 715 |
-
4. **Use positive language** - "happy", "friendly", "colorful"
|
| 716 |
-
""")
|
| 717 |
-
|
| 718 |
-
# Local file management section
|
| 719 |
-
with gr.Accordion("π Manage Local Test Images", open=True):
|
| 720 |
-
gr.Markdown("### Locally Saved Images")
|
| 721 |
-
|
| 722 |
-
with gr.Row():
|
| 723 |
-
refresh_btn = gr.Button("π Refresh List")
|
| 724 |
-
clear_all_btn = gr.Button("ποΈ Clear All Images", variant="stop")
|
| 725 |
-
|
| 726 |
-
file_gallery = gr.Gallery(
|
| 727 |
-
label="Local Images",
|
| 728 |
-
show_label=True,
|
| 729 |
-
elem_id="gallery",
|
| 730 |
-
columns=4,
|
| 731 |
-
height="auto"
|
| 732 |
-
)
|
| 733 |
-
|
| 734 |
-
clear_status = gr.Textbox(label="Clear Status", interactive=False)
|
| 735 |
|
| 736 |
-
|
| 737 |
-
|
| 738 |
-
debug_btn = gr.Button("π Check System Status", variant="secondary")
|
| 739 |
-
debug_output = gr.Textbox(label="System Info", interactive=False, lines=4)
|
| 740 |
-
|
| 741 |
-
def generate_test_image(prompt, model_choice, style_choice):
|
| 742 |
-
"""Generate a single image for testing and save locally"""
|
| 743 |
try:
|
| 744 |
if not prompt.strip():
|
| 745 |
return None, "β Please enter a prompt", None
|
| 746 |
|
|
|
|
|
|
|
|
|
|
| 747 |
print(f"π¨ Generating test image with prompt: {prompt}")
|
|
|
|
| 748 |
|
| 749 |
-
#
|
| 750 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 751 |
|
| 752 |
# Generate the image
|
| 753 |
-
image =
|
| 754 |
-
|
| 755 |
model_choice,
|
| 756 |
style_choice,
|
| 757 |
-
|
|
|
|
|
|
|
| 758 |
)
|
| 759 |
|
| 760 |
# Save to local storage
|
| 761 |
filepath, filename = save_image_to_local(image, prompt, style_choice)
|
| 762 |
|
| 763 |
-
if
|
| 764 |
-
save_info = f"πΎ **Saved locally:** `{filepath}`"
|
| 765 |
-
else:
|
| 766 |
-
save_info = "β οΈ Could not save to local storage"
|
| 767 |
|
| 768 |
status_msg = f"""β
Success! Generated: {prompt}
|
| 769 |
|
| 770 |
-
{
|
| 771 |
|
| 772 |
-
π¨ Enhanced prompt: {enhanced_prompt}
|
| 773 |
|
| 774 |
π **Local file:** {filename if filename else 'Not saved'}"""
|
| 775 |
|
|
@@ -780,107 +947,231 @@ with gr.Blocks(title="Premium Children's Book Illustrator", theme="soft") as dem
|
|
| 780 |
print(error_msg)
|
| 781 |
return None, error_msg, None
|
| 782 |
|
| 783 |
-
|
| 784 |
-
"
|
| 785 |
-
|
| 786 |
-
return "β No image to delete", None, None, refresh_local_images()
|
| 787 |
|
| 788 |
-
|
| 789 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 790 |
|
| 791 |
-
|
| 792 |
-
|
| 793 |
-
|
| 794 |
-
|
| 795 |
-
return
|
| 796 |
-
|
| 797 |
-
|
| 798 |
-
|
| 799 |
-
|
| 800 |
-
|
| 801 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 802 |
|
| 803 |
-
|
| 804 |
-
|
| 805 |
-
|
| 806 |
-
|
| 807 |
-
|
|
|
|
|
|
|
|
|
|
| 808 |
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
|
| 812 |
-
|
| 813 |
-
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 817 |
- Model: {current_model_name}
|
|
|
|
|
|
|
| 818 |
- OCI API: {OCI_API_BASE_URL}
|
| 819 |
- Local Storage: {get_local_storage_info().get('total_files', 0)} images
|
| 820 |
-
- Active Jobs: {
|
| 821 |
-
- Ready for
|
| 822 |
-
|
| 823 |
-
|
| 824 |
-
|
| 825 |
-
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
|
| 834 |
-
|
| 835 |
-
|
| 836 |
-
|
| 837 |
-
|
| 838 |
-
|
| 839 |
-
|
| 840 |
-
|
| 841 |
-
|
| 842 |
-
|
| 843 |
-
|
| 844 |
-
|
| 845 |
-
|
| 846 |
-
|
| 847 |
-
|
| 848 |
-
|
| 849 |
-
|
| 850 |
-
|
| 851 |
-
|
| 852 |
-
|
| 853 |
-
|
| 854 |
-
|
| 855 |
-
|
| 856 |
-
|
| 857 |
-
|
| 858 |
-
|
| 859 |
-
|
| 860 |
-
|
| 861 |
-
|
| 862 |
-
|
| 863 |
-
|
| 864 |
-
|
| 865 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 866 |
|
| 867 |
-
|
| 868 |
-
|
| 869 |
-
|
|
|
|
| 870 |
|
| 871 |
# Enhanced root endpoint that explains the API structure
|
| 872 |
@app.get("/")
|
| 873 |
async def root():
|
| 874 |
return {
|
| 875 |
-
"message": "Storybook Generator API is running!",
|
| 876 |
"api_endpoints": {
|
| 877 |
"health_check": "GET /api/health",
|
| 878 |
"generate_storybook": "POST /api/generate-storybook",
|
| 879 |
"check_job_status": "GET /api/job-status/{job_id}",
|
| 880 |
"local_images": "GET /api/local-images"
|
| 881 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 882 |
"web_interface": "GET /ui",
|
| 883 |
-
"note": "Use API endpoints for programmatic access
|
| 884 |
}
|
| 885 |
|
| 886 |
# Add a simple test endpoint
|
|
@@ -888,11 +1179,13 @@ async def root():
|
|
| 888 |
async def test_endpoint():
|
| 889 |
return {
|
| 890 |
"status": "success",
|
| 891 |
-
"message": "API is working correctly",
|
|
|
|
|
|
|
| 892 |
"timestamp": datetime.now().isoformat()
|
| 893 |
}
|
| 894 |
|
| 895 |
-
# For Hugging Face Spaces deployment
|
| 896 |
def get_app():
|
| 897 |
return app
|
| 898 |
|
|
@@ -907,9 +1200,10 @@ if __name__ == "__main__":
|
|
| 907 |
print("π Running on Hugging Face Spaces - Integrated Mode")
|
| 908 |
print("π API endpoints available at: /api/*")
|
| 909 |
print("π¨ Web interface available at: /ui")
|
|
|
|
| 910 |
print("π Both API and UI running on same port")
|
| 911 |
|
| 912 |
-
#
|
| 913 |
gr.mount_gradio_app(app, demo, path="/ui")
|
| 914 |
|
| 915 |
# Run the combined app
|
|
@@ -924,6 +1218,7 @@ if __name__ == "__main__":
|
|
| 924 |
print("π Running locally - Separate API and UI servers")
|
| 925 |
print("π API endpoints: http://localhost:8000/api/*")
|
| 926 |
print("π¨ Web interface: http://localhost:7860/ui")
|
|
|
|
| 927 |
|
| 928 |
def run_fastapi():
|
| 929 |
"""Run FastAPI on port 8000 for API calls"""
|
|
@@ -960,5 +1255,4 @@ if __name__ == "__main__":
|
|
| 960 |
while True:
|
| 961 |
time.sleep(1)
|
| 962 |
except KeyboardInterrupt:
|
| 963 |
-
print("π Shutting down servers...")
|
| 964 |
-
|
|
|
|
| 48 |
COMPLETED = "completed"
|
| 49 |
FAILED = "failed"
|
| 50 |
|
| 51 |
+
# Enhanced Story scene model with character consistency
|
| 52 |
class StoryScene(BaseModel):
|
| 53 |
visual: str
|
| 54 |
text: str
|
| 55 |
+
characters_present: List[str] = [] # Which characters are in this scene
|
| 56 |
+
scene_type: str = "general" # "action", "dialogue", "establishing", etc.
|
| 57 |
|
| 58 |
class CharacterDescription(BaseModel):
|
| 59 |
name: str
|
| 60 |
description: str
|
| 61 |
+
visual_prompt: str = "" # Detailed visual description for AI
|
| 62 |
+
key_features: List[str] = [] # Critical features that must stay consistent
|
| 63 |
|
| 64 |
class StorybookRequest(BaseModel):
|
| 65 |
story_title: str
|
|
|
|
| 68 |
model_choice: str = "dreamshaper-8"
|
| 69 |
style: str = "childrens_book"
|
| 70 |
callback_url: Optional[str] = None
|
| 71 |
+
consistency_seed: Optional[int] = None # For consistent character generation
|
| 72 |
|
| 73 |
class JobStatusResponse(BaseModel):
|
| 74 |
job_id: str
|
|
|
|
| 88 |
"sd-2.1": "stabilityai/stable-diffusion-2-1",
|
| 89 |
}
|
| 90 |
|
| 91 |
+
# FALLBACK CHARACTER TEMPLATES (used only if n8n doesn't provide character details)
|
| 92 |
+
FALLBACK_CHARACTER_TEMPLATES = {
|
| 93 |
+
"Sparkle the Star Cat": {
|
| 94 |
+
"visual_prompt": "small white kitten with distinctive silver star-shaped spots on fur, big golden eyes, shiny blue collar with star charm, playful expression",
|
| 95 |
+
"key_features": ["star-shaped spots", "blue collar", "golden eyes", "white fur"],
|
| 96 |
+
},
|
| 97 |
+
"Benny the Bunny": {
|
| 98 |
+
"visual_prompt": "fluffy brown rabbit with long ears, bright green eyes, red scarf around neck, cheerful expression",
|
| 99 |
+
"key_features": ["red scarf", "long ears", "green eyes", "brown fur"],
|
| 100 |
+
},
|
| 101 |
+
"Tilly the Turtle": {
|
| 102 |
+
"visual_prompt": "gentle green turtle with shiny turquoise shell decorated with swirl patterns, wise expression, slow-moving",
|
| 103 |
+
"key_features": ["turquoise shell", "swirl patterns", "green skin", "wise expression"],
|
| 104 |
+
}
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
# GLOBAL STORAGE
|
| 108 |
job_storage = {}
|
| 109 |
model_cache = {}
|
|
|
|
| 156 |
load_model("dreamshaper-8")
|
| 157 |
print("β
Model loaded and ready!")
|
| 158 |
|
| 159 |
+
# DYNAMIC CHARACTER PROCESSING FUNCTIONS
|
| 160 |
+
def process_character_descriptions(characters_from_request):
|
| 161 |
+
"""Process character descriptions from n8n and create consistency templates"""
|
| 162 |
+
character_templates = {}
|
| 163 |
+
|
| 164 |
+
for character in characters_from_request:
|
| 165 |
+
char_name = character.name
|
| 166 |
+
|
| 167 |
+
# Use provided visual_prompt or generate from description
|
| 168 |
+
if character.visual_prompt:
|
| 169 |
+
visual_prompt = character.visual_prompt
|
| 170 |
+
else:
|
| 171 |
+
# Generate visual prompt from description
|
| 172 |
+
visual_prompt = generate_visual_prompt_from_description(character.description, char_name)
|
| 173 |
+
|
| 174 |
+
# Use provided key_features or extract from description
|
| 175 |
+
if character.key_features:
|
| 176 |
+
key_features = character.key_features
|
| 177 |
+
else:
|
| 178 |
+
key_features = extract_key_features_from_description(character.description)
|
| 179 |
+
|
| 180 |
+
character_templates[char_name] = {
|
| 181 |
+
"visual_prompt": visual_prompt,
|
| 182 |
+
"key_features": key_features,
|
| 183 |
+
"consistency_keywords": f"consistent character, same {char_name.split()[-1].lower()}, maintaining appearance",
|
| 184 |
+
"source": "n8n_request" # Track where this template came from
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
print(f"β
Processed {len(character_templates)} characters from n8n request")
|
| 188 |
+
return character_templates
|
| 189 |
+
|
| 190 |
+
def generate_visual_prompt_from_description(description, character_name):
|
| 191 |
+
"""Generate a visual prompt from character description"""
|
| 192 |
+
# Basic extraction of visual elements
|
| 193 |
+
description_lower = description.lower()
|
| 194 |
+
|
| 195 |
+
# Extract species/type
|
| 196 |
+
species_keywords = ["kitten", "cat", "rabbit", "bunny", "turtle", "dog", "bird", "dragon", "bear", "fox"]
|
| 197 |
+
species = "character"
|
| 198 |
+
for keyword in species_keywords:
|
| 199 |
+
if keyword in description_lower:
|
| 200 |
+
species = keyword
|
| 201 |
+
break
|
| 202 |
+
|
| 203 |
+
# Extract colors
|
| 204 |
+
color_keywords = ["white", "black", "brown", "red", "blue", "green", "yellow", "golden", "silver", "orange"]
|
| 205 |
+
colors = []
|
| 206 |
+
for color in color_keywords:
|
| 207 |
+
if color in description_lower:
|
| 208 |
+
colors.append(color)
|
| 209 |
+
|
| 210 |
+
# Extract distinctive features
|
| 211 |
+
feature_keywords = ["spots", "stripes", "collar", "scarf", "shell", "wings", "horn", "tail", "ears", "eyes"]
|
| 212 |
+
features = []
|
| 213 |
+
for feature in feature_keywords:
|
| 214 |
+
if feature in description_lower:
|
| 215 |
+
features.append(feature)
|
| 216 |
+
|
| 217 |
+
# Build visual prompt
|
| 218 |
+
visual_prompt_parts = []
|
| 219 |
+
if colors:
|
| 220 |
+
visual_prompt_parts.append(f"{' '.join(colors)} {species}")
|
| 221 |
+
else:
|
| 222 |
+
visual_prompt_parts.append(species)
|
| 223 |
+
|
| 224 |
+
visual_prompt_parts.append(character_name)
|
| 225 |
|
| 226 |
+
if features:
|
| 227 |
+
visual_prompt_parts.append(f"with {', '.join(features)}")
|
| 228 |
+
|
| 229 |
+
# Add emotional/character traits
|
| 230 |
+
trait_keywords = ["playful", "brave", "curious", "kind", "cheerful", "wise", "calm", "friendly"]
|
| 231 |
+
traits = [trait for trait in trait_keywords if trait in description_lower]
|
| 232 |
+
if traits:
|
| 233 |
+
visual_prompt_parts.append(f"{', '.join(traits)} expression")
|
| 234 |
+
|
| 235 |
+
visual_prompt = " ".join(visual_prompt_parts)
|
| 236 |
+
print(f"π§ Generated visual prompt for {character_name}: {visual_prompt}")
|
| 237 |
+
|
| 238 |
+
return visual_prompt
|
| 239 |
+
|
| 240 |
+
def extract_key_features_from_description(description):
|
| 241 |
+
"""Extract key features from character description"""
|
| 242 |
+
description_lower = description.lower()
|
| 243 |
+
key_features = []
|
| 244 |
+
|
| 245 |
+
# Look for distinctive physical features
|
| 246 |
+
feature_patterns = [
|
| 247 |
+
r"(\w+)\s+(?:spots|stripes|marks)",
|
| 248 |
+
r"(\w+)\s+(?:collar|scarf|ribbon)",
|
| 249 |
+
r"(\w+)\s+(?:eyes|fur|skin|shell)",
|
| 250 |
+
r"(\w+)\s+(?:ears|tail|wings|horn)"
|
| 251 |
+
]
|
| 252 |
+
|
| 253 |
+
for pattern in feature_patterns:
|
| 254 |
+
matches = re.findall(pattern, description_lower)
|
| 255 |
+
key_features.extend(matches)
|
| 256 |
+
|
| 257 |
+
# Remove duplicates and limit to 3 most important features
|
| 258 |
+
key_features = list(set(key_features))[:3]
|
| 259 |
+
|
| 260 |
+
# If no features found, use some defaults based on character type
|
| 261 |
+
if not key_features:
|
| 262 |
+
if any(word in description_lower for word in ["kitten", "cat"]):
|
| 263 |
+
key_features = ["whiskers", "tail", "paws"]
|
| 264 |
+
elif any(word in description_lower for word in ["rabbit", "bunny"]):
|
| 265 |
+
key_features = ["long ears", "fluffy tail", "paws"]
|
| 266 |
+
elif any(word in description_lower for word in ["turtle"]):
|
| 267 |
+
key_features = ["shell", "slow moving", "wise eyes"]
|
| 268 |
+
else:
|
| 269 |
+
key_features = ["distinctive appearance", "consistent features"]
|
| 270 |
+
|
| 271 |
+
print(f"π§ Extracted key features: {key_features}")
|
| 272 |
+
return key_features
|
| 273 |
+
|
| 274 |
+
# ENHANCED PROMPT ENGINEERING WITH DYNAMIC CHARACTER CONSISTENCY
|
| 275 |
+
def enhance_prompt_with_characters(scene_visual, characters_present, character_templates, style="childrens_book", scene_number=1):
|
| 276 |
+
"""Create prompts that maintain character consistency using dynamic templates"""
|
| 277 |
+
|
| 278 |
+
# Get character descriptions for this scene
|
| 279 |
+
character_descriptions = []
|
| 280 |
+
consistency_keywords = []
|
| 281 |
+
|
| 282 |
+
for char_name in characters_present:
|
| 283 |
+
if char_name in character_templates:
|
| 284 |
+
char_data = character_templates[char_name]
|
| 285 |
+
character_descriptions.append(f"{char_name}: {char_data['visual_prompt']}")
|
| 286 |
+
consistency_keywords.append(char_data['consistency_keywords'])
|
| 287 |
+
else:
|
| 288 |
+
# Fallback if character not in templates
|
| 289 |
+
character_descriptions.append(f"{char_name}: distinctive character")
|
| 290 |
+
consistency_keywords.append(f"consistent {char_name}")
|
| 291 |
+
|
| 292 |
+
# Style templates
|
| 293 |
style_templates = {
|
| 294 |
+
"childrens_book": "children's book illustration, watercolor style, soft colors, whimsical, magical, storybook art, professional illustration",
|
| 295 |
+
"realistic": "photorealistic, detailed, natural lighting, professional photography",
|
| 296 |
+
"fantasy": "fantasy art, magical, ethereal, digital painting, concept art",
|
| 297 |
+
"anime": "anime style, Japanese animation, vibrant colors, detailed artwork"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 298 |
}
|
| 299 |
|
| 300 |
+
style_prompt = style_templates.get(style, style_templates["childrens_book"])
|
| 301 |
+
|
| 302 |
+
# Build the enhanced prompt
|
| 303 |
+
character_context = ". ".join(character_descriptions)
|
| 304 |
+
consistency_context = ", ".join(consistency_keywords)
|
| 305 |
|
| 306 |
+
enhanced_prompt = (
|
| 307 |
+
f"{style_prompt}, {scene_visual}. "
|
| 308 |
+
f"Featuring: {character_context}. "
|
| 309 |
+
f"Maintain character consistency: {consistency_context}. "
|
| 310 |
+
f"Scene {scene_number} of storybook series. "
|
| 311 |
+
)
|
| 312 |
|
| 313 |
+
# Quality boosters for consistency
|
| 314 |
quality_boosters = [
|
| 315 |
+
"consistent character design", "maintain identical features",
|
| 316 |
+
"same characters throughout", "continuous visual narrative",
|
| 317 |
+
"professional storybook illustration", "cohesive art style",
|
| 318 |
+
"character continuity", "consistent proportions"
|
| 319 |
]
|
| 320 |
|
| 321 |
+
enhanced_prompt += ", ".join(quality_boosters)
|
|
|
|
| 322 |
|
| 323 |
+
# Enhanced negative prompt to avoid inconsistencies
|
| 324 |
negative_prompt = (
|
| 325 |
+
"inconsistent characters, different appearances, changing features, "
|
| 326 |
+
"multiple versions of same character, inconsistent art style, "
|
| 327 |
+
"blurry, low quality, bad anatomy, deformed characters, "
|
| 328 |
+
"wrong proportions, mismatched features, different art style"
|
| 329 |
)
|
| 330 |
|
| 331 |
+
return enhanced_prompt, negative_prompt
|
| 332 |
|
| 333 |
+
def extract_characters_from_visual(visual_description, available_characters):
    """Extract character names mentioned in a scene's visual description.

    Args:
        visual_description: Free-text scene description (e.g. from n8n).
        available_characters: Iterable of known character names.

    Returns:
        List of character names (input order, no duplicates) whose full
        name or first word appears in the description, case-insensitively.
    """
    visual_lower = visual_description.lower()
    characters = []

    for char_name in available_characters:
        name_lower = char_name.lower()
        words = name_lower.split()
        # Skip blank/whitespace-only names instead of raising IndexError
        # on ``split()[0]`` as the previous implementation did.
        if not words:
            continue
        # Match on either the first word ("Sparkle") or the full name
        # ("Sparkle the Star Cat").
        if words[0] in visual_lower or name_lower in visual_lower:
            if char_name not in characters:
                characters.append(char_name)

    return characters
|
| 346 |
+
|
| 347 |
+
def generate_character_reference_sheet(characters):
    """Build a per-character reference sheet for consistent rendering.

    Maps each character's name to a dict with its base visual prompt, key
    features, and the top-two features every scene must include. When
    ``visual_prompt`` or ``key_features`` are missing, fallbacks are
    derived from the character's free-text description.
    """
    reference_sheet = {}

    for character in characters:
        name = character.name

        if character.visual_prompt:
            base_prompt = character.visual_prompt
        else:
            # No explicit prompt supplied — derive one from the description.
            base_prompt = generate_visual_prompt_from_description(character.description, name)

        if character.key_features:
            key_features = character.key_features
            must_include = character.key_features[:2]
        else:
            key_features = extract_key_features_from_description(character.description)
            must_include = []

        reference_sheet[name] = {
            "name": name,
            "base_prompt": base_prompt,
            "key_features": key_features,
            "must_include": must_include,
        }

    return reference_sheet
|
| 361 |
+
|
| 362 |
+
def generate_consistent_image(prompt, model_choice, style, characters_present, character_templates, scene_number, consistency_seed=None):
    """Generate one scene image with character-consistency measures.

    Args:
        prompt: Raw visual description for the scene.
        model_choice: Key into MODEL_CHOICES selecting the diffusion model.
        style: Art-style key (e.g. "childrens_book").
        characters_present: Names of characters appearing in this scene.
        character_templates: Per-character prompt templates for consistency.
        scene_number: 1-based scene index; offsets the seed per scene.
        consistency_seed: Optional fixed base seed shared by all scenes.

    Returns:
        The first PIL image produced by the diffusion pipeline.

    Raises:
        Re-raises any model-load/pipeline failure after logging it.
    """
    import zlib  # local import: only needed for the stable fallback seed

    # Enhance prompt with character consistency cues.
    enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
        prompt, characters_present, character_templates, style, scene_number
    )

    # Derive a base seed shared across scenes so characters stay consistent.
    # ``is not None`` (rather than truthiness) lets callers pass seed 0.
    if consistency_seed is not None:
        base_seed = consistency_seed
    elif characters_present:
        # crc32 is stable across interpreter runs, unlike built-in hash(),
        # which is randomized per process (PYTHONHASHSEED) and would make
        # "consistent" seeds differ between runs.
        base_seed = zlib.crc32("".join(characters_present).encode("utf-8")) % 1000000
    else:
        base_seed = random.randint(1000, 9999)

    # Offset slightly per scene while keeping the shared character base seed.
    scene_seed = base_seed + scene_number

    try:
        pipe = load_model(model_choice)

        image = pipe(
            prompt=enhanced_prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=35,  # increased for better quality
            guidance_scale=7.5,      # slightly lower for more consistency
            width=768,
            height=768,
            generator=torch.Generator(device="cpu").manual_seed(scene_seed)
        ).images[0]

        print(f"✅ Generated consistent image for scene {scene_number}")
        print(f"👥 Characters: {characters_present}")
        print(f"🌱 Seed used: {scene_seed}")

        return image

    except Exception as e:
        print(f"❌ Consistent generation failed: {str(e)}")
        raise
|
| 401 |
|
| 402 |
+
# Backward compatibility functions
|
| 403 |
+
def enhance_prompt(prompt, style="childrens_book"):
    """Legacy wrapper kept for backward compatibility.

    Delegates to enhance_prompt_with_characters with no characters, no
    templates, and scene number 1, returning its
    (enhanced_prompt, negative_prompt) pair.
    """
    no_characters = []
    no_templates = {}
    return enhance_prompt_with_characters(prompt, no_characters, no_templates, style, 1)
|
| 406 |
+
|
| 407 |
+
def generate_high_quality_image(prompt, model_choice="dreamshaper-8", style="childrens_book", negative_prompt=""):
    """Legacy wrapper kept for backward compatibility.

    Forwards to generate_consistent_image with no character context and
    scene number 1. NOTE(review): ``negative_prompt`` is accepted for
    interface compatibility but is not forwarded — the consistency
    pipeline builds its own negative prompt internally.
    """
    no_characters = []
    no_templates = {}
    return generate_consistent_image(prompt, model_choice, style, no_characters, no_templates, 1)
|
| 410 |
+
|
| 411 |
+
# LOCAL FILE MANAGEMENT FUNCTIONS (unchanged)
|
| 412 |
def save_image_to_local(image, prompt, style="test"):
|
| 413 |
"""Save image to local persistent storage"""
|
| 414 |
try:
|
|
|
|
| 488 |
print(f"Error refreshing local images: {e}")
|
| 489 |
return []
|
| 490 |
|
| 491 |
+
# OCI BUCKET FUNCTIONS (unchanged)
|
| 492 |
def save_to_oci_bucket(image, text_content, story_title, page_number, file_type="image"):
|
| 493 |
"""Save both images and text to OCI bucket via your OCI API"""
|
| 494 |
try:
|
|
|
|
| 529 |
except Exception as e:
|
| 530 |
raise Exception(f"OCI upload failed: {str(e)}")
|
| 531 |
|
| 532 |
+
# JOB MANAGEMENT FUNCTIONS
|
| 533 |
def create_job(story_request: StorybookRequest) -> str:
|
| 534 |
job_id = str(uuid.uuid4())
|
| 535 |
|
| 536 |
+
# Process character descriptions from n8n
|
| 537 |
+
character_templates = process_character_descriptions(story_request.characters)
|
| 538 |
+
character_references = generate_character_reference_sheet(story_request.characters)
|
| 539 |
+
|
| 540 |
job_storage[job_id] = {
|
| 541 |
"status": JobStatus.PENDING,
|
| 542 |
"progress": 0,
|
|
|
|
| 545 |
"result": None,
|
| 546 |
"created_at": time.time(),
|
| 547 |
"updated_at": time.time(),
|
| 548 |
+
"pages": [],
|
| 549 |
+
"character_templates": character_templates,
|
| 550 |
+
"character_references": character_references
|
| 551 |
}
|
| 552 |
|
| 553 |
print(f"π Created job {job_id} for story: {story_request.story_title}")
|
| 554 |
+
print(f"π₯ Processed {len(character_templates)} characters from n8n request")
|
| 555 |
+
|
| 556 |
return job_id
|
| 557 |
|
| 558 |
def update_job_status(job_id: str, status: JobStatus, progress: int, message: str, result=None):
|
|
|
|
| 577 |
try:
|
| 578 |
callback_url = request_data["callback_url"]
|
| 579 |
|
| 580 |
+
# Enhanced callback data
|
| 581 |
callback_data = {
|
| 582 |
"job_id": job_id,
|
| 583 |
"status": status.value,
|
|
|
|
| 585 |
"message": message,
|
| 586 |
"story_title": request_data["story_title"],
|
| 587 |
"total_scenes": len(request_data["scenes"]),
|
| 588 |
+
"total_characters": len(request_data["characters"]),
|
| 589 |
"timestamp": time.time(),
|
| 590 |
"source": "huggingface-storybook-generator",
|
| 591 |
"estimated_time_remaining": calculate_remaining_time(job_id, progress)
|
|
|
|
| 597 |
"total_pages": result.get("total_pages", 0),
|
| 598 |
"generation_time": result.get("generation_time", 0),
|
| 599 |
"oci_bucket_url": result.get("oci_bucket_url", ""),
|
| 600 |
+
"pages_generated": result.get("generated_pages", 0),
|
| 601 |
+
"characters_used": result.get("characters_used", 0)
|
| 602 |
}
|
| 603 |
|
| 604 |
# Add current scene info for processing jobs
|
|
|
|
| 606 |
current_scene = progress // (100 // len(request_data["scenes"])) + 1
|
| 607 |
callback_data["current_scene"] = current_scene
|
| 608 |
callback_data["total_scenes"] = len(request_data["scenes"])
|
| 609 |
+
if current_scene <= len(request_data["scenes"]):
|
| 610 |
+
scene_visual = request_data["scenes"][current_scene-1]["visual"]
|
| 611 |
+
callback_data["scene_description"] = scene_visual[:100] + "..."
|
| 612 |
+
|
| 613 |
+
# Add characters in current scene
|
| 614 |
+
if "characters_present" in request_data["scenes"][current_scene-1]:
|
| 615 |
+
callback_data["characters_in_scene"] = request_data["scenes"][current_scene-1]["characters_present"]
|
| 616 |
|
| 617 |
headers = {
|
| 618 |
'Content-Type': 'application/json',
|
|
|
|
| 650 |
|
| 651 |
return "Unknown"
|
| 652 |
|
| 653 |
+
# ENHANCED BACKGROUND TASK WITH DYNAMIC CHARACTER CONSISTENCY
|
| 654 |
def generate_storybook_background(job_id: str):
|
| 655 |
+
"""Background task to generate complete storybook with dynamic character consistency"""
|
| 656 |
try:
|
| 657 |
job_data = job_storage[job_id]
|
| 658 |
story_request_data = job_data["request"]
|
| 659 |
story_request = StorybookRequest(**story_request_data)
|
| 660 |
+
character_templates = job_data["character_templates"]
|
| 661 |
|
| 662 |
+
print(f"π¬ Starting DYNAMIC storybook generation for job {job_id}")
|
| 663 |
print(f"π Story: {story_request.story_title}")
|
| 664 |
+
print(f"π₯ Characters: {len(story_request.characters)} (from n8n)")
|
| 665 |
+
print(f"π Scenes: {len(story_request.scenes)}")
|
| 666 |
+
print(f"π± Consistency seed: {story_request.consistency_seed}")
|
| 667 |
+
|
| 668 |
+
# Log character details
|
| 669 |
+
for char in story_request.characters:
|
| 670 |
+
print(f" - {char.name}: {char.description[:50]}...")
|
| 671 |
|
| 672 |
+
update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation with dynamic character consistency...")
|
| 673 |
|
| 674 |
total_scenes = len(story_request.scenes)
|
| 675 |
generated_pages = []
|
|
|
|
| 677 |
|
| 678 |
for i, scene in enumerate(story_request.scenes):
|
| 679 |
progress = 5 + int((i / total_scenes) * 90)
|
| 680 |
+
|
| 681 |
+
# Extract characters for this scene
|
| 682 |
+
characters_present = []
|
| 683 |
+
if hasattr(scene, 'characters_present') and scene.characters_present:
|
| 684 |
+
characters_present = scene.characters_present
|
| 685 |
+
else:
|
| 686 |
+
# Fallback: extract from visual description using available characters
|
| 687 |
+
available_chars = [char.name for char in story_request.characters]
|
| 688 |
+
characters_present = extract_characters_from_visual(scene.visual, available_chars)
|
| 689 |
+
|
| 690 |
update_job_status(
|
| 691 |
job_id,
|
| 692 |
JobStatus.PROCESSING,
|
| 693 |
progress,
|
| 694 |
+
f"Generating page {i+1}/{total_scenes} with {len(characters_present)} characters: {scene.visual[:50]}..."
|
| 695 |
)
|
| 696 |
|
| 697 |
try:
|
| 698 |
+
print(f"πΌοΈ Generating page {i+1} with characters: {characters_present}")
|
| 699 |
|
| 700 |
+
# Generate consistent image using dynamic character templates
|
| 701 |
+
image = generate_consistent_image(
|
| 702 |
+
scene.visual,
|
|
|
|
|
|
|
|
|
|
| 703 |
story_request.model_choice,
|
| 704 |
story_request.style,
|
| 705 |
+
characters_present,
|
| 706 |
+
character_templates,
|
| 707 |
+
i + 1,
|
| 708 |
+
story_request.consistency_seed
|
| 709 |
)
|
| 710 |
|
| 711 |
# Save IMAGE to OCI bucket
|
|
|
|
| 732 |
"image_url": image_url,
|
| 733 |
"text_url": text_url,
|
| 734 |
"text_content": scene.text,
|
| 735 |
+
"visual_description": scene.visual,
|
| 736 |
+
"characters_present": characters_present,
|
| 737 |
+
"prompt_used": f"Dynamic consistent generation with {len(characters_present)} characters"
|
| 738 |
}
|
| 739 |
generated_pages.append(page_data)
|
| 740 |
|
| 741 |
+
print(f"β
Page {i+1} completed - Characters: {characters_present}")
|
| 742 |
|
| 743 |
except Exception as e:
|
| 744 |
error_msg = f"Failed to generate page {i+1}: {str(e)}"
|
|
|
|
| 757 |
"generation_time": round(generation_time, 2),
|
| 758 |
"folder_path": f"stories/{story_request.story_title}",
|
| 759 |
"oci_bucket_url": f"https://oci.com/stories/{story_request.story_title}",
|
| 760 |
+
"consistency_seed": story_request.consistency_seed,
|
| 761 |
+
"character_names": [char.name for char in story_request.characters],
|
| 762 |
"pages": generated_pages,
|
| 763 |
"file_structure": {
|
| 764 |
"images": [f"page_{i+1:03d}.png" for i in range(total_scenes)],
|
|
|
|
| 770 |
job_id,
|
| 771 |
JobStatus.COMPLETED,
|
| 772 |
100,
|
| 773 |
+
f"π Storybook completed! {len(generated_pages)} pages with {len(story_request.characters)} dynamic characters created in {generation_time:.2f}s.",
|
| 774 |
result
|
| 775 |
)
|
| 776 |
|
| 777 |
+
print(f"π DYNAMIC Storybook generation finished for job {job_id}")
|
| 778 |
print(f"π Saved to: stories/{story_request.story_title} in OCI bucket")
|
| 779 |
+
print(f"π₯ Dynamic character consistency maintained for {len(story_request.characters)} characters across {total_scenes} scenes")
|
| 780 |
|
| 781 |
except Exception as e:
|
| 782 |
+
error_msg = f"Dynamic story generation failed: {str(e)}"
|
| 783 |
print(f"β {error_msg}")
|
| 784 |
update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
|
| 785 |
|
| 786 |
# FASTAPI ENDPOINTS (for n8n)
|
| 787 |
@app.post("/api/generate-storybook")
|
| 788 |
async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
|
| 789 |
+
"""Main endpoint for n8n integration - generates complete storybook with dynamic character consistency"""
|
| 790 |
try:
|
| 791 |
print(f"π₯ Received n8n request for story: {request.get('story_title', 'Unknown')}")
|
| 792 |
|
| 793 |
+
# Add consistency seed if not provided
|
| 794 |
+
if 'consistency_seed' not in request or not request['consistency_seed']:
|
| 795 |
+
request['consistency_seed'] = random.randint(1000, 9999)
|
| 796 |
+
print(f"π± Generated consistency seed: {request['consistency_seed']}")
|
| 797 |
+
|
| 798 |
+
# Ensure characters have required fields
|
| 799 |
+
if 'characters' in request:
|
| 800 |
+
for char in request['characters']:
|
| 801 |
+
if 'visual_prompt' not in char or not char['visual_prompt']:
|
| 802 |
+
# Generate visual prompt from description if not provided
|
| 803 |
+
char['visual_prompt'] = ""
|
| 804 |
+
if 'key_features' not in char:
|
| 805 |
+
char['key_features'] = []
|
| 806 |
+
|
| 807 |
# Convert to Pydantic model
|
| 808 |
story_request = StorybookRequest(**request)
|
| 809 |
|
|
|
|
| 820 |
# Immediate response for n8n
|
| 821 |
response_data = {
|
| 822 |
"status": "success",
|
| 823 |
+
"message": "Storybook generation with dynamic character consistency started successfully",
|
| 824 |
"job_id": job_id,
|
| 825 |
"story_title": story_request.story_title,
|
| 826 |
"total_scenes": len(story_request.scenes),
|
| 827 |
+
"total_characters": len(story_request.characters),
|
| 828 |
+
"character_names": [char.name for char in story_request.characters],
|
| 829 |
+
"consistency_seed": story_request.consistency_seed,
|
| 830 |
"callback_url": story_request.callback_url,
|
| 831 |
+
"estimated_time_seconds": len(story_request.scenes) * 35,
|
| 832 |
"timestamp": datetime.now().isoformat()
|
| 833 |
}
|
| 834 |
|
| 835 |
+
print(f"β
Job {job_id} started with dynamic character consistency for: {story_request.story_title}")
|
| 836 |
|
| 837 |
return response_data
|
| 838 |
|
|
|
|
| 867 |
"timestamp": datetime.now().isoformat(),
|
| 868 |
"active_jobs": len(job_storage),
|
| 869 |
"models_loaded": list(model_cache.keys()),
|
| 870 |
+
"fallback_templates": list(FALLBACK_CHARACTER_TEMPLATES.keys()),
|
| 871 |
"oci_api_connected": OCI_API_BASE_URL
|
| 872 |
}
|
| 873 |
|
|
|
|
| 887 |
except Exception as e:
|
| 888 |
return {"status": "error", "message": str(e)}
|
| 889 |
|
| 890 |
+
# Enhanced Gradio interface with dynamic character testing
|
| 891 |
+
def create_gradio_interface():
|
| 892 |
+
"""Create Gradio interface with dynamic character consistency features"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 893 |
|
| 894 |
+
def generate_test_image_with_characters(prompt, model_choice, style_choice, character_names_text):
|
| 895 |
+
"""Generate a single image for testing character consistency"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 896 |
try:
|
| 897 |
if not prompt.strip():
|
| 898 |
return None, "β Please enter a prompt", None
|
| 899 |
|
| 900 |
+
# Parse character names from text input
|
| 901 |
+
character_names = [name.strip() for name in character_names_text.split(",") if name.strip()]
|
| 902 |
+
|
| 903 |
print(f"π¨ Generating test image with prompt: {prompt}")
|
| 904 |
+
print(f"π₯ Character names: {character_names}")
|
| 905 |
|
| 906 |
+
# Create dynamic character templates for testing
|
| 907 |
+
character_templates = {}
|
| 908 |
+
for char_name in character_names:
|
| 909 |
+
character_templates[char_name] = {
|
| 910 |
+
"visual_prompt": f"{char_name}, distinctive appearance, consistent features",
|
| 911 |
+
"key_features": ["consistent appearance", "maintain features"],
|
| 912 |
+
"consistency_keywords": f"consistent {char_name}"
|
| 913 |
+
}
|
| 914 |
+
|
| 915 |
+
# Enhance the prompt with character consistency
|
| 916 |
+
enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
|
| 917 |
+
prompt, character_names, character_templates, style_choice, 1
|
| 918 |
+
)
|
| 919 |
|
| 920 |
# Generate the image
|
| 921 |
+
image = generate_consistent_image(
|
| 922 |
+
prompt,
|
| 923 |
model_choice,
|
| 924 |
style_choice,
|
| 925 |
+
character_names,
|
| 926 |
+
character_templates,
|
| 927 |
+
1
|
| 928 |
)
|
| 929 |
|
| 930 |
# Save to local storage
|
| 931 |
filepath, filename = save_image_to_local(image, prompt, style_choice)
|
| 932 |
|
| 933 |
+
character_info = f"π₯ Characters: {', '.join(character_names)}" if character_names else "π₯ No specific characters"
|
|
|
|
|
|
|
|
|
|
| 934 |
|
| 935 |
status_msg = f"""β
Success! Generated: {prompt}
|
| 936 |
|
| 937 |
+
{character_info}
|
| 938 |
|
| 939 |
+
π¨ Enhanced prompt: {enhanced_prompt[:200]}...
|
| 940 |
|
| 941 |
π **Local file:** {filename if filename else 'Not saved'}"""
|
| 942 |
|
|
|
|
| 947 |
print(error_msg)
|
| 948 |
return None, error_msg, None
|
| 949 |
|
| 950 |
+
with gr.Blocks(title="Premium Children's Book Illustrator with Dynamic Character Consistency", theme="soft") as demo:
|
| 951 |
+
gr.Markdown("# π¨ Premium Children's Book Illustrator")
|
| 952 |
+
gr.Markdown("Generate **studio-quality** storybook images with **dynamic character consistency**")
|
|
|
|
| 953 |
|
| 954 |
+
# Storage info display
|
| 955 |
+
storage_info = gr.Textbox(
|
| 956 |
+
label="π Local Storage Information",
|
| 957 |
+
interactive=False,
|
| 958 |
+
lines=2
|
| 959 |
+
)
|
| 960 |
|
| 961 |
+
def update_storage_info():
|
| 962 |
+
info = get_local_storage_info()
|
| 963 |
+
if "error" not in info:
|
| 964 |
+
return f"π Local Storage: {info['total_files']} images, {info['total_size_mb']} MB used"
|
| 965 |
+
return "π Local Storage: Unable to calculate"
|
| 966 |
+
|
| 967 |
+
with gr.Row():
|
| 968 |
+
with gr.Column(scale=1):
|
| 969 |
+
gr.Markdown("### π― Quality Settings")
|
| 970 |
+
|
| 971 |
+
model_dropdown = gr.Dropdown(
|
| 972 |
+
label="AI Model",
|
| 973 |
+
choices=list(MODEL_CHOICES.keys()),
|
| 974 |
+
value="dreamshaper-8"
|
| 975 |
+
)
|
| 976 |
+
|
| 977 |
+
style_dropdown = gr.Dropdown(
|
| 978 |
+
label="Art Style",
|
| 979 |
+
choices=["childrens_book", "realistic", "fantasy", "anime"],
|
| 980 |
+
value="childrens_book"
|
| 981 |
+
)
|
| 982 |
+
|
| 983 |
+
# Dynamic character input for testing
|
| 984 |
+
character_names_input = gr.Textbox(
|
| 985 |
+
label="Character Names (comma-separated)",
|
| 986 |
+
placeholder="Enter character names: Sparkle the Star Cat, Benny the Bunny, Tilly the Turtle",
|
| 987 |
+
info="Enter character names to test consistency features",
|
| 988 |
+
lines=2
|
| 989 |
+
)
|
| 990 |
+
|
| 991 |
+
prompt_input = gr.Textbox(
|
| 992 |
+
label="Scene Description",
|
| 993 |
+
placeholder="Describe your scene with character interactions...\nExample: Sparkle the Star Cat chasing butterflies while Benny the Bunny watches",
|
| 994 |
+
lines=3
|
| 995 |
+
)
|
| 996 |
+
|
| 997 |
+
generate_btn = gr.Button("β¨ Generate Premium Image", variant="primary")
|
| 998 |
+
|
| 999 |
+
# Current image management
|
| 1000 |
+
current_file_path = gr.State()
|
| 1001 |
+
delete_btn = gr.Button("ποΈ Delete This Image", variant="stop")
|
| 1002 |
+
delete_status = gr.Textbox(label="Delete Status", interactive=False, lines=2)
|
| 1003 |
+
|
| 1004 |
+
gr.Markdown("### π API Usage for n8n")
|
| 1005 |
+
gr.Markdown("""
|
| 1006 |
+
**For complete storybooks (OCI bucket):**
|
| 1007 |
+
- Endpoint: `POST /api/generate-storybook`
|
| 1008 |
+
- Input: `story_title`, `scenes[]`, `characters[]`
|
| 1009 |
+
- Output: Saves to OCI bucket with dynamic character consistency
|
| 1010 |
+
""")
|
| 1011 |
|
| 1012 |
+
with gr.Column(scale=2):
|
| 1013 |
+
image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
|
| 1014 |
+
status_output = gr.Textbox(label="Status", interactive=False, lines=4)
|
| 1015 |
+
|
| 1016 |
+
# Dynamic character guidance section
|
| 1017 |
+
with gr.Accordion("π₯ Dynamic Character Guidance", open=False):
|
| 1018 |
+
gr.Markdown("""
|
| 1019 |
+
### How to Use Dynamic Characters from n8n:
|
| 1020 |
|
| 1021 |
+
**n8n Payload Structure:**
|
| 1022 |
+
```json
|
| 1023 |
+
{
|
| 1024 |
+
"story_title": "Your Story Title",
|
| 1025 |
+
"characters": [
|
| 1026 |
+
{
|
| 1027 |
+
"name": "Character Name",
|
| 1028 |
+
"description": "Character description...",
|
| 1029 |
+
"visual_prompt": "Detailed visual description", // Optional
|
| 1030 |
+
"key_features": ["feature1", "feature2"] // Optional
|
| 1031 |
+
}
|
| 1032 |
+
],
|
| 1033 |
+
"scenes": [
|
| 1034 |
+
{
|
| 1035 |
+
"visual": "Scene description with characters...",
|
| 1036 |
+
"text": "Scene text...",
|
| 1037 |
+
"characters_present": ["Character Name"] // Optional
|
| 1038 |
+
}
|
| 1039 |
+
]
|
| 1040 |
+
}
|
| 1041 |
+
```
|
| 1042 |
+
|
| 1043 |
+
**Features:**
|
| 1044 |
+
- β
Dynamic character processing from n8n
|
| 1045 |
+
- β
Automatic visual prompt generation
|
| 1046 |
+
- β
Key feature extraction
|
| 1047 |
+
- β
Cross-scene consistency
|
| 1048 |
+
- β
Flexible character numbers and types
|
| 1049 |
+
""")
|
| 1050 |
+
|
| 1051 |
+
# Examples section
|
| 1052 |
+
with gr.Accordion("π‘ Prompt Examples & Tips", open=False):
|
| 1053 |
+
gr.Markdown("""
|
| 1054 |
+
## π¨ Professional Prompt Examples with Dynamic Characters:
|
| 1055 |
+
|
| 1056 |
+
**Best Results with Dynamic Characters:**
|
| 1057 |
+
- "Sparkle the Star Cat chasing butterflies in a sunny meadow"
|
| 1058 |
+
- "Benny the Bunny and Tilly the Turtle having a picnic"
|
| 1059 |
+
- "Multiple characters discovering a magical portal together"
|
| 1060 |
+
|
| 1061 |
+
## β‘ Dynamic Character Consistency Tips:
|
| 1062 |
+
1. **Always mention character names** in your prompts
|
| 1063 |
+
2. **n8n will send character details** automatically
|
| 1064 |
+
3. **The system processes any number** of characters dynamically
|
| 1065 |
+
4. **Consistency is maintained** across all scenes automatically
|
| 1066 |
+
""")
|
| 1067 |
+
|
| 1068 |
+
# Local file management section
|
| 1069 |
+
with gr.Accordion("π Manage Local Test Images", open=True):
|
| 1070 |
+
gr.Markdown("### Locally Saved Images")
|
| 1071 |
+
|
| 1072 |
+
with gr.Row():
|
| 1073 |
+
refresh_btn = gr.Button("π Refresh List")
|
| 1074 |
+
clear_all_btn = gr.Button("ποΈ Clear All Images", variant="stop")
|
| 1075 |
+
|
| 1076 |
+
file_gallery = gr.Gallery(
|
| 1077 |
+
label="Local Images",
|
| 1078 |
+
show_label=True,
|
| 1079 |
+
elem_id="gallery",
|
| 1080 |
+
columns=4,
|
| 1081 |
+
height="auto"
|
| 1082 |
+
)
|
| 1083 |
+
|
| 1084 |
+
clear_status = gr.Textbox(label="Clear Status", interactive=False)
|
| 1085 |
+
|
| 1086 |
+
# Debug section
|
| 1087 |
+
with gr.Accordion("π§ Advanced Settings", open=False):
|
| 1088 |
+
debug_btn = gr.Button("π Check System Status", variant="secondary")
|
| 1089 |
+
debug_output = gr.Textbox(label="System Info", interactive=False, lines=4)
|
| 1090 |
+
|
| 1091 |
+
def check_system_status():
|
| 1092 |
+
"""Check system status"""
|
| 1093 |
+
active_jobs = len(job_storage)
|
| 1094 |
+
return f"""**System Status:**
|
| 1095 |
- Model: {current_model_name}
|
| 1096 |
+
- Dynamic Character Processing: β
Enabled
|
| 1097 |
+
- Fallback Templates: {len(FALLBACK_CHARACTER_TEMPLATES)} available
|
| 1098 |
- OCI API: {OCI_API_BASE_URL}
|
| 1099 |
- Local Storage: {get_local_storage_info().get('total_files', 0)} images
|
| 1100 |
+
- Active Jobs: {active_jobs}
|
| 1101 |
+
- Ready for dynamic character consistency generation!"""
|
| 1102 |
+
|
| 1103 |
+
# Connect buttons to functions
|
| 1104 |
+
generate_btn.click(
|
| 1105 |
+
fn=generate_test_image_with_characters,
|
| 1106 |
+
inputs=[prompt_input, model_dropdown, style_dropdown, character_names_input],
|
| 1107 |
+
outputs=[image_output, status_output, current_file_path]
|
| 1108 |
+
).then(
|
| 1109 |
+
fn=refresh_local_images,
|
| 1110 |
+
outputs=file_gallery
|
| 1111 |
+
).then(
|
| 1112 |
+
fn=update_storage_info,
|
| 1113 |
+
outputs=storage_info
|
| 1114 |
+
)
|
| 1115 |
+
|
| 1116 |
+
delete_btn.click(
|
| 1117 |
+
fn=delete_current_image,
|
| 1118 |
+
inputs=current_file_path,
|
| 1119 |
+
outputs=[delete_status, image_output, status_output, file_gallery]
|
| 1120 |
+
).then(
|
| 1121 |
+
fn=update_storage_info,
|
| 1122 |
+
outputs=storage_info
|
| 1123 |
+
)
|
| 1124 |
+
|
| 1125 |
+
refresh_btn.click(
|
| 1126 |
+
fn=refresh_local_images,
|
| 1127 |
+
outputs=file_gallery
|
| 1128 |
+
).then(
|
| 1129 |
+
fn=update_storage_info,
|
| 1130 |
+
outputs=storage_info
|
| 1131 |
+
)
|
| 1132 |
+
|
| 1133 |
+
clear_all_btn.click(
|
| 1134 |
+
fn=clear_all_images,
|
| 1135 |
+
outputs=[clear_status, file_gallery]
|
| 1136 |
+
).then(
|
| 1137 |
+
fn=update_storage_info,
|
| 1138 |
+
outputs=storage_info
|
| 1139 |
+
)
|
| 1140 |
+
|
| 1141 |
+
debug_btn.click(
|
| 1142 |
+
fn=check_system_status,
|
| 1143 |
+
inputs=None,
|
| 1144 |
+
outputs=debug_output
|
| 1145 |
+
)
|
| 1146 |
+
|
| 1147 |
+
# Initialize on load
|
| 1148 |
+
demo.load(fn=refresh_local_images, outputs=file_gallery)
|
| 1149 |
+
demo.load(fn=update_storage_info, outputs=storage_info)
|
| 1150 |
|
| 1151 |
+
return demo
|
| 1152 |
+
|
| 1153 |
+
# Create enhanced Gradio app
|
| 1154 |
+
demo = create_gradio_interface()
|
| 1155 |
|
| 1156 |
# Enhanced root endpoint that explains the API structure
|
| 1157 |
@app.get("/")
async def root():
    """Root endpoint: describes the API surface and enabled features."""
    endpoints = {
        "health_check": "GET /api/health",
        "generate_storybook": "POST /api/generate-storybook",
        "check_job_status": "GET /api/job-status/{job_id}",
        "local_images": "GET /api/local-images",
    }
    features = {
        "dynamic_characters": "✅ Enabled",
        "character_consistency": "✅ Enabled",
        "flexible_storytelling": "✅ Enabled",
        "n8n_integration": "✅ Enabled",
    }
    return {
        "message": "Storybook Generator API with Dynamic Character Consistency is running!",
        "api_endpoints": endpoints,
        "features": features,
        "web_interface": "GET /ui",
        "note": "Use API endpoints for programmatic access with dynamic characters from n8n",
    }
|
| 1176 |
|
| 1177 |
# Add a simple test endpoint
|
|
|
|
| 1179 |
async def test_endpoint():
    """Liveness probe: confirms the API is up and reports feature status."""
    payload = {
        "status": "success",
        "message": "API with dynamic character consistency is working correctly",
        "dynamic_processing": "✅ Enabled",
        "fallback_templates": len(FALLBACK_CHARACTER_TEMPLATES),
        "timestamp": datetime.now().isoformat(),
    }
    return payload
|
| 1187 |
|
| 1188 |
+
# For Hugging Face Spaces deployment
|
| 1189 |
def get_app():
    """Return the FastAPI application instance (Hugging Face Spaces entry point)."""
    return app
|
| 1191 |
|
|
|
|
| 1200 |
print("π Running on Hugging Face Spaces - Integrated Mode")
|
| 1201 |
print("π API endpoints available at: /api/*")
|
| 1202 |
print("π¨ Web interface available at: /ui")
|
| 1203 |
+
print("π₯ Dynamic character consistency features enabled")
|
| 1204 |
print("π Both API and UI running on same port")
|
| 1205 |
|
| 1206 |
+
# Mount Gradio without reassigning app
|
| 1207 |
gr.mount_gradio_app(app, demo, path="/ui")
|
| 1208 |
|
| 1209 |
# Run the combined app
|
|
|
|
| 1218 |
print("π Running locally - Separate API and UI servers")
|
| 1219 |
print("π API endpoints: http://localhost:8000/api/*")
|
| 1220 |
print("π¨ Web interface: http://localhost:7860/ui")
|
| 1221 |
+
print("π₯ Dynamic character consistency features enabled")
|
| 1222 |
|
| 1223 |
def run_fastapi():
|
| 1224 |
"""Run FastAPI on port 8000 for API calls"""
|
|
|
|
| 1255 |
while True:
|
| 1256 |
time.sleep(1)
|
| 1257 |
except KeyboardInterrupt:
|
| 1258 |
+
print("π Shutting down servers...")
|
|
|