yukee1992 committed on
Commit
a5c960b
·
verified ·
1 Parent(s): f880e38

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +281 -723
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import torch
3
- from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
4
- from PIL import Image
5
  import io
6
  import requests
7
  import os
@@ -9,7 +9,7 @@ from datetime import datetime
9
  import re
10
  import time
11
  import json
12
- from typing import List, Optional, Dict
13
  from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from pydantic import BaseModel
15
  import gc
@@ -19,6 +19,7 @@ import uuid
19
  import hashlib
20
  from enum import Enum
21
  import random
 
22
 
23
  # External OCI API URL - YOUR BUCKET SAVING API
24
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
@@ -29,7 +30,7 @@ os.makedirs(PERSISTENT_IMAGE_DIR, exist_ok=True)
29
  print(f"📁 Created local image directory: {PERSISTENT_IMAGE_DIR}")
30
 
31
  # Initialize FastAPI app
32
- app = FastAPI(title="Storybook Generator API")
33
 
34
  # Add CORS middleware
35
  from fastapi.middleware.cors import CORSMiddleware
@@ -44,6 +45,9 @@ app.add_middleware(
44
  # Job Status Enum
45
  class JobStatus(str, Enum):
46
  PENDING = "pending"
 
 
 
47
  PROCESSING = "processing"
48
  COMPLETED = "completed"
49
  FAILED = "failed"
@@ -52,23 +56,26 @@ class JobStatus(str, Enum):
52
  class StoryScene(BaseModel):
53
  visual: str
54
  text: str
55
- characters_present: List[str] = [] # Which characters are in this scene
56
- scene_type: str = "general" # "action", "dialogue", "establishing", etc.
 
57
 
58
  class CharacterDescription(BaseModel):
59
  name: str
60
  description: str
61
- visual_prompt: str = "" # Detailed visual description for AI
62
- key_features: List[str] = [] # Critical features that must stay consistent
 
63
 
64
  class StorybookRequest(BaseModel):
65
  story_title: str
66
  scenes: List[StoryScene]
67
  characters: List[CharacterDescription] = []
68
- model_choice: str = "dreamshaper-8"
69
  style: str = "childrens_book"
70
  callback_url: Optional[str] = None
71
- consistency_seed: Optional[int] = None # For consistent character generation
 
72
 
73
  class JobStatusResponse(BaseModel):
74
  job_id: str
@@ -79,16 +86,14 @@ class JobStatusResponse(BaseModel):
79
  created_at: float
80
  updated_at: float
81
 
82
- # HIGH-QUALITY MODEL SELECTION
83
  MODEL_CHOICES = {
84
- "dreamshaper-8": "lykon/dreamshaper-8",
85
- "realistic-vision": "SG161222/Realistic_Vision_V5.1",
86
- "anything-v5": "andite/anything-v5.0",
87
- "openjourney": "prompthero/openjourney",
88
- "sd-2.1": "stabilityai/stable-diffusion-2-1",
89
  }
90
 
91
- # FALLBACK CHARACTER TEMPLATES (used only if n8n doesn't provide character details)
92
  FALLBACK_CHARACTER_TEMPLATES = {
93
  "Sparkle the Star Cat": {
94
  "visual_prompt": "small white kitten with distinctive silver star-shaped spots on fur, big golden eyes, shiny blue collar with star charm, playful expression",
@@ -97,22 +102,20 @@ FALLBACK_CHARACTER_TEMPLATES = {
97
  "Benny the Bunny": {
98
  "visual_prompt": "fluffy brown rabbit with long ears, bright green eyes, red scarf around neck, cheerful expression",
99
  "key_features": ["red scarf", "long ears", "green eyes", "brown fur"],
100
- },
101
- "Tilly the Turtle": {
102
- "visual_prompt": "gentle green turtle with shiny turquoise shell decorated with swirl patterns, wise expression, slow-moving",
103
- "key_features": ["turquoise shell", "swirl patterns", "green skin", "wise expression"],
104
  }
105
  }
106
 
107
  # GLOBAL STORAGE
108
  job_storage = {}
109
  model_cache = {}
 
110
  current_model_name = None
111
  current_pipe = None
112
  model_lock = threading.Lock()
113
 
114
- def load_model(model_name="dreamshaper-8"):
115
- """Thread-safe model loading with HIGH-QUALITY settings"""
 
116
  global model_cache, current_model_name, current_pipe
117
 
118
  with model_lock:
@@ -121,9 +124,9 @@ def load_model(model_name="dreamshaper-8"):
121
  current_model_name = model_name
122
  return current_pipe
123
 
124
- print(f"🔄 Loading HIGH-QUALITY model: {model_name}")
125
  try:
126
- model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
127
 
128
  pipe = StableDiffusionPipeline.from_pretrained(
129
  model_id,
@@ -139,24 +142,57 @@ def load_model(model_name="dreamshaper-8"):
139
  current_pipe = pipe
140
  current_model_name = model_name
141
 
142
- print(f"✅ HIGH-QUALITY Model loaded: {model_name}")
143
  return pipe
144
 
145
  except Exception as e:
146
  print(f"❌ Model loading failed: {e}")
147
- return StableDiffusionPipeline.from_pretrained(
148
- "runwayml/stable-diffusion-v1-5",
149
- torch_dtype=torch.float32,
150
- safety_checker=None,
151
- requires_safety_checker=False
152
- ).to("cpu")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
 
154
- # Initialize default model
155
- print("🚀 Initializing Storybook Generator API...")
156
- load_model("dreamshaper-8")
157
- print("✅ Model loaded and ready!")
158
 
159
- # DYNAMIC CHARACTER PROCESSING FUNCTIONS
160
  def process_character_descriptions(characters_from_request):
161
  """Process character descriptions from n8n and create consistency templates"""
162
  character_templates = {}
@@ -164,14 +200,11 @@ def process_character_descriptions(characters_from_request):
164
  for character in characters_from_request:
165
  char_name = character.name
166
 
167
- # Use provided visual_prompt or generate from description
168
  if character.visual_prompt:
169
  visual_prompt = character.visual_prompt
170
  else:
171
- # Generate visual prompt from description
172
  visual_prompt = generate_visual_prompt_from_description(character.description, char_name)
173
 
174
- # Use provided key_features or extract from description
175
  if character.key_features:
176
  key_features = character.key_features
177
  else:
@@ -181,7 +214,7 @@ def process_character_descriptions(characters_from_request):
181
  "visual_prompt": visual_prompt,
182
  "key_features": key_features,
183
  "consistency_keywords": f"consistent character, same {char_name.split()[-1].lower()}, maintaining appearance",
184
- "source": "n8n_request" # Track where this template came from
185
  }
186
 
187
  print(f"✅ Processed {len(character_templates)} characters from n8n request")
@@ -189,10 +222,8 @@ def process_character_descriptions(characters_from_request):
189
 
190
  def generate_visual_prompt_from_description(description, character_name):
191
  """Generate a visual prompt from character description"""
192
- # Basic extraction of visual elements
193
  description_lower = description.lower()
194
 
195
- # Extract species/type
196
  species_keywords = ["kitten", "cat", "rabbit", "bunny", "turtle", "dog", "bird", "dragon", "bear", "fox"]
197
  species = "character"
198
  for keyword in species_keywords:
@@ -200,21 +231,18 @@ def generate_visual_prompt_from_description(description, character_name):
200
  species = keyword
201
  break
202
 
203
- # Extract colors
204
  color_keywords = ["white", "black", "brown", "red", "blue", "green", "yellow", "golden", "silver", "orange"]
205
  colors = []
206
  for color in color_keywords:
207
  if color in description_lower:
208
  colors.append(color)
209
 
210
- # Extract distinctive features
211
  feature_keywords = ["spots", "stripes", "collar", "scarf", "shell", "wings", "horn", "tail", "ears", "eyes"]
212
  features = []
213
  for feature in feature_keywords:
214
  if feature in description_lower:
215
  features.append(feature)
216
 
217
- # Build visual prompt
218
  visual_prompt_parts = []
219
  if colors:
220
  visual_prompt_parts.append(f"{' '.join(colors)} {species}")
@@ -226,7 +254,6 @@ def generate_visual_prompt_from_description(description, character_name):
226
  if features:
227
  visual_prompt_parts.append(f"with {', '.join(features)}")
228
 
229
- # Add emotional/character traits
230
  trait_keywords = ["playful", "brave", "curious", "kind", "cheerful", "wise", "calm", "friendly"]
231
  traits = [trait for trait in trait_keywords if trait in description_lower]
232
  if traits:
@@ -242,7 +269,6 @@ def extract_key_features_from_description(description):
242
  description_lower = description.lower()
243
  key_features = []
244
 
245
- # Look for distinctive physical features
246
  feature_patterns = [
247
  r"(\w+)\s+(?:spots|stripes|marks)",
248
  r"(\w+)\s+(?:collar|scarf|ribbon)",
@@ -254,10 +280,8 @@ def extract_key_features_from_description(description):
254
  matches = re.findall(pattern, description_lower)
255
  key_features.extend(matches)
256
 
257
- # Remove duplicates and limit to 3 most important features
258
  key_features = list(set(key_features))[:3]
259
 
260
- # If no features found, use some defaults based on character type
261
  if not key_features:
262
  if any(word in description_lower for word in ["kitten", "cat"]):
263
  key_features = ["whiskers", "tail", "paws"]
@@ -271,11 +295,25 @@ def extract_key_features_from_description(description):
271
  print(f"🔧 Extracted key features: {key_features}")
272
  return key_features
273
 
274
- # ENHANCED PROMPT ENGINEERING WITH DYNAMIC CHARACTER CONSISTENCY
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
  def enhance_prompt_with_characters(scene_visual, characters_present, character_templates, style="childrens_book", scene_number=1):
276
  """Create prompts that maintain character consistency using dynamic templates"""
277
 
278
- # Get character descriptions for this scene
279
  character_descriptions = []
280
  consistency_keywords = []
281
 
@@ -285,11 +323,9 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
285
  character_descriptions.append(f"{char_name}: {char_data['visual_prompt']}")
286
  consistency_keywords.append(char_data['consistency_keywords'])
287
  else:
288
- # Fallback if character not in templates
289
  character_descriptions.append(f"{char_name}: distinctive character")
290
  consistency_keywords.append(f"consistent {char_name}")
291
 
292
- # Style templates
293
  style_templates = {
294
  "childrens_book": "children's book illustration, watercolor style, soft colors, whimsical, magical, storybook art, professional illustration",
295
  "realistic": "photorealistic, detailed, natural lighting, professional photography",
@@ -299,7 +335,6 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
299
 
300
  style_prompt = style_templates.get(style, style_templates["childrens_book"])
301
 
302
- # Build the enhanced prompt
303
  character_context = ". ".join(character_descriptions)
304
  consistency_context = ", ".join(consistency_keywords)
305
 
@@ -310,7 +345,6 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
310
  f"Scene {scene_number} of storybook series. "
311
  )
312
 
313
- # Quality boosters for consistency
314
  quality_boosters = [
315
  "consistent character design", "maintain identical features",
316
  "same characters throughout", "continuous visual narrative",
@@ -320,7 +354,6 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
320
 
321
  enhanced_prompt += ", ".join(quality_boosters)
322
 
323
- # Enhanced negative prompt to avoid inconsistencies
324
  negative_prompt = (
325
  "inconsistent characters, different appearances, changing features, "
326
  "multiple versions of same character, inconsistent art style, "
@@ -330,60 +363,30 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
330
 
331
  return enhanced_prompt, negative_prompt
332
 
333
def extract_characters_from_visual(visual_description, available_characters):
    """Return the subset of *available_characters* mentioned in a scene's visual text.

    A character counts as present when either its full name or its first
    word (e.g. "Sparkle" for "Sparkle the Star Cat") appears, case-
    insensitively, in the visual description.
    """
    haystack = visual_description.lower()
    return [
        name
        for name in available_characters
        if name.split()[0].lower() in haystack or name.lower() in haystack
    ]
346
-
347
def generate_character_reference_sheet(characters):
    """Build a per-character reference dict used for consistent generation.

    Each entry carries the character's base visual prompt and key features,
    falling back to text-derived values when the request did not supply them.
    """
    sheet = {}
    for char in characters:
        name = char.name
        # Prefer the explicitly supplied prompt/features; otherwise derive
        # them from the free-text description.
        base_prompt = char.visual_prompt or generate_visual_prompt_from_description(char.description, name)
        features = char.key_features or extract_key_features_from_description(char.description)
        sheet[name] = {
            "name": name,
            "base_prompt": base_prompt,
            "key_features": features,
            "must_include": char.key_features[:2] if char.key_features else [],
        }
    return sheet
361
-
362
  def generate_consistent_image(prompt, model_choice, style, characters_present, character_templates, scene_number, consistency_seed=None):
363
  """Generate image with character consistency measures using dynamic templates"""
364
 
365
- # Enhance prompt with character consistency
366
  enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
367
  prompt, characters_present, character_templates, style, scene_number
368
  )
369
 
370
- # Use a consistent seed for character generation
371
  if consistency_seed:
372
  base_seed = consistency_seed
373
  else:
374
  base_seed = hash("".join(characters_present)) % 1000000 if characters_present else random.randint(1000, 9999)
375
 
376
- # Adjust seed slightly per scene but maintain character consistency
377
  scene_seed = base_seed + scene_number
378
 
379
  try:
380
  pipe = load_model(model_choice)
 
 
381
 
382
  image = pipe(
383
  prompt=enhanced_prompt,
384
  negative_prompt=negative_prompt,
385
- num_inference_steps=35, # Increased for better quality
386
- guidance_scale=7.5, # Slightly lower for more consistency
387
  width=768,
388
  height=768,
389
  generator=torch.Generator(device="cpu").manual_seed(scene_seed)
@@ -399,118 +402,123 @@ def generate_consistent_image(prompt, model_choice, style, characters_present, c
399
  print(f"❌ Consistent generation failed: {str(e)}")
400
  raise
401
 
402
- # Backward compatibility functions
403
def enhance_prompt(prompt, style="childrens_book"):
    """Legacy shim kept for backward compatibility.

    Delegates to enhance_prompt_with_characters() with no characters,
    an empty template map, and scene number 1.
    """
    prompt_pair = enhance_prompt_with_characters(prompt, [], {}, style, 1)
    return prompt_pair
406
 
407
def generate_high_quality_image(prompt, model_choice="dreamshaper-8", style="childrens_book", negative_prompt=""):
    """Legacy shim kept for backward compatibility.

    Delegates to generate_consistent_image() with no characters or templates
    and scene number 1.
    NOTE(review): the negative_prompt parameter is accepted but not forwarded
    here — confirm no caller relies on it.
    """
    result_image = generate_consistent_image(prompt, model_choice, style, [], {}, 1)
    return result_image
410
-
411
- # LOCAL FILE MANAGEMENT FUNCTIONS
412
def save_image_to_local(image, prompt, style="test"):
    """Persist a generated image under PERSISTENT_IMAGE_DIR/<style>/.

    The filename embeds a sanitized snippet of the prompt plus a timestamp.
    Returns (filepath, filename) on success, (None, None) on any failure.
    """
    try:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Keep only filesystem-safe characters from the first 50 prompt chars.
        snippet = "".join(c for c in prompt[:50] if c.isalnum() or c in (' ', '-', '_')).rstrip()
        filename = f"image_{snippet}_{stamp}.png"

        # Group saved images into a per-style subfolder.
        target_dir = os.path.join(PERSISTENT_IMAGE_DIR, style)
        os.makedirs(target_dir, exist_ok=True)
        filepath = os.path.join(target_dir, filename)

        image.save(filepath)
        print(f"💾 Image saved locally: {filepath}")
        return filepath, filename

    except Exception as e:
        print(f"❌ Failed to save locally: {e}")
        return None, None
 
 
 
 
433
 
434
def delete_local_image(filepath):
    """Remove a single image file from local storage.

    Returns a (success, human-readable message) tuple; missing files and
    OS errors are reported rather than raised.
    """
    try:
        if not os.path.exists(filepath):
            return False, f"❌ File not found: {filepath}"
        os.remove(filepath)
        print(f"🗑️ Deleted local image: {filepath}")
        return True, f"✅ Deleted: {os.path.basename(filepath)}"
    except Exception as exc:
        return False, f"❌ Error deleting: {str(exc)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445
 
446
def get_local_storage_info():
    """Summarize local image storage: file count, total size, newest-first listing.

    Walks PERSISTENT_IMAGE_DIR for .png/.jpg/.jpeg files. Any failure is
    reported as {"error": ...} instead of raising.
    """
    try:
        images = []
        byte_total = 0
        for root, _dirs, names in os.walk(PERSISTENT_IMAGE_DIR):
            for name in names:
                if not name.endswith(('.png', '.jpg', '.jpeg')):
                    continue
                path = os.path.join(root, name)
                # File may vanish between walk() and stat(); skip if gone.
                if not os.path.exists(path):
                    continue
                size = os.path.getsize(path)
                byte_total += size
                images.append({
                    'path': path,
                    'filename': name,
                    'size_kb': round(size / 1024, 1),
                    'created': os.path.getctime(path),
                })

        return {
            "total_files": len(images),
            "total_size_mb": round(byte_total / (1024 * 1024), 2),
            "images": sorted(images, key=lambda entry: entry['created'], reverse=True),
        }
    except Exception as e:
        return {"error": str(e)}
475
 
476
def refresh_local_images():
    """Return paths of every image (.png/.jpg/.jpeg) under PERSISTENT_IMAGE_DIR.

    Errors are logged and reported as an empty list rather than raised.
    """
    try:
        found = []
        for root, _dirs, names in os.walk(PERSISTENT_IMAGE_DIR):
            for name in names:
                if not name.endswith(('.png', '.jpg', '.jpeg')):
                    continue
                path = os.path.join(root, name)
                # Skip entries removed between walk() and this check.
                if os.path.exists(path):
                    found.append(path)
        return found
    except Exception as e:
        print(f"Error refreshing local images: {e}")
        return []
490
 
491
- # OCI BUCKET FUNCTIONS
492
- def save_to_oci_bucket(image, text_content, story_title, page_number, file_type="image"):
493
- """Save both images and text to OCI bucket via your OCI API"""
494
  try:
495
- if file_type == "image":
496
- # Convert image to bytes
497
- img_bytes = io.BytesIO()
498
- image.save(img_bytes, format='PNG')
499
- file_data = img_bytes.getvalue()
500
- filename = f"page_{page_number:03d}.png"
501
- mime_type = "image/png"
502
- else: # text
503
- file_data = text_content.encode('utf-8')
504
- filename = f"page_{page_number:03d}.txt"
505
- mime_type = "text/plain"
506
-
507
- # Use your OCI API to save the file
508
  api_url = f"{OCI_API_BASE_URL}/api/upload"
509
 
 
 
 
 
 
 
510
  files = {'file': (filename, file_data, mime_type)}
511
  data = {
512
  'project_id': 'storybook-library',
513
- 'subfolder': f'stories/{story_title}'
514
  }
515
 
516
  response = requests.post(api_url, files=files, data=data, timeout=30)
@@ -529,13 +537,14 @@ def save_to_oci_bucket(image, text_content, story_title, page_number, file_type=
529
  except Exception as e:
530
  raise Exception(f"OCI upload failed: {str(e)}")
531
 
532
- # JOB MANAGEMENT FUNCTIONS
 
 
 
533
  def create_job(story_request: StorybookRequest) -> str:
534
  job_id = str(uuid.uuid4())
535
 
536
- # Process character descriptions from n8n
537
  character_templates = process_character_descriptions(story_request.characters)
538
- character_references = generate_character_reference_sheet(story_request.characters)
539
 
540
  job_storage[job_id] = {
541
  "status": JobStatus.PENDING,
@@ -547,11 +556,10 @@ def create_job(story_request: StorybookRequest) -> str:
547
  "updated_at": time.time(),
548
  "pages": [],
549
  "character_templates": character_templates,
550
- "character_references": character_references
551
  }
552
 
553
  print(f"📝 Created job {job_id} for story: {story_request.story_title}")
554
- print(f"👥 Processed {len(character_templates)} characters from n8n request")
555
 
556
  return job_id
557
 
@@ -569,7 +577,6 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
569
  if result:
570
  job_storage[job_id]["result"] = result
571
 
572
- # Send webhook notification if callback URL exists
573
  job_data = job_storage[job_id]
574
  request_data = job_data["request"]
575
 
@@ -577,7 +584,6 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
577
  try:
578
  callback_url = request_data["callback_url"]
579
 
580
- # Enhanced callback data
581
  callback_data = {
582
  "job_id": job_id,
583
  "status": status.value,
@@ -586,46 +592,12 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
586
  "story_title": request_data["story_title"],
587
  "total_scenes": len(request_data["scenes"]),
588
  "total_characters": len(request_data["characters"]),
 
589
  "timestamp": time.time(),
590
- "source": "huggingface-storybook-generator",
591
- "estimated_time_remaining": calculate_remaining_time(job_id, progress)
592
  }
593
 
594
- # Add result data for completed jobs
595
- if status == JobStatus.COMPLETED and result:
596
- callback_data["result"] = {
597
- "total_pages": result.get("total_pages", 0),
598
- "generation_time": result.get("generation_time", 0),
599
- "oci_bucket_url": result.get("oci_bucket_url", ""),
600
- "pages_generated": result.get("generated_pages", 0),
601
- "characters_used": result.get("characters_used", 0)
602
- }
603
-
604
- # Add current scene info for processing jobs
605
- if status == JobStatus.PROCESSING:
606
- current_scene = progress // (100 // len(request_data["scenes"])) + 1
607
- callback_data["current_scene"] = current_scene
608
- callback_data["total_scenes"] = len(request_data["scenes"])
609
- if current_scene <= len(request_data["scenes"]):
610
- scene_visual = request_data["scenes"][current_scene-1]["visual"]
611
- callback_data["scene_description"] = scene_visual[:100] + "..."
612
-
613
- # Add characters in current scene
614
- if "characters_present" in request_data["scenes"][current_scene-1]:
615
- callback_data["characters_in_scene"] = request_data["scenes"][current_scene-1]["characters_present"]
616
-
617
- headers = {
618
- 'Content-Type': 'application/json',
619
- 'User-Agent': 'Storybook-Generator/1.0'
620
- }
621
-
622
- response = requests.post(
623
- callback_url,
624
- json=callback_data,
625
- headers=headers,
626
- timeout=30
627
- )
628
-
629
  print(f"📢 Callback sent: Status {response.status_code}")
630
 
631
  except Exception as e:
@@ -633,43 +605,21 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
633
 
634
  return True
635
 
636
def calculate_remaining_time(job_id, progress):
    """Estimate the wall-clock time left for a job as an "XmYs" string.

    Uses simple linear extrapolation of elapsed time against percent
    progress. Returns "Calculating..." before any progress has been made
    and "Unknown" when the job is not in job_storage.
    """
    if progress == 0:
        return "Calculating..."

    job = job_storage.get(job_id)
    if not job:
        return "Unknown"

    elapsed = time.time() - job["created_at"]
    if progress > 0:
        projected_total = (elapsed / progress) * 100
        left = projected_total - elapsed
        return f"{int(left // 60)}m {int(left % 60)}s"

    return "Unknown"
652
 
653
- # ENHANCED BACKGROUND TASK WITH DYNAMIC CHARACTER CONSISTENCY
654
- def generate_storybook_background(job_id: str):
655
- """Background task to generate complete storybook with dynamic character consistency"""
656
  try:
657
  job_data = job_storage[job_id]
658
  story_request_data = job_data["request"]
659
  story_request = StorybookRequest(**story_request_data)
660
  character_templates = job_data["character_templates"]
661
 
662
- print(f"🎬 Starting DYNAMIC storybook generation for job {job_id}")
663
- print(f"📖 Story: {story_request.story_title}")
664
- print(f"👥 Characters: {len(story_request.characters)} (from n8n)")
665
- print(f"📄 Scenes: {len(story_request.scenes)}")
666
- print(f"🌱 Consistency seed: {story_request.consistency_seed}")
667
 
668
- # Log character details
669
- for char in story_request.characters:
670
- print(f" - {char.name}: {char.description[:50]}...")
671
-
672
- update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation with dynamic character consistency...")
673
 
674
  total_scenes = len(story_request.scenes)
675
  generated_pages = []
@@ -678,12 +628,10 @@ def generate_storybook_background(job_id: str):
678
  for i, scene in enumerate(story_request.scenes):
679
  progress = 5 + int((i / total_scenes) * 90)
680
 
681
- # Extract characters for this scene
682
  characters_present = []
683
  if hasattr(scene, 'characters_present') and scene.characters_present:
684
  characters_present = scene.characters_present
685
  else:
686
- # Fallback: extract from visual description using available characters
687
  available_chars = [char.name for char in story_request.characters]
688
  characters_present = extract_characters_from_visual(scene.visual, available_chars)
689
 
@@ -691,13 +639,12 @@ def generate_storybook_background(job_id: str):
691
  job_id,
692
  JobStatus.PROCESSING,
693
  progress,
694
- f"Generating page {i+1}/{total_scenes} with {len(characters_present)} characters: {scene.visual[:50]}..."
695
  )
696
 
697
  try:
698
- print(f"🖼️ Generating page {i+1} with characters: {characters_present}")
699
 
700
- # Generate consistent image using dynamic character templates
701
  image = generate_consistent_image(
702
  scene.visual,
703
  story_request.model_choice,
@@ -708,37 +655,31 @@ def generate_storybook_background(job_id: str):
708
  story_request.consistency_seed
709
  )
710
 
711
- # Save IMAGE to OCI bucket
 
712
  image_url = save_to_oci_bucket(
713
- image,
714
- "", # No text for image
715
- story_request.story_title,
716
- i + 1,
717
  "image"
718
  )
719
 
720
- # Save TEXT to OCI bucket
721
  text_url = save_to_oci_bucket(
722
- None, # No image for text
723
- scene.text,
724
- story_request.story_title,
725
- i + 1,
726
  "text"
727
  )
728
 
729
- # Store page data
730
  page_data = {
731
  "page_number": i + 1,
732
  "image_url": image_url,
733
  "text_url": text_url,
734
  "text_content": scene.text,
735
- "visual_description": scene.visual,
736
- "characters_present": characters_present,
737
- "prompt_used": f"Dynamic consistent generation with {len(characters_present)} characters"
738
  }
739
  generated_pages.append(page_data)
740
 
741
- print(f"✅ Page {i+1} completed - Characters: {characters_present}")
742
 
743
  except Exception as e:
744
  error_msg = f"Failed to generate page {i+1}: {str(e)}"
@@ -746,93 +687,79 @@ def generate_storybook_background(job_id: str):
746
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
747
  return
748
 
749
- # Complete the job
750
  generation_time = time.time() - start_time
751
 
752
  result = {
753
  "story_title": story_request.story_title,
754
  "total_pages": total_scenes,
755
- "characters_used": len(story_request.characters),
756
  "generated_pages": len(generated_pages),
757
  "generation_time": round(generation_time, 2),
758
- "folder_path": f"stories/{story_request.story_title}",
759
- "oci_bucket_url": f"https://oci.com/stories/{story_request.story_title}",
760
- "consistency_seed": story_request.consistency_seed,
761
- "character_names": [char.name for char in story_request.characters],
762
- "pages": generated_pages,
763
- "file_structure": {
764
- "images": [f"page_{i+1:03d}.png" for i in range(total_scenes)],
765
- "texts": [f"page_{i+1:03d}.txt" for i in range(total_scenes)]
766
- }
767
  }
768
 
769
  update_job_status(
770
  job_id,
771
  JobStatus.COMPLETED,
772
  100,
773
- f"🎉 Storybook completed! {len(generated_pages)} pages with {len(story_request.characters)} dynamic characters created in {generation_time:.2f}s.",
774
  result
775
  )
776
 
777
- print(f"🎉 DYNAMIC Storybook generation finished for job {job_id}")
778
- print(f"📁 Saved to: stories/{story_request.story_title} in OCI bucket")
779
- print(f"👥 Dynamic character consistency maintained for {len(story_request.characters)} characters across {total_scenes} scenes")
780
 
781
  except Exception as e:
782
- error_msg = f"Dynamic story generation failed: {str(e)}"
783
  print(f"❌ {error_msg}")
784
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
785
 
786
- # FASTAPI ENDPOINTS (for n8n)
 
 
 
 
 
 
 
 
 
 
 
 
 
787
  @app.post("/api/generate-storybook")
788
- async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
789
- """Main endpoint for n8n integration - generates complete storybook with dynamic character consistency"""
790
  try:
791
- print(f"📥 Received n8n request for story: {request.get('story_title', 'Unknown')}")
792
 
793
- # Add consistency seed if not provided
794
  if 'consistency_seed' not in request or not request['consistency_seed']:
795
  request['consistency_seed'] = random.randint(1000, 9999)
796
- print(f"🌱 Generated consistency seed: {request['consistency_seed']}")
797
 
798
- # Ensure characters have required fields
799
- if 'characters' in request:
800
- for char in request['characters']:
801
- if 'visual_prompt' not in char or not char['visual_prompt']:
802
- # Generate visual prompt from description if not provided
803
- char['visual_prompt'] = ""
804
- if 'key_features' not in char:
805
- char['key_features'] = []
806
 
807
- # Convert to Pydantic model
808
  story_request = StorybookRequest(**request)
809
 
810
- # Validate required fields
811
  if not story_request.story_title or not story_request.scenes:
812
  raise HTTPException(status_code=400, detail="story_title and scenes are required")
813
 
814
- # Create job immediately
815
  job_id = create_job(story_request)
 
816
 
817
- # Start background processing (runs independently of HF idle)
818
- background_tasks.add_task(generate_storybook_background, job_id)
819
-
820
- # Immediate response for n8n
821
  response_data = {
822
  "status": "success",
823
- "message": "Storybook generation with dynamic character consistency started successfully",
824
  "job_id": job_id,
825
  "story_title": story_request.story_title,
826
  "total_scenes": len(story_request.scenes),
827
- "total_characters": len(story_request.characters),
828
- "character_names": [char.name for char in story_request.characters],
829
- "consistency_seed": story_request.consistency_seed,
830
- "callback_url": story_request.callback_url,
831
- "estimated_time_seconds": len(story_request.scenes) * 35,
832
  "timestamp": datetime.now().isoformat()
833
  }
834
 
835
- print(f"✅ Job {job_id} started with dynamic character consistency for: {story_request.story_title}")
836
 
837
  return response_data
838
 
@@ -843,7 +770,6 @@ async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
843
 
844
  @app.get("/api/job-status/{job_id}")
845
  async def get_job_status_endpoint(job_id: str):
846
- """Check job status"""
847
  job_data = job_storage.get(job_id)
848
  if not job_data:
849
  raise HTTPException(status_code=404, detail="Job not found")
@@ -860,364 +786,50 @@ async def get_job_status_endpoint(job_id: str):
860
 
861
  @app.get("/api/health")
862
  async def api_health():
863
- """Health check endpoint for n8n"""
864
  return {
865
  "status": "healthy",
866
  "service": "storybook-generator",
867
  "timestamp": datetime.now().isoformat(),
868
  "active_jobs": len(job_storage),
869
  "models_loaded": list(model_cache.keys()),
870
- "fallback_templates": list(FALLBACK_CHARACTER_TEMPLATES.keys()),
871
  "oci_api_connected": OCI_API_BASE_URL
872
  }
873
 
874
- @app.get("/api/local-images")
875
- async def get_local_images():
876
- """API endpoint to get locally saved test images"""
877
- storage_info = get_local_storage_info()
878
- return storage_info
879
-
880
- @app.delete("/api/local-images/{filename:path}")
881
- async def delete_local_image_api(filename: str):
882
- """API endpoint to delete a local image"""
883
- try:
884
- filepath = os.path.join(PERSISTENT_IMAGE_DIR, filename)
885
- success, message = delete_local_image(filepath)
886
- return {"status": "success" if success else "error", "message": message}
887
- except Exception as e:
888
- return {"status": "error", "message": str(e)}
889
-
890
- # MISSING HELPER FUNCTIONS FOR GRADIO INTERFACE
891
- def delete_current_image(filepath):
892
- """Delete the currently displayed image"""
893
- if not filepath:
894
- return "❌ No image to delete", None, None, refresh_local_images()
895
-
896
- success, message = delete_local_image(filepath)
897
- updated_files = refresh_local_images()
898
-
899
- if success:
900
- status_msg = f"✅ {message}"
901
- return status_msg, None, "Image deleted successfully!", updated_files
902
- else:
903
- return f"❌ {message}", None, "Delete failed", updated_files
904
-
905
- def clear_all_images():
906
- """Delete all local images"""
907
- try:
908
- storage_info = get_local_storage_info()
909
- deleted_count = 0
910
-
911
- if "images" in storage_info:
912
- for image_info in storage_info["images"]:
913
- success, _ = delete_local_image(image_info["path"])
914
- if success:
915
- deleted_count += 1
916
-
917
- updated_files = refresh_local_images()
918
- return f"✅ Deleted {deleted_count} images", updated_files
919
- except Exception as e:
920
- return f"❌ Error: {str(e)}", refresh_local_images()
921
-
922
- # Enhanced Gradio interface with dynamic character testing
923
- def create_gradio_interface():
924
- """Create Gradio interface with dynamic character consistency features"""
925
-
926
- def generate_test_image_with_characters(prompt, model_choice, style_choice, character_names_text):
927
- """Generate a single image for testing character consistency"""
928
- try:
929
- if not prompt.strip():
930
- return None, "❌ Please enter a prompt", None
931
-
932
- # Parse character names from text input
933
- character_names = [name.strip() for name in character_names_text.split(",") if name.strip()]
934
-
935
- print(f"🎨 Generating test image with prompt: {prompt}")
936
- print(f"👥 Character names: {character_names}")
937
-
938
- # Create dynamic character templates for testing
939
- character_templates = {}
940
- for char_name in character_names:
941
- character_templates[char_name] = {
942
- "visual_prompt": f"{char_name}, distinctive appearance, consistent features",
943
- "key_features": ["consistent appearance", "maintain features"],
944
- "consistency_keywords": f"consistent {char_name}"
945
- }
946
-
947
- # Enhance the prompt with character consistency
948
- enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
949
- prompt, character_names, character_templates, style_choice, 1
950
- )
951
-
952
- # Generate the image
953
- image = generate_consistent_image(
954
- prompt,
955
- model_choice,
956
- style_choice,
957
- character_names,
958
- character_templates,
959
- 1
960
- )
961
-
962
- # Save to local storage
963
- filepath, filename = save_image_to_local(image, prompt, style_choice)
964
-
965
- character_info = f"👥 Characters: {', '.join(character_names)}" if character_names else "👥 No specific characters"
966
-
967
- status_msg = f"""✅ Success! Generated: {prompt}
968
-
969
- {character_info}
970
-
971
- 🎨 Enhanced prompt: {enhanced_prompt[:200]}...
972
-
973
- 📁 **Local file:** {filename if filename else 'Not saved'}"""
974
-
975
- return image, status_msg, filepath
976
-
977
- except Exception as e:
978
- error_msg = f"❌ Generation failed: {str(e)}"
979
- print(error_msg)
980
- return None, error_msg, None
981
-
982
- with gr.Blocks(title="Premium Children's Book Illustrator with Dynamic Character Consistency", theme="soft") as demo:
983
- gr.Markdown("# 🎨 Premium Children's Book Illustrator")
984
- gr.Markdown("Generate **studio-quality** storybook images with **dynamic character consistency**")
985
-
986
- # Storage info display
987
- storage_info = gr.Textbox(
988
- label="📊 Local Storage Information",
989
- interactive=False,
990
- lines=2
991
- )
992
-
993
- def update_storage_info():
994
- info = get_local_storage_info()
995
- if "error" not in info:
996
- return f"📁 Local Storage: {info['total_files']} images, {info['total_size_mb']} MB used"
997
- return "📁 Local Storage: Unable to calculate"
998
 
999
  with gr.Row():
1000
- with gr.Column(scale=1):
1001
- gr.Markdown("### 🎯 Quality Settings")
1002
-
1003
- model_dropdown = gr.Dropdown(
1004
- label="AI Model",
1005
- choices=list(MODEL_CHOICES.keys()),
1006
- value="dreamshaper-8"
1007
- )
1008
-
1009
- style_dropdown = gr.Dropdown(
1010
- label="Art Style",
1011
- choices=["childrens_book", "realistic", "fantasy", "anime"],
1012
- value="childrens_book"
1013
- )
1014
-
1015
- # Dynamic character input for testing
1016
- character_names_input = gr.Textbox(
1017
- label="Character Names (comma-separated)",
1018
- placeholder="Enter character names: Sparkle the Star Cat, Benny the Bunny, Tilly the Turtle",
1019
- info="Enter character names to test consistency features",
1020
- lines=2
1021
- )
1022
-
1023
- prompt_input = gr.Textbox(
1024
- label="Scene Description",
1025
- placeholder="Describe your scene with character interactions...\nExample: Sparkle the Star Cat chasing butterflies while Benny the Bunny watches",
1026
- lines=3
1027
- )
1028
-
1029
- generate_btn = gr.Button("✨ Generate Premium Image", variant="primary")
1030
-
1031
- # Current image management
1032
- current_file_path = gr.State()
1033
- delete_btn = gr.Button("🗑️ Delete This Image", variant="stop")
1034
- delete_status = gr.Textbox(label="Delete Status", interactive=False, lines=2)
1035
-
1036
- gr.Markdown("### 📚 API Usage for n8n")
1037
- gr.Markdown("""
1038
- **For complete storybooks (OCI bucket):**
1039
- - Endpoint: `POST /api/generate-storybook`
1040
- - Input: `story_title`, `scenes[]`, `characters[]`
1041
- - Output: Saves to OCI bucket with dynamic character consistency
1042
- """)
1043
-
1044
- with gr.Column(scale=2):
1045
- image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
1046
- status_output = gr.Textbox(label="Status", interactive=False, lines=4)
1047
-
1048
- # Dynamic character guidance section
1049
- with gr.Accordion("👥 Dynamic Character Guidance", open=False):
1050
- gr.Markdown("""
1051
- ### How to Use Dynamic Characters from n8n:
1052
-
1053
- **n8n Payload Structure:**
1054
- ```json
1055
- {
1056
- "story_title": "Your Story Title",
1057
- "characters": [
1058
- {
1059
- "name": "Character Name",
1060
- "description": "Character description...",
1061
- "visual_prompt": "Detailed visual description", // Optional
1062
- "key_features": ["feature1", "feature2"] // Optional
1063
- }
1064
- ],
1065
- "scenes": [
1066
- {
1067
- "visual": "Scene description with characters...",
1068
- "text": "Scene text...",
1069
- "characters_present": ["Character Name"] // Optional
1070
- }
1071
- ]
1072
- }
1073
- ```
1074
-
1075
- **Features:**
1076
- - ✅ Dynamic character processing from n8n
1077
- - ✅ Automatic visual prompt generation
1078
- - ✅ Key feature extraction
1079
- - ✅ Cross-scene consistency
1080
- - ✅ Flexible character numbers and types
1081
- """)
1082
-
1083
- # Examples section
1084
- with gr.Accordion("💡 Prompt Examples & Tips", open=False):
1085
- gr.Markdown("""
1086
- ## 🎨 Professional Prompt Examples with Dynamic Characters:
1087
-
1088
- **Best Results with Dynamic Characters:**
1089
- - "Sparkle the Star Cat chasing butterflies in a sunny meadow"
1090
- - "Benny the Bunny and Tilly the Turtle having a picnic"
1091
- - "Multiple characters discovering a magical portal together"
1092
-
1093
- ## ⚡ Dynamic Character Consistency Tips:
1094
- 1. **Always mention character names** in your prompts
1095
- 2. **n8n will send character details** automatically
1096
- 3. **The system processes any number** of characters dynamically
1097
- 4. **Consistency is maintained** across all scenes automatically
1098
- """)
1099
-
1100
- # Local file management section
1101
- with gr.Accordion("📁 Manage Local Test Images", open=True):
1102
- gr.Markdown("### Locally Saved Images")
1103
-
1104
- with gr.Row():
1105
- refresh_btn = gr.Button("🔄 Refresh List")
1106
- clear_all_btn = gr.Button("🗑️ Clear All Images", variant="stop")
1107
-
1108
- file_gallery = gr.Gallery(
1109
- label="Local Images",
1110
- show_label=True,
1111
- elem_id="gallery",
1112
- columns=4,
1113
- height="auto"
1114
- )
1115
-
1116
- clear_status = gr.Textbox(label="Clear Status", interactive=False)
1117
 
1118
- # Debug section
1119
- with gr.Accordion("🔧 Advanced Settings", open=False):
1120
- debug_btn = gr.Button("🔄 Check System Status", variant="secondary")
1121
- debug_output = gr.Textbox(label="System Info", interactive=False, lines=4)
1122
-
1123
- def check_system_status():
1124
- """Check system status"""
1125
- active_jobs = len(job_storage)
1126
- return f"""**System Status:**
1127
- - Model: {current_model_name}
1128
- - Dynamic Character Processing: ✅ Enabled
1129
- - Fallback Templates: {len(FALLBACK_CHARACTER_TEMPLATES)} available
1130
- - OCI API: {OCI_API_BASE_URL}
1131
- - Local Storage: {get_local_storage_info().get('total_files', 0)} images
1132
- - Active Jobs: {active_jobs}
1133
- - Ready for dynamic character consistency generation!"""
1134
-
1135
- # Connect buttons to functions
1136
- generate_btn.click(
1137
- fn=generate_test_image_with_characters,
1138
- inputs=[prompt_input, model_dropdown, style_dropdown, character_names_input],
1139
- outputs=[image_output, status_output, current_file_path]
1140
- ).then(
1141
- fn=refresh_local_images,
1142
- outputs=file_gallery
1143
- ).then(
1144
- fn=update_storage_info,
1145
- outputs=storage_info
1146
- )
1147
 
1148
- delete_btn.click(
1149
- fn=delete_current_image,
1150
- inputs=current_file_path,
1151
- outputs=[delete_status, image_output, status_output, file_gallery]
1152
- ).then(
1153
- fn=update_storage_info,
1154
- outputs=storage_info
1155
- )
1156
-
1157
- refresh_btn.click(
1158
- fn=refresh_local_images,
1159
- outputs=file_gallery
1160
- ).then(
1161
- fn=update_storage_info,
1162
- outputs=storage_info
1163
- )
1164
-
1165
- clear_all_btn.click(
1166
- fn=clear_all_images,
1167
- outputs=[clear_status, file_gallery]
1168
- ).then(
1169
- fn=update_storage_info,
1170
- outputs=storage_info
1171
- )
1172
-
1173
- debug_btn.click(
1174
- fn=check_system_status,
1175
- inputs=None,
1176
- outputs=debug_output
1177
- )
1178
-
1179
- # Initialize on load
1180
- demo.load(fn=refresh_local_images, outputs=file_gallery)
1181
- demo.load(fn=update_storage_info, outputs=storage_info)
1182
 
1183
  return demo
1184
 
1185
- # Create enhanced Gradio app
1186
- demo = create_gradio_interface()
1187
 
1188
- # Enhanced root endpoint that explains the API structure
1189
  @app.get("/")
1190
  async def root():
1191
  return {
1192
- "message": "Storybook Generator API with Dynamic Character Consistency is running!",
1193
- "api_endpoints": {
1194
- "health_check": "GET /api/health",
1195
- "generate_storybook": "POST /api/generate-storybook",
1196
- "check_job_status": "GET /api/job-status/{job_id}",
1197
- "local_images": "GET /api/local-images"
1198
- },
1199
- "features": {
1200
- "dynamic_characters": "✅ Enabled",
1201
- "character_consistency": "✅ Enabled",
1202
- "flexible_storytelling": "✅ Enabled",
1203
- "n8n_integration": "✅ Enabled"
1204
- },
1205
- "web_interface": "GET /ui",
1206
- "note": "Use API endpoints for programmatic access with dynamic characters from n8n"
1207
- }
1208
-
1209
- # Add a simple test endpoint
1210
- @app.get("/api/test")
1211
- async def test_endpoint():
1212
- return {
1213
- "status": "success",
1214
- "message": "API with dynamic character consistency is working correctly",
1215
- "dynamic_processing": "✅ Enabled",
1216
- "fallback_templates": len(FALLBACK_CHARACTER_TEMPLATES),
1217
- "timestamp": datetime.now().isoformat()
1218
  }
1219
 
1220
- # For Hugging Face Spaces deployment
1221
  def get_app():
1222
  return app
1223
 
@@ -1225,66 +837,12 @@ if __name__ == "__main__":
1225
  import uvicorn
1226
  import os
1227
 
1228
- # Check if we're running on Hugging Face Spaces
1229
  HF_SPACE = os.environ.get('SPACE_ID') is not None
1230
 
1231
  if HF_SPACE:
1232
- print("🚀 Running on Hugging Face Spaces - Integrated Mode")
1233
- print("📚 API endpoints available at: /api/*")
1234
- print("🎨 Web interface available at: /ui")
1235
- print("👥 Dynamic character consistency features enabled")
1236
- print("🔌 Both API and UI running on same port")
1237
-
1238
- # Mount Gradio without reassigning app
1239
  gr.mount_gradio_app(app, demo, path="/ui")
1240
-
1241
- # Run the combined app
1242
- uvicorn.run(
1243
- app,
1244
- host="0.0.0.0",
1245
- port=7860,
1246
- log_level="info"
1247
- )
1248
  else:
1249
- # Local development - run separate servers
1250
- print("🚀 Running locally - Separate API and UI servers")
1251
- print("📚 API endpoints: http://localhost:8000/api/*")
1252
- print("🎨 Web interface: http://localhost:7860/ui")
1253
- print("👥 Dynamic character consistency features enabled")
1254
-
1255
- def run_fastapi():
1256
- """Run FastAPI on port 8000 for API calls"""
1257
- uvicorn.run(
1258
- app,
1259
- host="0.0.0.0",
1260
- port=8000,
1261
- log_level="info",
1262
- access_log=False
1263
- )
1264
-
1265
- def run_gradio():
1266
- """Run Gradio on port 7860 for web interface"""
1267
- demo.launch(
1268
- server_name="0.0.0.0",
1269
- server_port=7860,
1270
- share=False,
1271
- show_error=True,
1272
- quiet=True
1273
- )
1274
-
1275
- # Start both servers in separate threads
1276
- api_thread = threading.Thread(target=run_fastapi, daemon=True)
1277
- ui_thread = threading.Thread(target=run_gradio, daemon=True)
1278
-
1279
- api_thread.start()
1280
- print("✅ FastAPI server started on port 8000")
1281
-
1282
- ui_thread.start()
1283
- print("✅ Gradio server started on port 7860")
1284
-
1285
- # Keep the main thread alive
1286
- try:
1287
- while True:
1288
- time.sleep(1)
1289
- except KeyboardInterrupt:
1290
- print("🛑 Shutting down servers...")
 
1
  import gradio as gr
2
  import torch
3
+ from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler, StableDiffusionInpaintPipeline
4
+ from PIL import Image, ImageDraw
5
  import io
6
  import requests
7
  import os
 
9
  import re
10
  import time
11
  import json
12
+ from typing import List, Optional, Dict, Tuple
13
  from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from pydantic import BaseModel
15
  import gc
 
19
  import hashlib
20
  from enum import Enum
21
  import random
22
+ import numpy as np
23
 
24
  # External OCI API URL - YOUR BUCKET SAVING API
25
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
 
30
  print(f"📁 Created local image directory: {PERSISTENT_IMAGE_DIR}")
31
 
32
  # Initialize FastAPI app
33
+ app = FastAPI(title="Dual-Pipeline Storybook Generator API")
34
 
35
  # Add CORS middleware
36
  from fastapi.middleware.cors import CORSMiddleware
 
45
  # Job Status Enum
46
  class JobStatus(str, Enum):
47
  PENDING = "pending"
48
+ GENERATING_CHARACTERS = "generating_characters"
49
+ GENERATING_BACKGROUNDS = "generating_backgrounds"
50
+ COMPOSING_SCENES = "composing_scenes"
51
  PROCESSING = "processing"
52
  COMPLETED = "completed"
53
  FAILED = "failed"
 
56
  class StoryScene(BaseModel):
57
  visual: str
58
  text: str
59
+ characters_present: List[str] = []
60
+ scene_type: str = "general"
61
+ background_context: str = ""
62
 
63
  class CharacterDescription(BaseModel):
64
  name: str
65
  description: str
66
+ visual_prompt: str = ""
67
+ key_features: List[str] = []
68
+ pose_reference: str = "standing naturally"
69
 
70
  class StorybookRequest(BaseModel):
71
  story_title: str
72
  scenes: List[StoryScene]
73
  characters: List[CharacterDescription] = []
74
+ model_choice: str = "sd-1.5" # CHANGED: Default to working model
75
  style: str = "childrens_book"
76
  callback_url: Optional[str] = None
77
+ consistency_seed: Optional[int] = None
78
+ pipeline_type: str = "standard"
79
 
80
  class JobStatusResponse(BaseModel):
81
  job_id: str
 
86
  created_at: float
87
  updated_at: float
88
 
89
+ # UPDATED MODEL CHOICES - Only use working models
90
  MODEL_CHOICES = {
91
+ "sd-1.5": "runwayml/stable-diffusion-v1-5", # Most reliable
92
+ "openjourney": "prompthero/openjourney", # Public & free
93
+ "sd-2.1": "stabilityai/stable-diffusion-2-1", # Public alternative
 
 
94
  }
95
 
96
+ # FALLBACK CHARACTER TEMPLATES
97
  FALLBACK_CHARACTER_TEMPLATES = {
98
  "Sparkle the Star Cat": {
99
  "visual_prompt": "small white kitten with distinctive silver star-shaped spots on fur, big golden eyes, shiny blue collar with star charm, playful expression",
 
102
  "Benny the Bunny": {
103
  "visual_prompt": "fluffy brown rabbit with long ears, bright green eyes, red scarf around neck, cheerful expression",
104
  "key_features": ["red scarf", "long ears", "green eyes", "brown fur"],
 
 
 
 
105
  }
106
  }
107
 
108
  # GLOBAL STORAGE
109
  job_storage = {}
110
  model_cache = {}
111
+ inpaint_pipe = None
112
  current_model_name = None
113
  current_pipe = None
114
  model_lock = threading.Lock()
115
 
116
+ # FIXED MODEL LOADING - With fallback like old script
117
+ def load_model(model_name="sd-1.5"):
118
+ """Thread-safe model loading with FALLBACK like old working script"""
119
  global model_cache, current_model_name, current_pipe
120
 
121
  with model_lock:
 
124
  current_model_name = model_name
125
  return current_pipe
126
 
127
+ print(f"🔄 Loading model: {model_name}")
128
  try:
129
+ model_id = MODEL_CHOICES.get(model_name, "runwayml/stable-diffusion-v1-5")
130
 
131
  pipe = StableDiffusionPipeline.from_pretrained(
132
  model_id,
 
142
  current_pipe = pipe
143
  current_model_name = model_name
144
 
145
+ print(f"✅ Model loaded: {model_name}")
146
  return pipe
147
 
148
  except Exception as e:
149
  print(f"❌ Model loading failed: {e}")
150
+ # FALLBACK TO SD 1.5 LIKE OLD SCRIPT
151
+ print("🔄 Falling back to stable-diffusion-v1-5")
152
+ try:
153
+ fallback_pipe = StableDiffusionPipeline.from_pretrained(
154
+ "runwayml/stable-diffusion-v1-5",
155
+ torch_dtype=torch.float32,
156
+ safety_checker=None,
157
+ requires_safety_checker=False
158
+ ).to("cpu")
159
+ model_cache["sd-1.5"] = fallback_pipe
160
+ return fallback_pipe
161
+ except Exception as fallback_error:
162
+ print(f"❌ Fallback model also failed: {fallback_error}")
163
+ return None
164
+
165
+ def load_inpaint_model():
166
+ """Load inpainting model for composition"""
167
+ global inpaint_pipe
168
+
169
+ if inpaint_pipe is not None:
170
+ return inpaint_pipe
171
+
172
+ print("🔄 Loading inpainting model...")
173
+ try:
174
+ inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
175
+ "runwayml/stable-diffusion-inpainting",
176
+ torch_dtype=torch.float32,
177
+ safety_checker=None,
178
+ requires_safety_checker=False
179
+ )
180
+ inpaint_pipe = inpaint_pipe.to("cpu")
181
+ print("✅ Inpainting model loaded")
182
+ return inpaint_pipe
183
+ except Exception as e:
184
+ print(f"❌ Inpainting model failed: {e}")
185
+ return None
186
+
187
+ # Initialize models
188
+ print("🚀 Initializing Dual-Pipeline Storybook Generator API...")
189
+ load_model("sd-1.5") # CHANGED: Initialize with working model
190
+ print("✅ Models loaded and ready!")
191
 
192
+ # ============================================================================
193
+ # CHARACTER PROCESSING FUNCTIONS (from old script - working)
194
+ # ============================================================================
 
195
 
 
196
  def process_character_descriptions(characters_from_request):
197
  """Process character descriptions from n8n and create consistency templates"""
198
  character_templates = {}
 
200
  for character in characters_from_request:
201
  char_name = character.name
202
 
 
203
  if character.visual_prompt:
204
  visual_prompt = character.visual_prompt
205
  else:
 
206
  visual_prompt = generate_visual_prompt_from_description(character.description, char_name)
207
 
 
208
  if character.key_features:
209
  key_features = character.key_features
210
  else:
 
214
  "visual_prompt": visual_prompt,
215
  "key_features": key_features,
216
  "consistency_keywords": f"consistent character, same {char_name.split()[-1].lower()}, maintaining appearance",
217
+ "source": "n8n_request"
218
  }
219
 
220
  print(f"✅ Processed {len(character_templates)} characters from n8n request")
 
222
 
223
  def generate_visual_prompt_from_description(description, character_name):
224
  """Generate a visual prompt from character description"""
 
225
  description_lower = description.lower()
226
 
 
227
  species_keywords = ["kitten", "cat", "rabbit", "bunny", "turtle", "dog", "bird", "dragon", "bear", "fox"]
228
  species = "character"
229
  for keyword in species_keywords:
 
231
  species = keyword
232
  break
233
 
 
234
  color_keywords = ["white", "black", "brown", "red", "blue", "green", "yellow", "golden", "silver", "orange"]
235
  colors = []
236
  for color in color_keywords:
237
  if color in description_lower:
238
  colors.append(color)
239
 
 
240
  feature_keywords = ["spots", "stripes", "collar", "scarf", "shell", "wings", "horn", "tail", "ears", "eyes"]
241
  features = []
242
  for feature in feature_keywords:
243
  if feature in description_lower:
244
  features.append(feature)
245
 
 
246
  visual_prompt_parts = []
247
  if colors:
248
  visual_prompt_parts.append(f"{' '.join(colors)} {species}")
 
254
  if features:
255
  visual_prompt_parts.append(f"with {', '.join(features)}")
256
 
 
257
  trait_keywords = ["playful", "brave", "curious", "kind", "cheerful", "wise", "calm", "friendly"]
258
  traits = [trait for trait in trait_keywords if trait in description_lower]
259
  if traits:
 
269
  description_lower = description.lower()
270
  key_features = []
271
 
 
272
  feature_patterns = [
273
  r"(\w+)\s+(?:spots|stripes|marks)",
274
  r"(\w+)\s+(?:collar|scarf|ribbon)",
 
280
  matches = re.findall(pattern, description_lower)
281
  key_features.extend(matches)
282
 
 
283
  key_features = list(set(key_features))[:3]
284
 
 
285
  if not key_features:
286
  if any(word in description_lower for word in ["kitten", "cat"]):
287
  key_features = ["whiskers", "tail", "paws"]
 
295
  print(f"🔧 Extracted key features: {key_features}")
296
  return key_features
297
 
298
+ def extract_characters_from_visual(visual_description, available_characters):
299
+ """Extract character names from visual description using available characters"""
300
+ characters = []
301
+ visual_lower = visual_description.lower()
302
+
303
+ for char_name in available_characters:
304
+ char_identifier = char_name.split()[0].lower()
305
+ if char_identifier in visual_lower or char_name.lower() in visual_lower:
306
+ characters.append(char_name)
307
+
308
+ return characters
309
+
310
+ # ============================================================================
311
+ # STANDARD PIPELINE FUNCTIONS (from old script - working)
312
+ # ============================================================================
313
+
314
  def enhance_prompt_with_characters(scene_visual, characters_present, character_templates, style="childrens_book", scene_number=1):
315
  """Create prompts that maintain character consistency using dynamic templates"""
316
 
 
317
  character_descriptions = []
318
  consistency_keywords = []
319
 
 
323
  character_descriptions.append(f"{char_name}: {char_data['visual_prompt']}")
324
  consistency_keywords.append(char_data['consistency_keywords'])
325
  else:
 
326
  character_descriptions.append(f"{char_name}: distinctive character")
327
  consistency_keywords.append(f"consistent {char_name}")
328
 
 
329
  style_templates = {
330
  "childrens_book": "children's book illustration, watercolor style, soft colors, whimsical, magical, storybook art, professional illustration",
331
  "realistic": "photorealistic, detailed, natural lighting, professional photography",
 
335
 
336
  style_prompt = style_templates.get(style, style_templates["childrens_book"])
337
 
 
338
  character_context = ". ".join(character_descriptions)
339
  consistency_context = ", ".join(consistency_keywords)
340
 
 
345
  f"Scene {scene_number} of storybook series. "
346
  )
347
 
 
348
  quality_boosters = [
349
  "consistent character design", "maintain identical features",
350
  "same characters throughout", "continuous visual narrative",
 
354
 
355
  enhanced_prompt += ", ".join(quality_boosters)
356
 
 
357
  negative_prompt = (
358
  "inconsistent characters, different appearances, changing features, "
359
  "multiple versions of same character, inconsistent art style, "
 
363
 
364
  return enhanced_prompt, negative_prompt
365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  def generate_consistent_image(prompt, model_choice, style, characters_present, character_templates, scene_number, consistency_seed=None):
367
  """Generate image with character consistency measures using dynamic templates"""
368
 
 
369
  enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
370
  prompt, characters_present, character_templates, style, scene_number
371
  )
372
 
 
373
  if consistency_seed:
374
  base_seed = consistency_seed
375
  else:
376
  base_seed = hash("".join(characters_present)) % 1000000 if characters_present else random.randint(1000, 9999)
377
 
 
378
  scene_seed = base_seed + scene_number
379
 
380
  try:
381
  pipe = load_model(model_choice)
382
+ if pipe is None:
383
+ raise Exception("Model not available")
384
 
385
  image = pipe(
386
  prompt=enhanced_prompt,
387
  negative_prompt=negative_prompt,
388
+ num_inference_steps=35,
389
+ guidance_scale=7.5,
390
  width=768,
391
  height=768,
392
  generator=torch.Generator(device="cpu").manual_seed(scene_seed)
 
402
  print(f"❌ Consistent generation failed: {str(e)}")
403
  raise
404
 
405
+ # ============================================================================
406
+ # SIMPLIFIED ENHANCED PIPELINE (Basic composition without complex models)
407
+ # ============================================================================
 
408
 
409
+ def generate_character_image(character: CharacterDescription, model_choice: str, style: str, seed: int = None) -> Image.Image:
410
+ """Generate a single character with simple background"""
411
+
412
+ character_prompt = f"{character.visual_prompt or character.description}, {character.pose_reference}, full body character, children's book character design"
413
+
414
+ character_prompt = re.sub(r'\s+', ' ', character_prompt).strip()
415
+
416
+ negative_prompt = "background, scenery, environment, other characters, blurry, low quality"
417
+
418
+ pipe = load_model(model_choice)
419
+ if pipe is None:
420
+ raise Exception("Model not available")
421
+
422
+ if seed is None:
423
+ seed = hash(character.name) % 1000000
424
+
425
+ generator = torch.Generator(device="cpu").manual_seed(seed)
426
+
427
+ image = pipe(
428
+ prompt=character_prompt,
429
+ negative_prompt=negative_prompt,
430
+ num_inference_steps=25, # Reduced for speed
431
+ guidance_scale=7.0,
432
+ width=512,
433
+ height=768,
434
+ generator=generator
435
+ ).images[0]
436
+
437
+ print(f"✅ Generated character: {character.name}")
438
+ return image
439
 
440
+ def generate_scene_background(scene: StoryScene, model_choice: str, style: str, seed: int = None) -> Image.Image:
441
+ """Generate scene background without characters"""
442
+
443
+ background_prompt = f"{scene.visual} {scene.background_context}, empty scene, no characters, background environment, children's book background"
444
+
445
+ background_prompt = re.sub(r'\s+', ' ', background_prompt).strip()
446
+
447
+ negative_prompt = "characters, people, animals, person, human, animal, blurry, low quality"
448
+
449
+ pipe = load_model(model_choice)
450
+ if pipe is None:
451
+ raise Exception("Model not available")
452
+
453
+ if seed is None:
454
+ seed = random.randint(1000, 9999)
455
+
456
+ generator = torch.Generator(device="cpu").manual_seed(seed)
457
+
458
+ image = pipe(
459
+ prompt=background_prompt,
460
+ negative_prompt=negative_prompt,
461
+ num_inference_steps=25, # Reduced for speed
462
+ guidance_scale=7.0,
463
+ width=768,
464
+ height=768,
465
+ generator=generator
466
+ ).images[0]
467
+
468
+ print(f"✅ Generated background for scene")
469
+ return image
470
 
471
+ def compose_scene_with_characters(background: Image.Image, character_images: Dict[str, Image.Image],
472
+ characters_present: List[str], scene_context: str) -> Image.Image:
473
+ """Simple composition by placing characters on background"""
474
+
475
+ final_image = background.copy()
476
+
477
+ # Simple positioning
478
+ positions = []
479
+ num_chars = len(characters_present)
480
+
481
+ if num_chars == 1:
482
+ positions.append((284, 300, 200, 300)) # Center
483
+ elif num_chars == 2:
484
+ positions.extend([(184, 300, 200, 300), (484, 300, 200, 300)]) # Left & right
485
+ else:
486
+ for i in range(num_chars):
487
+ x = 150 + (i % 3) * 200
488
+ y = 250 + (i // 3) * 200
489
+ positions.append((x, y, 180, 270))
490
+
491
+ for i, char_name in enumerate(characters_present):
492
+ if i >= len(positions) or char_name not in character_images:
493
+ continue
494
+
495
+ char_image = character_images[char_name]
496
+ x, y, width, height = positions[i]
497
 
498
+ char_resized = char_image.resize((width, height))
499
+ final_image.paste(char_resized, (x, y), char_resized)
500
+
501
+ return final_image
 
 
 
502
 
503
+ # ============================================================================
504
+ # OCI BUCKET FUNCTIONS (from old script)
505
+ # ============================================================================
 
 
 
 
 
 
 
 
 
 
 
506
 
507
+ def save_to_oci_bucket(file_data, filename, story_title, file_type="image", subfolder=""):
508
+ """Save files to OCI bucket"""
 
509
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
510
  api_url = f"{OCI_API_BASE_URL}/api/upload"
511
 
512
+ if subfolder:
513
+ full_subfolder = f'stories/{story_title}/{subfolder}'
514
+ else:
515
+ full_subfolder = f'stories/{story_title}'
516
+
517
+ mime_type = "image/png" if file_type == "image" else "text/plain"
518
  files = {'file': (filename, file_data, mime_type)}
519
  data = {
520
  'project_id': 'storybook-library',
521
+ 'subfolder': full_subfolder
522
  }
523
 
524
  response = requests.post(api_url, files=files, data=data, timeout=30)
 
537
  except Exception as e:
538
  raise Exception(f"OCI upload failed: {str(e)}")
539
 
540
+ # ============================================================================
541
+ # JOB MANAGEMENT (from old script with enhancements)
542
+ # ============================================================================
543
+
544
  def create_job(story_request: StorybookRequest) -> str:
545
  job_id = str(uuid.uuid4())
546
 
 
547
  character_templates = process_character_descriptions(story_request.characters)
 
548
 
549
  job_storage[job_id] = {
550
  "status": JobStatus.PENDING,
 
556
  "updated_at": time.time(),
557
  "pages": [],
558
  "character_templates": character_templates,
 
559
  }
560
 
561
  print(f"📝 Created job {job_id} for story: {story_request.story_title}")
562
+ print(f"🚀 Pipeline type: {story_request.pipeline_type}")
563
 
564
  return job_id
565
 
 
577
  if result:
578
  job_storage[job_id]["result"] = result
579
 
 
580
  job_data = job_storage[job_id]
581
  request_data = job_data["request"]
582
 
 
584
  try:
585
  callback_url = request_data["callback_url"]
586
 
 
587
  callback_data = {
588
  "job_id": job_id,
589
  "status": status.value,
 
592
  "story_title": request_data["story_title"],
593
  "total_scenes": len(request_data["scenes"]),
594
  "total_characters": len(request_data["characters"]),
595
+ "pipeline_type": request_data.get("pipeline_type", "standard"),
596
  "timestamp": time.time(),
 
 
597
  }
598
 
599
+ headers = {'Content-Type': 'application/json'}
600
+ response = requests.post(callback_url, json=callback_data, headers=headers, timeout=30)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
601
  print(f"📢 Callback sent: Status {response.status_code}")
602
 
603
  except Exception as e:
 
605
 
606
  return True
607
 
608
+ # ============================================================================
609
+ # BACKGROUND TASKS - SIMPLIFIED
610
+ # ============================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
611
 
612
+ def generate_storybook_standard(job_id: str):
613
+ """Standard pipeline background task"""
 
614
  try:
615
  job_data = job_storage[job_id]
616
  story_request_data = job_data["request"]
617
  story_request = StorybookRequest(**story_request_data)
618
  character_templates = job_data["character_templates"]
619
 
620
+ print(f"🎬 Starting STANDARD pipeline for job {job_id}")
 
 
 
 
621
 
622
+ update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation...")
 
 
 
 
623
 
624
  total_scenes = len(story_request.scenes)
625
  generated_pages = []
 
628
  for i, scene in enumerate(story_request.scenes):
629
  progress = 5 + int((i / total_scenes) * 90)
630
 
 
631
  characters_present = []
632
  if hasattr(scene, 'characters_present') and scene.characters_present:
633
  characters_present = scene.characters_present
634
  else:
 
635
  available_chars = [char.name for char in story_request.characters]
636
  characters_present = extract_characters_from_visual(scene.visual, available_chars)
637
 
 
639
  job_id,
640
  JobStatus.PROCESSING,
641
  progress,
642
+ f"Generating page {i+1}/{total_scenes}..."
643
  )
644
 
645
  try:
646
+ print(f"🖼️ Generating page {i+1}")
647
 
 
648
  image = generate_consistent_image(
649
  scene.visual,
650
  story_request.model_choice,
 
655
  story_request.consistency_seed
656
  )
657
 
658
+ img_bytes = io.BytesIO()
659
+ image.save(img_bytes, format='PNG')
660
  image_url = save_to_oci_bucket(
661
+ img_bytes.getvalue(),
662
+ f"page_{i+1:03d}.png",
663
+ story_request.story_title,
 
664
  "image"
665
  )
666
 
 
667
  text_url = save_to_oci_bucket(
668
+ scene.text.encode('utf-8'),
669
+ f"page_{i+1:03d}.txt",
670
+ story_request.story_title,
 
671
  "text"
672
  )
673
 
 
674
  page_data = {
675
  "page_number": i + 1,
676
  "image_url": image_url,
677
  "text_url": text_url,
678
  "text_content": scene.text,
 
 
 
679
  }
680
  generated_pages.append(page_data)
681
 
682
+ print(f"✅ Page {i+1} completed")
683
 
684
  except Exception as e:
685
  error_msg = f"Failed to generate page {i+1}: {str(e)}"
 
687
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
688
  return
689
 
 
690
  generation_time = time.time() - start_time
691
 
692
  result = {
693
  "story_title": story_request.story_title,
694
  "total_pages": total_scenes,
 
695
  "generated_pages": len(generated_pages),
696
  "generation_time": round(generation_time, 2),
697
+ "pipeline_used": "standard",
698
+ "pages": generated_pages
 
 
 
 
 
 
 
699
  }
700
 
701
  update_job_status(
702
  job_id,
703
  JobStatus.COMPLETED,
704
  100,
705
+ f"🎉 Standard pipeline completed! {len(generated_pages)} pages in {generation_time:.2f}s.",
706
  result
707
  )
708
 
709
+ print(f"🎉 STANDARD pipeline finished for job {job_id}")
 
 
710
 
711
  except Exception as e:
712
+ error_msg = f"Standard pipeline failed: {str(e)}"
713
  print(f"❌ {error_msg}")
714
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
715
 
716
def generate_storybook_dispatcher(job_id: str):
    """Dispatch a queued job to the appropriate generation pipeline.

    Reads the requested ``pipeline_type`` from the stored job request.
    Currently every request is routed to the standard pipeline; a
    non-standard request is logged so the silent fallback is visible.

    Args:
        job_id: Key of the job previously created in ``job_storage``.
    """
    job_data = job_storage[job_id]
    story_request_data = job_data["request"]

    pipeline_type = story_request_data.get("pipeline_type", "standard")
    if pipeline_type != "standard":
        # Make the downgrade observable instead of silently ignoring the request.
        print(f"⚠️ Pipeline '{pipeline_type}' requested but not yet available; using standard")

    # For now, only use standard pipeline until models are stable
    generate_storybook_standard(job_id)
725
+
726
+ # ============================================================================
727
+ # FASTAPI ENDPOINTS (simplified)
728
+ # ============================================================================
729
+
730
  @app.post("/api/generate-storybook")
731
+ async def generate_storybook_unified(request: dict, background_tasks: BackgroundTasks):
732
+ """Unified endpoint that handles both pipelines"""
733
  try:
734
+ print(f"📥 Received storybook request: {request.get('story_title', 'Unknown')}")
735
 
 
736
  if 'consistency_seed' not in request or not request['consistency_seed']:
737
  request['consistency_seed'] = random.randint(1000, 9999)
 
738
 
739
+ # Ensure model_choice is valid
740
+ if request.get('model_choice') not in MODEL_CHOICES:
741
+ request['model_choice'] = "sd-1.5" # Force to working model
 
 
 
 
 
742
 
 
743
  story_request = StorybookRequest(**request)
744
 
 
745
  if not story_request.story_title or not story_request.scenes:
746
  raise HTTPException(status_code=400, detail="story_title and scenes are required")
747
 
 
748
  job_id = create_job(story_request)
749
+ background_tasks.add_task(generate_storybook_dispatcher, job_id)
750
 
 
 
 
 
751
  response_data = {
752
  "status": "success",
753
+ "message": f"Storybook generation started with {story_request.pipeline_type} pipeline",
754
  "job_id": job_id,
755
  "story_title": story_request.story_title,
756
  "total_scenes": len(story_request.scenes),
757
+ "model_choice": story_request.model_choice,
758
+ "pipeline_type": story_request.pipeline_type,
 
 
 
759
  "timestamp": datetime.now().isoformat()
760
  }
761
 
762
+ print(f"✅ Job {job_id} started")
763
 
764
  return response_data
765
 
 
770
 
771
  @app.get("/api/job-status/{job_id}")
772
  async def get_job_status_endpoint(job_id: str):
 
773
  job_data = job_storage.get(job_id)
774
  if not job_data:
775
  raise HTTPException(status_code=404, detail="Job not found")
 
786
 
787
  @app.get("/api/health")
788
  async def api_health():
 
789
  return {
790
  "status": "healthy",
791
  "service": "storybook-generator",
792
  "timestamp": datetime.now().isoformat(),
793
  "active_jobs": len(job_storage),
794
  "models_loaded": list(model_cache.keys()),
795
+ "available_models": list(MODEL_CHOICES.keys()),
796
  "oci_api_connected": OCI_API_BASE_URL
797
  }
798
 
799
+ # Simple Gradio interface
800
def create_simple_interface():
    """Build a minimal Gradio UI: a prompt box, a generate button, and an image output."""
    with gr.Blocks(title="Storybook Generator") as demo:
        gr.Markdown("# Storybook Generator")

        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt")
                generate_btn = gr.Button("Generate")
            with gr.Column():
                output = gr.Image(label="Output")

        def generate_image(prompt_text):
            # Lazily load the default model; bail out with None if it is unavailable.
            pipe = load_model("sd-1.5")
            if not pipe:
                return None
            return pipe(prompt_text, num_inference_steps=20).images[0]

        generate_btn.click(generate_image, inputs=prompt, outputs=output)

    return demo
821
 
822
# Build the Gradio UI once at import time so it can be mounted on the FastAPI app below.
demo = create_simple_interface()
 
823
 
 
824
  @app.get("/")
825
  async def root():
826
  return {
827
+ "message": "Storybook Generator API is running!",
828
+ "available_models": list(MODEL_CHOICES.keys()),
829
+ "default_model": "sd-1.5"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
830
  }
831
 
832
# Mount Gradio for Hugging Face Spaces
def get_app():
    """Return the FastAPI application instance (ASGI entry point for external servers)."""
    return app
835
 
 
837
  import uvicorn
838
  import os
839
 
 
840
  HF_SPACE = os.environ.get('SPACE_ID') is not None
841
 
842
  if HF_SPACE:
843
+ print("🚀 Running on Hugging Face Spaces")
 
 
 
 
 
 
844
  gr.mount_gradio_app(app, demo, path="/ui")
845
+ uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
 
 
 
 
 
 
 
846
  else:
847
+ print("🚀 Running locally")
848
+ uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")