yukee1992 commited on
Commit
e881598
·
verified ·
1 Parent(s): 3b58c39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -1367
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import torch
3
- from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler, StableDiffusionInpaintPipeline
4
- from PIL import Image, ImageDraw
5
  import io
6
  import requests
7
  import os
@@ -9,30 +9,24 @@ from datetime import datetime
9
  import re
10
  import time
11
  import json
12
- from typing import List, Optional, Dict, Tuple
13
  from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from pydantic import BaseModel
15
- import gc
16
- import psutil
17
  import threading
18
  import uuid
19
- import hashlib
20
- from enum import Enum
21
  import random
22
- import numpy as np
23
 
24
- # External OCI API URL - YOUR BUCKET SAVING API
25
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
26
 
27
- # Create local directories for test images
28
  PERSISTENT_IMAGE_DIR = "generated_test_images"
29
  os.makedirs(PERSISTENT_IMAGE_DIR, exist_ok=True)
30
  print(f"📁 Created local image directory: {PERSISTENT_IMAGE_DIR}")
31
 
32
  # Initialize FastAPI app
33
- app = FastAPI(title="Dual-Pipeline Storybook Generator API")
34
 
35
- # Add CORS middleware
36
  from fastapi.middleware.cors import CORSMiddleware
37
  app.add_middleware(
38
  CORSMiddleware,
@@ -42,40 +36,31 @@ app.add_middleware(
42
  allow_headers=["*"],
43
  )
44
 
45
- # Job Status Enum
46
  class JobStatus(str, Enum):
47
  PENDING = "pending"
48
- GENERATING_CHARACTERS = "generating_characters"
49
- GENERATING_BACKGROUNDS = "generating_backgrounds"
50
- COMPOSING_SCENES = "composing_scenes"
51
  PROCESSING = "processing"
52
  COMPLETED = "completed"
53
  FAILED = "failed"
54
 
55
- # Enhanced Story scene model with character consistency
56
  class StoryScene(BaseModel):
57
  visual: str
58
  text: str
59
- characters_present: List[str] = [] # Which characters are in this scene
60
- scene_type: str = "general" # "action", "dialogue", "establishing", etc.
61
- background_context: str = "" # Specific background description
62
 
63
  class CharacterDescription(BaseModel):
64
  name: str
65
  description: str
66
- visual_prompt: str = "" # Detailed visual description for AI
67
- key_features: List[str] = [] # Critical features that must stay consistent
68
- pose_reference: str = "standing naturally" # For consistent posing
69
 
70
  class StorybookRequest(BaseModel):
71
  story_title: str
72
  scenes: List[StoryScene]
73
  characters: List[CharacterDescription] = []
74
- model_choice: str = "dreamshaper-8"
75
  style: str = "childrens_book"
76
  callback_url: Optional[str] = None
77
  consistency_seed: Optional[int] = None
78
- pipeline_type: str = "standard" # "standard" or "enhanced"
79
 
80
  class JobStatusResponse(BaseModel):
81
  job_id: str
@@ -86,41 +71,19 @@ class JobStatusResponse(BaseModel):
86
  created_at: float
87
  updated_at: float
88
 
89
- # HIGH-QUALITY MODEL SELECTION
90
  MODEL_CHOICES = {
91
- "dreamshaper-8": "lykon/dreamshaper-8",
92
- "realistic-vision": "SG161222/Realistic_Vision_V5.1",
93
- "anything-v5": "andite/anything-v5.0",
94
- "openjourney": "prompthero/openjourney",
95
  "sd-1.5": "runwayml/stable-diffusion-v1-5",
96
  }
97
 
98
- # FALLBACK CHARACTER TEMPLATES
99
- FALLBACK_CHARACTER_TEMPLATES = {
100
- "Sparkle the Star Cat": {
101
- "visual_prompt": "small white kitten with distinctive silver star-shaped spots on fur, big golden eyes, shiny blue collar with star charm, playful expression",
102
- "key_features": ["star-shaped spots", "blue collar", "golden eyes", "white fur"],
103
- },
104
- "Benny the Bunny": {
105
- "visual_prompt": "fluffy brown rabbit with long ears, bright green eyes, red scarf around neck, cheerful expression",
106
- "key_features": ["red scarf", "long ears", "green eyes", "brown fur"],
107
- },
108
- "Tilly the Turtle": {
109
- "visual_prompt": "gentle green turtle with shiny turquoise shell decorated with swirl patterns, wise expression, slow-moving",
110
- "key_features": ["turquoise shell", "swirl patterns", "green skin", "wise expression"],
111
- }
112
- }
113
-
114
- # GLOBAL STORAGE
115
  job_storage = {}
116
  model_cache = {}
117
- inpaint_pipe = None
118
  current_model_name = None
119
  current_pipe = None
120
  model_lock = threading.Lock()
121
 
122
- def load_model(model_name="dreamshaper-8"):
123
- """Thread-safe model loading with HIGH-QUALITY settings"""
124
  global model_cache, current_model_name, current_pipe
125
 
126
  with model_lock:
@@ -129,9 +92,9 @@ def load_model(model_name="dreamshaper-8"):
129
  current_model_name = model_name
130
  return current_pipe
131
 
132
- print(f"🔄 Loading HIGH-QUALITY model: {model_name}")
133
  try:
134
- model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
135
 
136
  pipe = StableDiffusionPipeline.from_pretrained(
137
  model_id,
@@ -147,535 +110,54 @@ def load_model(model_name="dreamshaper-8"):
147
  current_pipe = pipe
148
  current_model_name = model_name
149
 
150
- print(f"✅ HIGH-QUALITY Model loaded: {model_name}")
151
  return pipe
152
 
153
  except Exception as e:
154
  print(f"❌ Model loading failed: {e}")
155
- return None
156
-
157
- def load_inpaint_model():
158
- """Load inpainting model for composition"""
159
- global inpaint_pipe
160
-
161
- if inpaint_pipe is not None:
162
- return inpaint_pipe
163
-
164
- print("🔄 Loading inpainting model...")
165
- try:
166
- inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
167
- "runwayml/stable-diffusion-inpainting",
168
- torch_dtype=torch.float32,
169
- safety_checker=None,
170
- requires_safety_checker=False
171
- )
172
- inpaint_pipe = inpaint_pipe.to("cpu")
173
- print("✅ Inpainting model loaded")
174
- return inpaint_pipe
175
- except Exception as e:
176
- print(f"❌ Inpainting model failed: {e}")
177
- return None
178
-
179
- # Initialize models
180
- print("🚀 Initializing Dual-Pipeline Storybook Generator API...")
181
- load_model("dreamshaper-8")
182
- load_inpaint_model()
183
- print("✅ Models loaded and ready!")
184
-
185
- # ============================================================================
186
- # CHARACTER PROCESSING FUNCTIONS (for both pipelines)
187
- # ============================================================================
188
-
189
- def process_character_descriptions(characters_from_request):
190
- """Process character descriptions from n8n and create consistency templates"""
191
- character_templates = {}
192
-
193
- for character in characters_from_request:
194
- char_name = character.name
195
-
196
- # Use provided visual_prompt or generate from description
197
- if character.visual_prompt:
198
- visual_prompt = character.visual_prompt
199
- else:
200
- # Generate visual prompt from description
201
- visual_prompt = generate_visual_prompt_from_description(character.description, char_name)
202
-
203
- # Use provided key_features or extract from description
204
- if character.key_features:
205
- key_features = character.key_features
206
- else:
207
- key_features = extract_key_features_from_description(character.description)
208
-
209
- character_templates[char_name] = {
210
- "visual_prompt": visual_prompt,
211
- "key_features": key_features,
212
- "consistency_keywords": f"consistent character, same {char_name.split()[-1].lower()}, maintaining appearance",
213
- "source": "n8n_request"
214
- }
215
-
216
- print(f"✅ Processed {len(character_templates)} characters from n8n request")
217
- return character_templates
218
-
219
- def generate_visual_prompt_from_description(description, character_name):
220
- """Generate a visual prompt from character description"""
221
- description_lower = description.lower()
222
-
223
- # Extract species/type
224
- species_keywords = ["kitten", "cat", "rabbit", "bunny", "turtle", "dog", "bird", "dragon", "bear", "fox"]
225
- species = "character"
226
- for keyword in species_keywords:
227
- if keyword in description_lower:
228
- species = keyword
229
- break
230
-
231
- # Extract colors
232
- color_keywords = ["white", "black", "brown", "red", "blue", "green", "yellow", "golden", "silver", "orange"]
233
- colors = []
234
- for color in color_keywords:
235
- if color in description_lower:
236
- colors.append(color)
237
-
238
- # Extract distinctive features
239
- feature_keywords = ["spots", "stripes", "collar", "scarf", "shell", "wings", "horn", "tail", "ears", "eyes"]
240
- features = []
241
- for feature in feature_keywords:
242
- if feature in description_lower:
243
- features.append(feature)
244
-
245
- # Build visual prompt
246
- visual_prompt_parts = []
247
- if colors:
248
- visual_prompt_parts.append(f"{' '.join(colors)} {species}")
249
- else:
250
- visual_prompt_parts.append(species)
251
-
252
- visual_prompt_parts.append(character_name)
253
-
254
- if features:
255
- visual_prompt_parts.append(f"with {', '.join(features)}")
256
-
257
- # Add emotional/character traits
258
- trait_keywords = ["playful", "brave", "curious", "kind", "cheerful", "wise", "calm", "friendly"]
259
- traits = [trait for trait in trait_keywords if trait in description_lower]
260
- if traits:
261
- visual_prompt_parts.append(f"{', '.join(traits)} expression")
262
-
263
- visual_prompt = " ".join(visual_prompt_parts)
264
- print(f"🔧 Generated visual prompt for {character_name}: {visual_prompt}")
265
-
266
- return visual_prompt
267
-
268
- def extract_key_features_from_description(description):
269
- """Extract key features from character description"""
270
- description_lower = description.lower()
271
- key_features = []
272
-
273
- # Look for distinctive physical features
274
- feature_patterns = [
275
- r"(\w+)\s+(?:spots|stripes|marks)",
276
- r"(\w+)\s+(?:collar|scarf|ribbon)",
277
- r"(\w+)\s+(?:eyes|fur|skin|shell)",
278
- r"(\w+)\s+(?:ears|tail|wings|horn)"
279
- ]
280
-
281
- for pattern in feature_patterns:
282
- matches = re.findall(pattern, description_lower)
283
- key_features.extend(matches)
284
-
285
- # Remove duplicates and limit to 3 most important features
286
- key_features = list(set(key_features))[:3]
287
-
288
- # If no features found, use some defaults based on character type
289
- if not key_features:
290
- if any(word in description_lower for word in ["kitten", "cat"]):
291
- key_features = ["whiskers", "tail", "paws"]
292
- elif any(word in description_lower for word in ["rabbit", "bunny"]):
293
- key_features = ["long ears", "fluffy tail", "paws"]
294
- elif any(word in description_lower for word in ["turtle"]):
295
- key_features = ["shell", "slow moving", "wise eyes"]
296
- else:
297
- key_features = ["distinctive appearance", "consistent features"]
298
-
299
- print(f"🔧 Extracted key features: {key_features}")
300
- return key_features
301
-
302
- def extract_characters_from_visual(visual_description, available_characters):
303
- """Extract character names from visual description using available characters"""
304
- characters = []
305
- visual_lower = visual_description.lower()
306
-
307
- # Check for each available character name in the visual description
308
- for char_name in available_characters:
309
- # Use the first word or main identifier from character name
310
- char_identifier = char_name.split()[0].lower()
311
- if char_identifier in visual_lower or char_name.lower() in visual_lower:
312
- characters.append(char_name)
313
-
314
- return characters
315
-
316
- def generate_character_reference_sheet(characters):
317
- """Generate reference descriptions for consistent character generation"""
318
- reference_sheet = {}
319
-
320
- for character in characters:
321
- char_name = character.name
322
- reference_sheet[char_name] = {
323
- "name": char_name,
324
- "base_prompt": character.visual_prompt if character.visual_prompt else generate_visual_prompt_from_description(character.description, char_name),
325
- "key_features": character.key_features if character.key_features else extract_key_features_from_description(character.description),
326
- "must_include": character.key_features[:2] if character.key_features else []
327
- }
328
-
329
- return reference_sheet
330
-
331
- # ============================================================================
332
- # STANDARD PIPELINE FUNCTIONS (Your original approach)
333
- # ============================================================================
334
-
335
- def enhance_prompt_with_characters(scene_visual, characters_present, character_templates, style="childrens_book", scene_number=1):
336
- """Create prompts that maintain character consistency using dynamic templates"""
337
-
338
- # Get character descriptions for this scene
339
- character_descriptions = []
340
- consistency_keywords = []
341
-
342
- for char_name in characters_present:
343
- if char_name in character_templates:
344
- char_data = character_templates[char_name]
345
- character_descriptions.append(f"{char_name}: {char_data['visual_prompt']}")
346
- consistency_keywords.append(char_data['consistency_keywords'])
347
- else:
348
- # Fallback if character not in templates
349
- character_descriptions.append(f"{char_name}: distinctive character")
350
- consistency_keywords.append(f"consistent {char_name}")
351
-
352
- # Style templates
353
- style_templates = {
354
- "childrens_book": "children's book illustration, watercolor style, soft colors, whimsical, magical, storybook art, professional illustration",
355
- "realistic": "photorealistic, detailed, natural lighting, professional photography",
356
- "fantasy": "fantasy art, magical, ethereal, digital painting, concept art",
357
- "anime": "anime style, Japanese animation, vibrant colors, detailed artwork"
358
- }
359
-
360
- style_prompt = style_templates.get(style, style_templates["childrens_book"])
361
-
362
- # Build the enhanced prompt
363
- character_context = ". ".join(character_descriptions)
364
- consistency_context = ", ".join(consistency_keywords)
365
-
366
- enhanced_prompt = (
367
- f"{style_prompt}, {scene_visual}. "
368
- f"Featuring: {character_context}. "
369
- f"Maintain character consistency: {consistency_context}. "
370
- f"Scene {scene_number} of storybook series. "
371
- )
372
-
373
- # Quality boosters for consistency
374
- quality_boosters = [
375
- "consistent character design", "maintain identical features",
376
- "same characters throughout", "continuous visual narrative",
377
- "professional storybook illustration", "cohesive art style",
378
- "character continuity", "consistent proportions"
379
- ]
380
-
381
- enhanced_prompt += ", ".join(quality_boosters)
382
-
383
- # Enhanced negative prompt to avoid inconsistencies
384
- negative_prompt = (
385
- "inconsistent characters, different appearances, changing features, "
386
- "multiple versions of same character, inconsistent art style, "
387
- "blurry, low quality, bad anatomy, deformed characters, "
388
- "wrong proportions, mismatched features, different art style"
389
- )
390
-
391
- return enhanced_prompt, negative_prompt
392
-
393
- def generate_consistent_image(prompt, model_choice, style, characters_present, character_templates, scene_number, consistency_seed=None):
394
- """Generate image with character consistency measures using dynamic templates"""
395
-
396
- # Enhance prompt with character consistency
397
- enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
398
- prompt, characters_present, character_templates, style, scene_number
399
- )
400
-
401
- # Use a consistent seed for character generation
402
- if consistency_seed:
403
- base_seed = consistency_seed
404
- else:
405
- base_seed = hash("".join(characters_present)) % 1000000 if characters_present else random.randint(1000, 9999)
406
-
407
- # Adjust seed slightly per scene but maintain character consistency
408
- scene_seed = base_seed + scene_number
409
-
410
- try:
411
- pipe = load_model(model_choice)
412
- if pipe is None:
413
- raise Exception("Model not available")
414
-
415
- image = pipe(
416
- prompt=enhanced_prompt,
417
- negative_prompt=negative_prompt,
418
- num_inference_steps=35,
419
- guidance_scale=7.5,
420
- width=768,
421
- height=768,
422
- generator=torch.Generator(device="cpu").manual_seed(scene_seed)
423
- ).images[0]
424
-
425
- print(f"✅ Generated consistent image for scene {scene_number}")
426
- print(f"👥 Characters: {characters_present}")
427
- print(f"🌱 Seed used: {scene_seed}")
428
-
429
- return image
430
-
431
- except Exception as e:
432
- print(f"❌ Consistent generation failed: {str(e)}")
433
- raise
434
-
435
- # ============================================================================
436
- # ENHANCED PIPELINE FUNCTIONS (3-stage approach)
437
- # ============================================================================
438
-
439
- def generate_character_image(character: CharacterDescription, model_choice: str, style: str, seed: int = None) -> Image.Image:
440
- """Generate a single character with transparent background"""
441
-
442
- character_prompt = f"""
443
- {character.visual_prompt or character.description},
444
- {character.pose_reference},
445
- full body character, clean outline, studio lighting,
446
- plain white background, isolated character, no background,
447
- children's book character design, professional illustration,
448
- {style} style, detailed features, vibrant colors
449
- """
450
-
451
- # Clean up prompt
452
- character_prompt = re.sub(r'\s+', ' ', character_prompt).strip()
453
-
454
- negative_prompt = """
455
- background, scenery, environment, other characters,
456
- blurry, low quality, bad anatomy, deformed,
457
- complex background, shadows, ground, text, watermark
458
- """
459
-
460
- pipe = load_model(model_choice)
461
- if pipe is None:
462
- raise Exception("Model not available")
463
-
464
- # Use consistent seed for character
465
- if seed is None:
466
- seed = hash(character.name) % 1000000
467
-
468
- generator = torch.Generator(device="cpu").manual_seed(seed)
469
-
470
- image = pipe(
471
- prompt=character_prompt,
472
- negative_prompt=negative_prompt,
473
- num_inference_steps=30,
474
- guidance_scale=7.5,
475
- width=512,
476
- height=768,
477
- generator=generator
478
- ).images[0]
479
-
480
- # Simple background removal
481
- image = remove_background_simple(image)
482
-
483
- print(f"✅ Generated character: {character.name}")
484
- return image
485
-
486
- def remove_background_simple(image: Image.Image) -> Image.Image:
487
- """Simple background removal (replace with proper segmentation in production)"""
488
- # Convert to RGBA if not already
489
- if image.mode != 'RGBA':
490
- image = image.convert('RGBA')
491
-
492
- # Simple white background removal
493
- datas = image.getdata()
494
- new_data = []
495
- for item in datas:
496
- # Change white (and near-white) pixels to transparent
497
- if item[0] > 200 and item[1] > 200 and item[2] > 200:
498
- new_data.append((255, 255, 255, 0))
499
- else:
500
- new_data.append(item)
501
-
502
- image.putdata(new_data)
503
- return image
504
-
505
- def generate_scene_background(scene: StoryScene, model_choice: str, style: str, seed: int = None) -> Image.Image:
506
- """Generate scene background without characters"""
507
-
508
- background_prompt = f"""
509
- {scene.visual} {scene.background_context},
510
- empty scene, no characters, no people, no animals,
511
- background environment, landscape, setting,
512
- children's book background, {style} style,
513
- detailed background, vibrant colors, professional illustration
514
- """
515
-
516
- # Clean up prompt
517
- background_prompt = re.sub(r'\s+', ' ', background_prompt).strip()
518
-
519
- negative_prompt = """
520
- characters, people, animals, creatures, person, human, animal,
521
- blurry, low quality, deformed objects, text, watermark,
522
- foreground elements, main subject, face, body
523
- """
524
-
525
- pipe = load_model(model_choice)
526
  if pipe is None:
527
  raise Exception("Model not available")
528
 
529
- if seed is None:
530
- seed = random.randint(1000, 9999)
531
-
532
- generator = torch.Generator(device="cpu").manual_seed(seed)
533
 
534
  image = pipe(
535
- prompt=background_prompt,
536
  negative_prompt=negative_prompt,
537
- num_inference_steps=30,
538
  guidance_scale=7.5,
539
  width=768,
540
  height=768,
541
  generator=generator
542
  ).images[0]
543
 
544
- print(f"✅ Generated background for scene")
545
  return image
546
 
547
- def create_character_mask(character_image: Image.Image, position: Tuple[int, int], size: Tuple[int, int]) -> Image.Image:
548
- """Create mask for character placement"""
549
- mask = Image.new("L", (768, 768), 0)
550
- char_resized = character_image.resize(size)
551
-
552
- # Create white mask where character will be placed
553
- mask_canvas = Image.new("L", (768, 768), 0)
554
- mask_canvas.paste(Image.new("L", size, 255), position)
555
-
556
- return mask_canvas
557
-
558
- def smart_character_placement(background: Image.Image, num_characters: int, scene_context: str) -> List[Tuple[int, int, int, int]]:
559
- """Calculate smart positions for characters based on scene context"""
560
- positions = []
561
-
562
- if num_characters == 1:
563
- # Center character
564
- positions.append((284, 300, 200, 300))
565
- elif num_characters == 2:
566
- # Two characters side by side
567
- positions.extend([
568
- (184, 300, 200, 300),
569
- (484, 300, 200, 300)
570
- ])
571
- elif num_characters >= 3:
572
- # Arrange in a grid
573
- for i in range(num_characters):
574
- x = 150 + (i % 3) * 200
575
- y = 250 + (i // 3) * 200
576
- positions.append((x, y, 180, 270))
577
-
578
- return positions
579
-
580
- def compose_scene_with_characters(background: Image.Image, character_images: Dict[str, Image.Image],
581
- characters_present: List[str], scene_context: str) -> Image.Image:
582
- """Simple composition by placing characters on background"""
583
-
584
- # Start with background
585
- final_image = background.copy()
586
-
587
- # Get character positions
588
- positions = smart_character_placement(background, len(characters_present), scene_context)
589
-
590
- for i, char_name in enumerate(characters_present):
591
- if i >= len(positions) or char_name not in character_images:
592
- continue
593
-
594
- char_image = character_images[char_name]
595
- x, y, width, height = positions[i]
596
-
597
- # Resize character
598
- char_resized = char_image.resize((width, height))
599
-
600
- # Paste character with transparency
601
- final_image.paste(char_resized, (x, y), char_resized)
602
-
603
- return final_image
604
-
605
- def compose_with_inpainting(background: Image.Image, character_images: Dict[str, Image.Image],
606
- characters_present: List[str], scene_prompt: str) -> Image.Image:
607
- """Enhanced composition using inpainting for better blending"""
608
-
609
- # Load inpainting model
610
- pipe = load_inpaint_model()
611
- if pipe is None:
612
- # Fallback to simple composition
613
- return compose_scene_with_characters(background, character_images, characters_present, scene_prompt)
614
-
615
- # Start with background
616
- composite = background.copy()
617
-
618
- # Get character positions
619
- positions = smart_character_placement(background, len(characters_present), scene_prompt)
620
-
621
- for i, char_name in enumerate(characters_present):
622
- if i >= len(positions) or char_name not in character_images:
623
- continue
624
-
625
- char_image = character_images[char_name]
626
- x, y, width, height = positions[i]
627
-
628
- # Resize character
629
- char_resized = char_image.resize((width, height))
630
-
631
- # Create temporary composite with character
632
- temp_composite = composite.copy()
633
- temp_composite.paste(char_resized, (x, y), char_resized)
634
-
635
- # Create mask for this character
636
- mask = create_character_mask(char_resized, (x, y), (width, height))
637
-
638
- # Use inpainting to blend character into background
639
- inpainting_prompt = f"""
640
- {scene_prompt}, with {char_name} naturally integrated into the scene,
641
- proper lighting and shadows, realistic composition,
642
- children's book illustration style, consistent lighting
643
- """
644
-
645
- try:
646
- # Apply inpainting
647
- inpainted_image = pipe(
648
- prompt=inpainting_prompt,
649
- image=temp_composite,
650
- mask_image=mask,
651
- num_inference_steps=20,
652
- guidance_scale=7.0,
653
- strength=0.7
654
- ).images[0]
655
-
656
- composite = inpainted_image
657
- print(f"✅ Blended {char_name} into scene with inpainting")
658
-
659
- except Exception as e:
660
- print(f"❌ Inpainting failed for {char_name}, using simple composition: {e}")
661
- composite.paste(char_resized, (x, y), char_resized)
662
-
663
- return composite
664
-
665
- # ============================================================================
666
- # OCI BUCKET FUNCTIONS
667
- # ============================================================================
668
-
669
- def save_to_oci_bucket(file_data, filename, story_title, file_type="image", subfolder=""):
670
- """Save files to OCI bucket with organized structure"""
671
  try:
672
  api_url = f"{OCI_API_BASE_URL}/api/upload"
673
 
674
- if subfolder:
675
- full_subfolder = f'stories/{story_title}/{subfolder}'
676
- else:
677
- full_subfolder = f'stories/{story_title}'
678
-
679
  mime_type = "image/png" if file_type == "image" else "text/plain"
680
  files = {'file': (filename, file_data, mime_type)}
681
  data = {
@@ -685,8 +167,6 @@ def save_to_oci_bucket(file_data, filename, story_title, file_type="image", subf
685
 
686
  response = requests.post(api_url, files=files, data=data, timeout=30)
687
 
688
- print(f"📨 OCI API Response: {response.status_code}")
689
-
690
  if response.status_code == 200:
691
  result = response.json()
692
  if result['status'] == 'success':
@@ -699,17 +179,9 @@ def save_to_oci_bucket(file_data, filename, story_title, file_type="image", subf
699
  except Exception as e:
700
  raise Exception(f"OCI upload failed: {str(e)}")
701
 
702
- # ============================================================================
703
- # JOB MANAGEMENT FUNCTIONS
704
- # ============================================================================
705
-
706
  def create_job(story_request: StorybookRequest) -> str:
707
  job_id = str(uuid.uuid4())
708
 
709
- # Process character descriptions
710
- character_templates = process_character_descriptions(story_request.characters)
711
- character_references = generate_character_reference_sheet(story_request.characters)
712
-
713
  job_storage[job_id] = {
714
  "status": JobStatus.PENDING,
715
  "progress": 0,
@@ -718,15 +190,9 @@ def create_job(story_request: StorybookRequest) -> str:
718
  "result": None,
719
  "created_at": time.time(),
720
  "updated_at": time.time(),
721
- "pages": [],
722
- "character_templates": character_templates,
723
- "character_references": character_references
724
  }
725
 
726
  print(f"📝 Created job {job_id} for story: {story_request.story_title}")
727
- print(f"👥 Processed {len(character_templates)} characters")
728
- print(f"🚀 Pipeline type: {story_request.pipeline_type}")
729
-
730
  return job_id
731
 
732
  def update_job_status(job_id: str, status: JobStatus, progress: int, message: str, result=None):
@@ -743,35 +209,21 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
743
  if result:
744
  job_storage[job_id]["result"] = result
745
 
746
- # Send webhook notification if callback URL exists
747
  job_data = job_storage[job_id]
748
  request_data = job_data["request"]
749
 
750
  if request_data.get("callback_url"):
751
  try:
752
  callback_url = request_data["callback_url"]
753
-
754
  callback_data = {
755
  "job_id": job_id,
756
  "status": status.value,
757
  "progress": progress,
758
  "message": message,
759
  "story_title": request_data["story_title"],
760
- "total_scenes": len(request_data["scenes"]),
761
- "total_characters": len(request_data["characters"]),
762
- "pipeline_type": request_data.get("pipeline_type", "standard"),
763
  "timestamp": time.time(),
764
- "source": "huggingface-storybook-generator"
765
  }
766
 
767
- if status == JobStatus.COMPLETED and result:
768
- callback_data["result"] = {
769
- "total_pages": result.get("total_pages", 0),
770
- "generation_time": result.get("generation_time", 0),
771
- "pipeline_used": result.get("pipeline_used", "standard"),
772
- "consistency_level": result.get("consistency_level", "good")
773
- }
774
-
775
  headers = {'Content-Type': 'application/json'}
776
  response = requests.post(callback_url, json=callback_data, headers=headers, timeout=30)
777
  print(f"📢 Callback sent: Status {response.status_code}")
@@ -781,60 +233,39 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
781
 
782
  return True
783
 
784
- # ============================================================================
785
- # BACKGROUND TASKS - BOTH PIPELINES
786
- # ============================================================================
787
-
788
- def generate_storybook_standard(job_id: str):
789
- """Original standard pipeline background task"""
790
  try:
791
  job_data = job_storage[job_id]
792
  story_request_data = job_data["request"]
793
  story_request = StorybookRequest(**story_request_data)
794
- character_templates = job_data["character_templates"]
795
 
796
- print(f"🎬 Starting STANDARD pipeline for job {job_id}")
797
- print(f"📖 Story: {story_request.story_title}")
798
 
799
- update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting standard storybook generation...")
800
 
801
  total_scenes = len(story_request.scenes)
802
  generated_pages = []
803
  start_time = time.time()
804
 
805
  for i, scene in enumerate(story_request.scenes):
806
- progress = 5 + int((i / total_scenes) * 90)
807
-
808
- # Extract characters for this scene
809
- characters_present = []
810
- if hasattr(scene, 'characters_present') and scene.characters_present:
811
- characters_present = scene.characters_present
812
- else:
813
- available_chars = [char.name for char in story_request.characters]
814
- characters_present = extract_characters_from_visual(scene.visual, available_chars)
815
-
816
- update_job_status(
817
- job_id,
818
- JobStatus.PROCESSING,
819
- progress,
820
- f"Generating page {i+1}/{total_scenes} with {len(characters_present)} characters..."
821
- )
822
 
823
  try:
824
- print(f"🖼️ Generating page {i+1} with characters: {characters_present}")
825
 
826
- # Generate consistent image
827
- image = generate_consistent_image(
828
- scene.visual,
829
- story_request.model_choice,
830
- story_request.style,
831
- characters_present,
832
- character_templates,
833
- i + 1,
834
  story_request.consistency_seed
835
  )
836
 
837
- # Save to OCI bucket
838
  img_bytes = io.BytesIO()
839
  image.save(img_bytes, format='PNG')
840
  image_url = save_to_oci_bucket(
@@ -856,9 +287,6 @@ def generate_storybook_standard(job_id: str):
856
  "page_number": i + 1,
857
  "image_url": image_url,
858
  "text_url": text_url,
859
- "text_content": scene.text,
860
- "visual_description": scene.visual,
861
- "characters_present": characters_present
862
  }
863
  generated_pages.append(page_data)
864
 
@@ -870,18 +298,12 @@ def generate_storybook_standard(job_id: str):
870
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
871
  return
872
 
873
- # Complete the job
874
  generation_time = time.time() - start_time
875
 
876
  result = {
877
  "story_title": story_request.story_title,
878
  "total_pages": total_scenes,
879
- "characters_used": len(story_request.characters),
880
- "generated_pages": len(generated_pages),
881
  "generation_time": round(generation_time, 2),
882
- "folder_path": f"stories/{story_request.story_title}",
883
- "pipeline_used": "standard",
884
- "consistency_level": "good",
885
  "pages": generated_pages
886
  }
887
 
@@ -889,372 +311,48 @@ def generate_storybook_standard(job_id: str):
889
  job_id,
890
  JobStatus.COMPLETED,
891
  100,
892
- f"🎉 Standard pipeline completed! {len(generated_pages)} pages in {generation_time:.2f}s.",
893
- result
894
- )
895
-
896
- print(f"🎉 STANDARD pipeline finished for job {job_id}")
897
-
898
- except Exception as e:
899
- error_msg = f"Standard pipeline failed: {str(e)}"
900
- print(f"❌ {error_msg}")
901
- update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
902
-
903
- def generate_storybook_enhanced(job_id: str):
904
- """Enhanced 3-stage pipeline background task"""
905
- try:
906
- job_data = job_storage[job_id]
907
- story_request_data = job_data["request"]
908
- story_request = StorybookRequest(**story_request_data)
909
-
910
- print(f"🎬 Starting ENHANCED 3-stage pipeline for: {story_request.story_title}")
911
- print(f"👥 Characters: {len(story_request.characters)}")
912
- print(f"📄 Scenes: {len(story_request.scenes)}")
913
-
914
- # STAGE 1: Generate Characters
915
- update_job_status(job_id, JobStatus.GENERATING_CHARACTERS, 10, "Generating character images...")
916
-
917
- character_images = {}
918
- for i, character in enumerate(story_request.characters):
919
- progress = 10 + int((i / len(story_request.characters)) * 30)
920
- update_job_status(job_id, JobStatus.GENERATING_CHARACTERS, progress, f"Generating {character.name}...")
921
-
922
- try:
923
- char_image = generate_character_image(
924
- character,
925
- story_request.model_choice,
926
- story_request.style,
927
- story_request.consistency_seed
928
- )
929
-
930
- # Save character to OCI
931
- img_bytes = io.BytesIO()
932
- char_image.save(img_bytes, format='PNG')
933
- char_url = save_to_oci_bucket(
934
- img_bytes.getvalue(),
935
- f"character_{character.name.replace(' ', '_')}.png",
936
- story_request.story_title,
937
- "image",
938
- "characters"
939
- )
940
-
941
- character_images[character.name] = char_image
942
- print(f"✅ Saved character: {character.name}")
943
-
944
- except Exception as e:
945
- print(f"❌ Failed to generate {character.name}: {e}")
946
- raise
947
-
948
- # STAGE 2: Generate Backgrounds
949
- update_job_status(job_id, JobStatus.GENERATING_BACKGROUNDS, 40, "Generating scene backgrounds...")
950
-
951
- background_images = []
952
- for i, scene in enumerate(story_request.scenes):
953
- progress = 40 + int((i / len(story_request.scenes)) * 30)
954
- update_job_status(job_id, JobStatus.GENERATING_BACKGROUNDS, progress, f"Generating background {i+1}...")
955
-
956
- try:
957
- bg_image = generate_scene_background(
958
- scene,
959
- story_request.model_choice,
960
- story_request.style,
961
- (story_request.consistency_seed or 42) + i + 1000
962
- )
963
-
964
- # Save background to OCI
965
- img_bytes = io.BytesIO()
966
- bg_image.save(img_bytes, format='PNG')
967
- bg_url = save_to_oci_bucket(
968
- img_bytes.getvalue(),
969
- f"background_scene_{i+1:03d}.png",
970
- story_request.story_title,
971
- "image",
972
- "backgrounds"
973
- )
974
-
975
- background_images.append(bg_image)
976
- print(f"✅ Saved background for scene {i+1}")
977
-
978
- except Exception as e:
979
- print(f"❌ Failed to generate background {i+1}: {e}")
980
- raise
981
-
982
- # STAGE 3: Compose Final Scenes
983
- update_job_status(job_id, JobStatus.COMPOSING_SCENES, 70, "Composing final scenes...")
984
-
985
- final_pages = []
986
- start_time = time.time()
987
-
988
- for i, (scene, background) in enumerate(zip(story_request.scenes, background_images)):
989
- progress = 70 + int((i / len(story_request.scenes)) * 25)
990
- update_job_status(job_id, JobStatus.COMPOSING_SCENES, progress, f"Composing scene {i+1}...")
991
-
992
- try:
993
- # Get characters for this scene
994
- scene_characters = scene.characters_present if scene.characters_present else []
995
- characters_in_scene = {name: character_images[name] for name in scene_characters if name in character_images}
996
-
997
- # Compose final image
998
- final_image = compose_with_inpainting(
999
- background,
1000
- characters_in_scene,
1001
- scene_characters,
1002
- scene.visual
1003
- )
1004
-
1005
- # Save final image to OCI
1006
- img_bytes = io.BytesIO()
1007
- final_image.save(img_bytes, format='PNG')
1008
- final_url = save_to_oci_bucket(
1009
- img_bytes.getvalue(),
1010
- f"page_{i+1:03d}.png",
1011
- story_request.story_title,
1012
- "image",
1013
- "final"
1014
- )
1015
-
1016
- # Save text
1017
- text_url = save_to_oci_bucket(
1018
- scene.text.encode('utf-8'),
1019
- f"page_{i+1:03d}.txt",
1020
- story_request.story_title,
1021
- "text",
1022
- "text"
1023
- )
1024
-
1025
- final_pages.append({
1026
- "page_number": i + 1,
1027
- "image_url": final_url,
1028
- "text_url": text_url,
1029
- "text_content": scene.text,
1030
- "characters_present": scene_characters
1031
- })
1032
-
1033
- print(f"✅ Composed final scene {i+1}")
1034
-
1035
- except Exception as e:
1036
- print(f"❌ Failed to compose scene {i+1}: {e}")
1037
- raise
1038
-
1039
- # Complete job
1040
- generation_time = time.time() - start_time
1041
-
1042
- result = {
1043
- "story_title": story_request.story_title,
1044
- "total_pages": len(final_pages),
1045
- "characters_generated": len(character_images),
1046
- "backgrounds_generated": len(background_images),
1047
- "final_pages": len(final_pages),
1048
- "generation_time": round(generation_time, 2),
1049
- "pipeline_used": "enhanced",
1050
- "consistency_level": "perfect",
1051
- "folder_structure": {
1052
- "characters": f"stories/{story_request.story_title}/characters/",
1053
- "backgrounds": f"stories/{story_request.story_title}/backgrounds/",
1054
- "final": f"stories/{story_request.story_title}/final/",
1055
- "text": f"stories/{story_request.story_title}/text/"
1056
- },
1057
- "pages": final_pages
1058
- }
1059
-
1060
- update_job_status(
1061
- job_id,
1062
- JobStatus.COMPLETED,
1063
- 100,
1064
- f"🎉 Enhanced pipeline complete! {len(final_pages)} pages with perfect consistency in {generation_time:.2f}s",
1065
  result
1066
  )
1067
 
1068
- print(f"🎉 ENHANCED pipeline completed for job {job_id}")
1069
 
1070
  except Exception as e:
1071
- error_msg = f"Enhanced pipeline failed: {str(e)}"
1072
  print(f"❌ {error_msg}")
1073
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
1074
 
1075
- def generate_storybook_dispatcher(job_id: str):
1076
- """Choose between standard or enhanced pipeline"""
1077
- job_data = job_storage[job_id]
1078
- story_request_data = job_data["request"]
1079
-
1080
- pipeline_type = story_request_data.get("pipeline_type", "standard")
1081
-
1082
- if pipeline_type == "enhanced":
1083
- generate_storybook_enhanced(job_id)
1084
- else:
1085
- generate_storybook_standard(job_id)
1086
-
1087
- # ============================================================================
1088
- # LOCAL FILE MANAGEMENT FUNCTIONS
1089
- # ============================================================================
1090
-
1091
- def save_image_to_local(image, prompt, style="test"):
1092
- """Save image to local persistent storage"""
1093
- try:
1094
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1095
- safe_prompt = "".join(c for c in prompt[:50] if c.isalnum() or c in (' ', '-', '_')).rstrip()
1096
- filename = f"image_{safe_prompt}_{timestamp}.png"
1097
-
1098
- # Create style subfolder
1099
- style_dir = os.path.join(PERSISTENT_IMAGE_DIR, style)
1100
- os.makedirs(style_dir, exist_ok=True)
1101
- filepath = os.path.join(style_dir, filename)
1102
-
1103
- # Save the image
1104
- image.save(filepath)
1105
- print(f"💾 Image saved locally: {filepath}")
1106
-
1107
- return filepath, filename
1108
-
1109
- except Exception as e:
1110
- print(f"❌ Failed to save locally: {e}")
1111
- return None, None
1112
-
1113
- def delete_local_image(filepath):
1114
- """Delete an image from local storage"""
1115
- try:
1116
- if os.path.exists(filepath):
1117
- os.remove(filepath)
1118
- print(f"🗑️ Deleted local image: {filepath}")
1119
- return True, f"✅ Deleted: {os.path.basename(filepath)}"
1120
- else:
1121
- return False, f"❌ File not found: {filepath}"
1122
- except Exception as e:
1123
- return False, f"❌ Error deleting: {str(e)}"
1124
-
1125
- def get_local_storage_info():
1126
- """Get information about local storage usage"""
1127
- try:
1128
- total_size = 0
1129
- file_count = 0
1130
- images_list = []
1131
-
1132
- for root, dirs, files in os.walk(PERSISTENT_IMAGE_DIR):
1133
- for file in files:
1134
- if file.endswith(('.png', '.jpg', '.jpeg')):
1135
- filepath = os.path.join(root, file)
1136
- if os.path.exists(filepath):
1137
- file_size = os.path.getsize(filepath)
1138
- total_size += file_size
1139
- file_count += 1
1140
- images_list.append({
1141
- 'path': filepath,
1142
- 'filename': file,
1143
- 'size_kb': round(file_size / 1024, 1),
1144
- 'created': os.path.getctime(filepath)
1145
- })
1146
-
1147
- return {
1148
- "total_files": file_count,
1149
- "total_size_mb": round(total_size / (1024 * 1024), 2),
1150
- "images": sorted(images_list, key=lambda x: x['created'], reverse=True)
1151
- }
1152
- except Exception as e:
1153
- return {"error": str(e)}
1154
-
1155
- def refresh_local_images():
1156
- """Get list of all locally saved images"""
1157
- try:
1158
- image_files = []
1159
- for root, dirs, files in os.walk(PERSISTENT_IMAGE_DIR):
1160
- for file in files:
1161
- if file.endswith(('.png', '.jpg', '.jpeg')):
1162
- filepath = os.path.join(root, file)
1163
- if os.path.exists(filepath):
1164
- image_files.append(filepath)
1165
- return image_files
1166
- except Exception as e:
1167
- print(f"Error refreshing local images: {e}")
1168
- return []
1169
-
1170
- def delete_current_image(filepath):
1171
- """Delete the currently displayed image"""
1172
- if not filepath:
1173
- return "❌ No image to delete", None, None, refresh_local_images()
1174
-
1175
- success, message = delete_local_image(filepath)
1176
- updated_files = refresh_local_images()
1177
-
1178
- if success:
1179
- status_msg = f"✅ {message}"
1180
- return status_msg, None, "Image deleted successfully!", updated_files
1181
- else:
1182
- return f"❌ {message}", None, "Delete failed", updated_files
1183
-
1184
- def clear_all_images():
1185
- """Delete all local images"""
1186
- try:
1187
- storage_info = get_local_storage_info()
1188
- deleted_count = 0
1189
-
1190
- if "images" in storage_info:
1191
- for image_info in storage_info["images"]:
1192
- success, _ = delete_local_image(image_info["path"])
1193
- if success:
1194
- deleted_count += 1
1195
-
1196
- updated_files = refresh_local_images()
1197
- return f"✅ Deleted {deleted_count} images", updated_files
1198
- except Exception as e:
1199
- return f"❌ Error: {str(e)}", refresh_local_images()
1200
-
1201
- # ============================================================================
1202
- # FASTAPI ENDPOINTS
1203
- # ============================================================================
1204
-
1205
  @app.post("/api/generate-storybook")
1206
- async def generate_storybook_unified(request: dict, background_tasks: BackgroundTasks):
1207
- """Unified endpoint that handles both pipelines"""
1208
  try:
1209
  print(f"📥 Received storybook request: {request.get('story_title', 'Unknown')}")
1210
 
1211
- # Add consistency seed if not provided
1212
  if 'consistency_seed' not in request or not request['consistency_seed']:
1213
  request['consistency_seed'] = random.randint(1000, 9999)
1214
- print(f"🌱 Generated consistency seed: {request['consistency_seed']}")
1215
 
1216
- # Ensure characters have required fields
1217
- if 'characters' in request:
1218
- for char in request['characters']:
1219
- if 'visual_prompt' not in char or not char['visual_prompt']:
1220
- char['visual_prompt'] = ""
1221
- if 'key_features' not in char:
1222
- char['key_features'] = []
1223
 
1224
- # Convert to Pydantic model
1225
  story_request = StorybookRequest(**request)
1226
 
1227
- # Validate required fields
1228
  if not story_request.story_title or not story_request.scenes:
1229
  raise HTTPException(status_code=400, detail="story_title and scenes are required")
1230
 
1231
- # Create job
1232
  job_id = create_job(story_request)
1233
-
1234
- # Start background processing
1235
- background_tasks.add_task(generate_storybook_dispatcher, job_id)
1236
-
1237
- # Immediate response
1238
- pipeline_type = story_request.pipeline_type
1239
- estimated_time = "2-3 minutes" if pipeline_type == "standard" else "5-8 minutes"
1240
- consistency_level = "good" if pipeline_type == "standard" else "perfect"
1241
 
1242
  response_data = {
1243
  "status": "success",
1244
- "message": f"Storybook generation started with {pipeline_type} pipeline",
1245
  "job_id": job_id,
1246
  "story_title": story_request.story_title,
1247
  "total_scenes": len(story_request.scenes),
1248
- "total_characters": len(story_request.characters),
1249
- "pipeline_type": pipeline_type,
1250
- "estimated_time": estimated_time,
1251
- "expected_consistency": consistency_level,
1252
- "consistency_seed": story_request.consistency_seed,
1253
- "callback_url": story_request.callback_url,
1254
  "timestamp": datetime.now().isoformat()
1255
  }
1256
 
1257
- print(f"✅ Job {job_id} started with {pipeline_type} pipeline")
1258
 
1259
  return response_data
1260
 
@@ -1265,7 +363,6 @@ async def generate_storybook_unified(request: dict, background_tasks: Background
1265
 
1266
  @app.get("/api/job-status/{job_id}")
1267
  async def get_job_status_endpoint(job_id: str):
1268
- """Check job status"""
1269
  job_data = job_storage.get(job_id)
1270
  if not job_data:
1271
  raise HTTPException(status_code=404, detail="Job not found")
@@ -1282,364 +379,49 @@ async def get_job_status_endpoint(job_id: str):
1282
 
1283
  @app.get("/api/health")
1284
  async def api_health():
1285
- """Health check endpoint for n8n"""
1286
  return {
1287
  "status": "healthy",
1288
- "service": "dual-pipeline-storybook-generator",
1289
  "timestamp": datetime.now().isoformat(),
1290
  "active_jobs": len(job_storage),
1291
- "models_loaded": list(model_cache.keys()),
1292
- "inpaint_model_ready": inpaint_pipe is not None,
1293
- "pipelines_available": ["standard", "enhanced"],
1294
- "fallback_templates": list(FALLBACK_CHARACTER_TEMPLATES.keys()),
1295
  "oci_api_connected": OCI_API_BASE_URL
1296
  }
1297
 
1298
- @app.get("/api/system-status")
1299
- async def system_status():
1300
- """Comprehensive system status"""
1301
- active_jobs = len([job for job in job_storage.values() if job["status"] in ["processing", "pending"]])
1302
-
1303
- return {
1304
- "status": "healthy",
1305
- "active_jobs": active_jobs,
1306
- "models_loaded": list(model_cache.keys()),
1307
- "inpaint_ready": inpaint_pipe is not None,
1308
- "pipelines_available": ["standard", "enhanced"],
1309
- "storage_available": True,
1310
- "timestamp": datetime.now().isoformat()
1311
- }
1312
-
1313
- @app.get("/api/local-images")
1314
- async def get_local_images():
1315
- """API endpoint to get locally saved test images"""
1316
- storage_info = get_local_storage_info()
1317
- return storage_info
1318
-
1319
- @app.delete("/api/local-images/{filename:path}")
1320
- async def delete_local_image_api(filename: str):
1321
- """API endpoint to delete a local image"""
1322
- try:
1323
- filepath = os.path.join(PERSISTENT_IMAGE_DIR, filename)
1324
- success, message = delete_local_image(filepath)
1325
- return {"status": "success" if success else "error", "message": message}
1326
- except Exception as e:
1327
- return {"status": "error", "message": str(e)}
1328
-
1329
- # ============================================================================
1330
- # GRADIO INTERFACE
1331
- # ============================================================================
1332
-
1333
- def create_dual_pipeline_gradio_interface():
1334
- """Create Gradio interface with both pipeline options"""
1335
-
1336
- def generate_test_image(prompt, model_choice, style_choice, pipeline_type, character_names_text):
1337
- """Generate a single image for testing"""
1338
- try:
1339
- if not prompt.strip():
1340
- return None, "❌ Please enter a prompt", None
1341
-
1342
- # Parse character names
1343
- character_names = [name.strip() for name in character_names_text.split(",") if name.strip()]
1344
-
1345
- print(f"🎨 Generating test image with {pipeline_type} pipeline")
1346
- print(f"👥 Character names: {character_names}")
1347
-
1348
- if pipeline_type == "enhanced" and character_names:
1349
- # Test enhanced pipeline with character generation
1350
- character_templates = {}
1351
- for char_name in character_names:
1352
- character_templates[char_name] = {
1353
- "visual_prompt": f"{char_name}, distinctive appearance",
1354
- "key_features": ["consistent appearance"],
1355
- "consistency_keywords": f"consistent {char_name}"
1356
- }
1357
-
1358
- # For testing, we'll just use the standard generation
1359
- enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
1360
- prompt, character_names, character_templates, style_choice, 1
1361
- )
1362
-
1363
- image = generate_consistent_image(
1364
- prompt,
1365
- model_choice,
1366
- style_choice,
1367
- character_names,
1368
- character_templates,
1369
- 1
1370
- )
1371
- else:
1372
- # Standard generation
1373
- pipe = load_model(model_choice)
1374
- if pipe is None:
1375
- return None, "❌ Model not available", None
1376
-
1377
- image = pipe(
1378
- prompt=prompt,
1379
- num_inference_steps=30,
1380
- guidance_scale=7.5,
1381
- width=768,
1382
- height=768,
1383
- ).images[0]
1384
-
1385
- # Save to local storage
1386
- filepath, filename = save_image_to_local(image, prompt, style_choice)
1387
-
1388
- character_info = f"👥 Characters: {', '.join(character_names)}" if character_names else "👥 No specific characters"
1389
- pipeline_info = f"🚀 Pipeline: {pipeline_type.upper()}"
1390
-
1391
- status_msg = f"""✅ Success! Generated: {prompt}
1392
-
1393
- {character_info}
1394
- {pipeline_info}
1395
-
1396
- 📁 **Local file:** {filename if filename else 'Not saved'}"""
1397
-
1398
- return image, status_msg, filepath
1399
-
1400
- except Exception as e:
1401
- error_msg = f"❌ Generation failed: {str(e)}"
1402
- print(error_msg)
1403
- return None, error_msg, None
1404
-
1405
- with gr.Blocks(title="Dual-Pipeline Storybook Generator", theme="soft") as demo:
1406
- gr.Markdown("# 🎨 Dual-Pipeline Storybook Generator")
1407
- gr.Markdown("Choose between **Standard** (fast) or **Enhanced** (perfect consistency) pipeline")
1408
-
1409
- # Storage info display
1410
- storage_info = gr.Textbox(
1411
- label="📊 Local Storage Information",
1412
- interactive=False,
1413
- lines=2
1414
- )
1415
-
1416
- def update_storage_info():
1417
- info = get_local_storage_info()
1418
- if "error" not in info:
1419
- return f"📁 Local Storage: {info['total_files']} images, {info['total_size_mb']} MB used"
1420
- return "📁 Local Storage: Unable to calculate"
1421
 
1422
  with gr.Row():
1423
- with gr.Column(scale=1):
1424
- gr.Markdown("### ⚙️ Pipeline Selection")
1425
-
1426
- pipeline_radio = gr.Radio(
1427
- choices=["standard", "enhanced"],
1428
- value="standard",
1429
- label="Generation Pipeline",
1430
- info="Standard: Faster | Enhanced: Perfect character consistency"
1431
- )
1432
-
1433
- gr.Markdown("#### 🚀 Standard Pipeline")
1434
- gr.Markdown("- Faster generation (2-3 minutes)")
1435
- gr.Markdown("- Good character consistency")
1436
- gr.Markdown("- Single-pass generation")
1437
-
1438
- gr.Markdown("#### 🎯 Enhanced Pipeline")
1439
- gr.Markdown("- Perfect character consistency")
1440
- gr.Markdown("- Better prompt understanding")
1441
- gr.Markdown("- 3-stage process (5-8 minutes)")
1442
-
1443
- gr.Markdown("### 🎯 Quality Settings")
1444
-
1445
- model_dropdown = gr.Dropdown(
1446
- label="AI Model",
1447
- choices=list(MODEL_CHOICES.keys()),
1448
- value="dreamshaper-8"
1449
- )
1450
-
1451
- style_dropdown = gr.Dropdown(
1452
- label="Art Style",
1453
- choices=["childrens_book", "realistic", "fantasy", "anime"],
1454
- value="childrens_book"
1455
- )
1456
-
1457
- character_names_input = gr.Textbox(
1458
- label="Character Names (comma-separated)",
1459
- placeholder="Enter character names: Sparkle the Star Cat, Benny the Bunny, Tilly the Turtle",
1460
- info="Enter character names to test consistency features",
1461
- lines=2
1462
- )
1463
-
1464
- prompt_input = gr.Textbox(
1465
- label="Scene Description",
1466
- placeholder="Describe your scene with character interactions...",
1467
- lines=3
1468
- )
1469
-
1470
- generate_btn = gr.Button("✨ Generate Test Image", variant="primary")
1471
-
1472
- # Current image management
1473
- current_file_path = gr.State()
1474
- delete_btn = gr.Button("🗑️ Delete This Image", variant="stop")
1475
- delete_status = gr.Textbox(label="Delete Status", interactive=False, lines=2)
1476
-
1477
- with gr.Column(scale=2):
1478
- image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
1479
- status_output = gr.Textbox(label="Status", interactive=False, lines=4)
1480
-
1481
- # Pipeline comparison section
1482
- with gr.Accordion("📊 Pipeline Comparison", open=False):
1483
- gr.Markdown("""
1484
- | Feature | Standard Pipeline | Enhanced Pipeline |
1485
- |---------|-------------------|-------------------|
1486
- | **Speed** | 🚀 Fast (2-3 min) | 🐢 Slower (5-8 min) |
1487
- | **Consistency** | ✅ Good (80-90%) | 🎯 Perfect (100%) |
1488
- | **Prompt Understanding** | 👍 Good | 🎨 Excellent |
1489
- | **Best For** | Quick stories, testing | Final production, critical stories |
1490
- | **Storage** | Single folder | Organized subfolders |
1491
- """)
1492
-
1493
- # API usage section
1494
- with gr.Accordion("📚 API Usage for n8n", open=False):
1495
- gr.Markdown("""
1496
- **For complete storybooks (OCI bucket):**
1497
- - Endpoint: `POST /api/generate-storybook`
1498
- - Add `"pipeline_type": "enhanced"` for perfect consistency
1499
- - Add `"pipeline_type": "standard"` for faster generation
1500
-
1501
- **Example Enhanced Pipeline Payload:**
1502
- ```json
1503
- {
1504
- "story_title": "Magical Adventure",
1505
- "pipeline_type": "enhanced",
1506
- "characters": [
1507
- {
1508
- "name": "Sparkle the Star Cat",
1509
- "description": "A magical kitten with star-shaped spots"
1510
- }
1511
- ],
1512
- "scenes": [
1513
- {
1514
- "visual": "Sparkle discovering a magical portal",
1515
- "text": "Once upon a time...",
1516
- "characters_present": ["Sparkle the Star Cat"]
1517
- }
1518
- ]
1519
- }
1520
- ```
1521
- """)
1522
-
1523
- # Local file management section
1524
- with gr.Accordion("📁 Manage Local Test Images", open=True):
1525
- gr.Markdown("### Locally Saved Images")
1526
-
1527
- with gr.Row():
1528
- refresh_btn = gr.Button("🔄 Refresh List")
1529
- clear_all_btn = gr.Button("🗑️ Clear All Images", variant="stop")
1530
-
1531
- file_gallery = gr.Gallery(
1532
- label="Local Images",
1533
- show_label=True,
1534
- elem_id="gallery",
1535
- columns=4,
1536
- height="auto"
1537
- )
1538
-
1539
- clear_status = gr.Textbox(label="Clear Status", interactive=False)
1540
-
1541
- # System status section
1542
- with gr.Accordion("🔧 System Status", open=False):
1543
- debug_btn = gr.Button("🔄 Check System Status", variant="secondary")
1544
- debug_output = gr.Textbox(label="System Info", interactive=False, lines=4)
1545
-
1546
- def check_system_status():
1547
- """Check system status"""
1548
- active_jobs = len(job_storage)
1549
- return f"""**System Status:**
1550
- - Model: {current_model_name}
1551
- - Pipelines: Standard ✅ | Enhanced ✅
1552
- - Inpainting Model: {"✅ Ready" if inpaint_pipe else "❌ Not available"}
1553
- - Local Storage: {get_local_storage_info().get('total_files', 0)} images
1554
- - Active Jobs: {active_jobs}
1555
- - Ready for dual-pipeline generation!"""
1556
-
1557
- # Connect buttons to functions
1558
- generate_btn.click(
1559
- fn=generate_test_image,
1560
- inputs=[prompt_input, model_dropdown, style_dropdown, pipeline_radio, character_names_input],
1561
- outputs=[image_output, status_output, current_file_path]
1562
- ).then(
1563
- fn=refresh_local_images,
1564
- outputs=file_gallery
1565
- ).then(
1566
- fn=update_storage_info,
1567
- outputs=storage_info
1568
- )
1569
-
1570
- delete_btn.click(
1571
- fn=delete_current_image,
1572
- inputs=current_file_path,
1573
- outputs=[delete_status, image_output, status_output, file_gallery]
1574
- ).then(
1575
- fn=update_storage_info,
1576
- outputs=storage_info
1577
- )
1578
 
1579
- refresh_btn.click(
1580
- fn=refresh_local_images,
1581
- outputs=file_gallery
1582
- ).then(
1583
- fn=update_storage_info,
1584
- outputs=storage_info
1585
- )
1586
-
1587
- clear_all_btn.click(
1588
- fn=clear_all_images,
1589
- outputs=[clear_status, file_gallery]
1590
- ).then(
1591
- fn=update_storage_info,
1592
- outputs=storage_info
1593
- )
1594
-
1595
- debug_btn.click(
1596
- fn=check_system_status,
1597
- inputs=None,
1598
- outputs=debug_output
1599
- )
1600
 
1601
- # Initialize on load
1602
- demo.load(fn=refresh_local_images, outputs=file_gallery)
1603
- demo.load(fn=update_storage_info, outputs=storage_info)
1604
 
1605
  return demo
1606
 
1607
- # Create enhanced Gradio app
1608
- demo = create_dual_pipeline_gradio_interface()
1609
 
1610
- # Enhanced root endpoint
1611
  @app.get("/")
1612
  async def root():
1613
  return {
1614
- "message": "Dual-Pipeline Storybook Generator API is running!",
1615
- "pipelines": {
1616
- "standard": "Fast generation with good consistency",
1617
- "enhanced": "Slower generation with perfect consistency"
1618
- },
1619
- "api_endpoints": {
1620
- "generate_storybook": "POST /api/generate-storybook",
1621
- "job_status": "GET /api/job-status/{job_id}",
1622
- "health": "GET /api/health",
1623
- "system_status": "GET /api/system-status"
1624
- },
1625
- "web_interface": "GET /ui",
1626
- "note": "Add 'pipeline_type': 'enhanced' to your request for perfect character consistency"
1627
  }
1628
 
1629
- # Add a test endpoint
1630
- @app.get("/api/test")
1631
- async def test_endpoint():
1632
- return {
1633
- "status": "success",
1634
- "message": "Dual-pipeline API is working correctly",
1635
- "pipelines": {
1636
- "standard": "✅ Available",
1637
- "enhanced": "✅ Available"
1638
- },
1639
- "timestamp": datetime.now().isoformat()
1640
- }
1641
-
1642
- # For Hugging Face Spaces deployment
1643
  def get_app():
1644
  return app
1645
 
@@ -1647,66 +429,12 @@ if __name__ == "__main__":
1647
  import uvicorn
1648
  import os
1649
 
1650
- # Check if we're running on Hugging Face Spaces
1651
  HF_SPACE = os.environ.get('SPACE_ID') is not None
1652
 
1653
  if HF_SPACE:
1654
- print("🚀 Running on Hugging Face Spaces - Integrated Mode")
1655
- print("📚 API endpoints available at: /api/*")
1656
- print("🎨 Web interface available at: /ui")
1657
- print("🔧 Dual-pipeline system: Standard ✅ | Enhanced ✅")
1658
- print("👥 Perfect character consistency available with enhanced pipeline")
1659
-
1660
- # Mount Gradio without reassigning app
1661
  gr.mount_gradio_app(app, demo, path="/ui")
1662
-
1663
- # Run the combined app
1664
- uvicorn.run(
1665
- app,
1666
- host="0.0.0.0",
1667
- port=7860,
1668
- log_level="info"
1669
- )
1670
  else:
1671
- # Local development - run separate servers
1672
- print("🚀 Running locally - Separate API and UI servers")
1673
- print("📚 API endpoints: http://localhost:8000/api/*")
1674
- print("🎨 Web interface: http://localhost:7860/ui")
1675
- print("🔧 Dual-pipeline system: Standard ✅ | Enhanced ✅")
1676
-
1677
- def run_fastapi():
1678
- """Run FastAPI on port 8000 for API calls"""
1679
- uvicorn.run(
1680
- app,
1681
- host="0.0.0.0",
1682
- port=8000,
1683
- log_level="info",
1684
- access_log=False
1685
- )
1686
-
1687
- def run_gradio():
1688
- """Run Gradio on port 7860 for web interface"""
1689
- demo.launch(
1690
- server_name="0.0.0.0",
1691
- server_port=7860,
1692
- share=False,
1693
- show_error=True,
1694
- quiet=True
1695
- )
1696
-
1697
- # Start both servers in separate threads
1698
- api_thread = threading.Thread(target=run_fastapi, daemon=True)
1699
- ui_thread = threading.Thread(target=run_gradio, daemon=True)
1700
-
1701
- api_thread.start()
1702
- print("✅ FastAPI server started on port 8000")
1703
-
1704
- ui_thread.start()
1705
- print("✅ Gradio server started on port 7860")
1706
-
1707
- # Keep the main thread alive
1708
- try:
1709
- while True:
1710
- time.sleep(1)
1711
- except KeyboardInterrupt:
1712
- print("🛑 Shutting down servers...")
 
1
  import gradio as gr
2
  import torch
3
+ from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
4
+ from PIL import Image
5
  import io
6
  import requests
7
  import os
 
9
  import re
10
  import time
11
  import json
12
+ from typing import List, Optional, Dict
13
  from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from pydantic import BaseModel
 
 
15
  import threading
16
  import uuid
 
 
17
  import random
 
18
 
19
+ # External OCI API URL
20
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
21
 
22
+ # Create local directories
23
  PERSISTENT_IMAGE_DIR = "generated_test_images"
24
  os.makedirs(PERSISTENT_IMAGE_DIR, exist_ok=True)
25
  print(f"📁 Created local image directory: {PERSISTENT_IMAGE_DIR}")
26
 
27
  # Initialize FastAPI app
28
+ app = FastAPI(title="Storybook Generator API")
29
 
 
30
  from fastapi.middleware.cors import CORSMiddleware
31
  app.add_middleware(
32
  CORSMiddleware,
 
36
  allow_headers=["*"],
37
  )
38
 
 
39
class JobStatus(str, Enum):
    """Lifecycle states for an asynchronous storybook-generation job."""
    PENDING = "pending"        # job created, background task not started yet
    PROCESSING = "processing"  # background task is generating pages
    COMPLETED = "completed"    # all pages generated and uploaded
    FAILED = "failed"          # generation aborted with an error message
44
 
 
45
class StoryScene(BaseModel):
    """One page of the storybook: an image prompt plus the page text."""
    visual: str  # visual description fed to the image-generation prompt
    text: str    # narrative text for this page (uploaded alongside the image)
    # Names of characters appearing in this scene. Pydantic copies field
    # defaults per instance, so the mutable [] default is safe here.
    characters_present: List[str] = []
 
 
49
 
50
class CharacterDescription(BaseModel):
    """A story character, used to keep appearance consistent across pages."""
    name: str                     # character name referenced by scenes
    description: str              # free-text description of the character
    visual_prompt: str = ""       # optional prompt fragment for the image model
    key_features: List[str] = []  # distinguishing visual features
 
55
 
56
class StorybookRequest(BaseModel):
    """Payload for POST /api/generate-storybook."""
    story_title: str                 # used for the OCI bucket subfolder name
    scenes: List[StoryScene]         # one entry per page to generate
    characters: List[CharacterDescription] = []
    model_choice: str = "sd-1.5"     # forced to "sd-1.5" by the endpoint
    style: str = "childrens_book"
    callback_url: Optional[str] = None   # if set, progress updates are POSTed here
    # Seed shared across pages for consistent output; the endpoint fills in
    # a random one when the caller omits it.
    consistency_seed: Optional[int] = None
 
64
 
65
  class JobStatusResponse(BaseModel):
66
  job_id: str
 
71
  created_at: float
72
  updated_at: float
73
 
74
# SIMPLE MODEL CHOICES - Only working models
MODEL_CHOICES = {
    "sd-1.5": "runwayml/stable-diffusion-v1-5",
}

# In-memory job registry: job_id -> status/progress/result dict.
# NOTE(review): not persisted — all jobs are lost on process restart.
job_storage = {}
# Cache of loaded diffusion pipelines keyed by model-choice name.
model_cache = {}

# Currently active pipeline; reads/writes are guarded by model_lock.
current_model_name = None
current_pipe = None
model_lock = threading.Lock()
84
 
85
+ def load_model(model_name="sd-1.5"):
86
+ """EXACT WORKING VERSION FROM OLD SCRIPT"""
87
  global model_cache, current_model_name, current_pipe
88
 
89
  with model_lock:
 
92
  current_model_name = model_name
93
  return current_pipe
94
 
95
+ print(f"🔄 Loading model: {model_name}")
96
  try:
97
+ model_id = MODEL_CHOICES.get(model_name, "runwayml/stable-diffusion-v1-5")
98
 
99
  pipe = StableDiffusionPipeline.from_pretrained(
100
  model_id,
 
110
  current_pipe = pipe
111
  current_model_name = model_name
112
 
113
+ print(f"✅ Model loaded: {model_name}")
114
  return pipe
115
 
116
  except Exception as e:
117
  print(f"❌ Model loading failed: {e}")
118
+ # FALLBACK THAT WORKS
119
+ fallback_pipe = StableDiffusionPipeline.from_pretrained(
120
+ "runwayml/stable-diffusion-v1-5",
121
+ torch_dtype=torch.float32,
122
+ safety_checker=None,
123
+ requires_safety_checker=False
124
+ ).to("cpu")
125
+ model_cache["sd-1.5"] = fallback_pipe
126
+ return fallback_pipe
127
+
128
# Initialize model
# Eagerly load the default pipeline at import time so the first request
# does not pay the (slow) download/initialisation cost.
print("🚀 Initializing Storybook Generator API...")
load_model("sd-1.5")
print("✅ Model loaded and ready!")
132
+
133
def generate_simple_image(prompt, negative_prompt="", seed=None):
    """Generate a single 768x768 image with the cached SD 1.5 pipeline.

    Args:
        prompt: Positive text prompt describing the desired image.
        negative_prompt: Concepts the model should avoid.
        seed: Optional integer seed for reproducible output.

    Returns:
        The PIL image produced by the pipeline.

    Raises:
        Exception: If the model pipeline could not be loaded.
    """
    pipe = load_model("sd-1.5")

    if pipe is None:
        raise Exception("Model not available")

    # Bug fix: the original `if seed:` treated seed=0 as "no seed" because
    # 0 is falsy; `is not None` makes every integer seed deterministic.
    generator = None
    if seed is not None:
        generator = torch.Generator(device="cpu").manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=25,
        guidance_scale=7.5,
        width=768,
        height=768,
        generator=generator
    ).images[0]

    return image
154
 
155
+ def save_to_oci_bucket(file_data, filename, story_title, file_type="image"):
156
+ """Save files to OCI bucket"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  try:
158
  api_url = f"{OCI_API_BASE_URL}/api/upload"
159
 
160
+ full_subfolder = f'stories/{story_title}'
 
 
 
 
161
  mime_type = "image/png" if file_type == "image" else "text/plain"
162
  files = {'file': (filename, file_data, mime_type)}
163
  data = {
 
167
 
168
  response = requests.post(api_url, files=files, data=data, timeout=30)
169
 
 
 
170
  if response.status_code == 200:
171
  result = response.json()
172
  if result['status'] == 'success':
 
179
  except Exception as e:
180
  raise Exception(f"OCI upload failed: {str(e)}")
181
 
 
 
 
 
182
  def create_job(story_request: StorybookRequest) -> str:
183
  job_id = str(uuid.uuid4())
184
 
 
 
 
 
185
  job_storage[job_id] = {
186
  "status": JobStatus.PENDING,
187
  "progress": 0,
 
190
  "result": None,
191
  "created_at": time.time(),
192
  "updated_at": time.time(),
 
 
 
193
  }
194
 
195
  print(f"📝 Created job {job_id} for story: {story_request.story_title}")
 
 
 
196
  return job_id
197
 
198
  def update_job_status(job_id: str, status: JobStatus, progress: int, message: str, result=None):
 
209
  if result:
210
  job_storage[job_id]["result"] = result
211
 
 
212
  job_data = job_storage[job_id]
213
  request_data = job_data["request"]
214
 
215
  if request_data.get("callback_url"):
216
  try:
217
  callback_url = request_data["callback_url"]
 
218
  callback_data = {
219
  "job_id": job_id,
220
  "status": status.value,
221
  "progress": progress,
222
  "message": message,
223
  "story_title": request_data["story_title"],
 
 
 
224
  "timestamp": time.time(),
 
225
  }
226
 
 
 
 
 
 
 
 
 
227
  headers = {'Content-Type': 'application/json'}
228
  response = requests.post(callback_url, json=callback_data, headers=headers, timeout=30)
229
  print(f"📢 Callback sent: Status {response.status_code}")
 
233
 
234
  return True
235
 
236
+ def generate_storybook_background(job_id: str):
237
+ """Simple background task"""
 
 
 
 
238
  try:
239
  job_data = job_storage[job_id]
240
  story_request_data = job_data["request"]
241
  story_request = StorybookRequest(**story_request_data)
 
242
 
243
+ print(f"🎬 Starting storybook generation for job {job_id}")
 
244
 
245
+ update_job_status(job_id, JobStatus.PROCESSING, 10, "Starting generation...")
246
 
247
  total_scenes = len(story_request.scenes)
248
  generated_pages = []
249
  start_time = time.time()
250
 
251
  for i, scene in enumerate(story_request.scenes):
252
+ progress = 10 + int((i / total_scenes) * 80)
253
+ update_job_status(job_id, JobStatus.PROCESSING, progress, f"Generating page {i+1}/{total_scenes}...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
 
255
  try:
256
+ print(f"🖼️ Generating page {i+1}")
257
 
258
+ # Simple prompt enhancement
259
+ enhanced_prompt = f"children's book illustration, {scene.visual}, professional illustration, vibrant colors"
260
+ negative_prompt = "blurry, low quality, bad anatomy"
261
+
262
+ image = generate_simple_image(
263
+ enhanced_prompt,
264
+ negative_prompt,
 
265
  story_request.consistency_seed
266
  )
267
 
268
+ # Save image
269
  img_bytes = io.BytesIO()
270
  image.save(img_bytes, format='PNG')
271
  image_url = save_to_oci_bucket(
 
287
  "page_number": i + 1,
288
  "image_url": image_url,
289
  "text_url": text_url,
 
 
 
290
  }
291
  generated_pages.append(page_data)
292
 
 
298
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
299
  return
300
 
 
301
  generation_time = time.time() - start_time
302
 
303
  result = {
304
  "story_title": story_request.story_title,
305
  "total_pages": total_scenes,
 
 
306
  "generation_time": round(generation_time, 2),
 
 
 
307
  "pages": generated_pages
308
  }
309
 
 
311
  job_id,
312
  JobStatus.COMPLETED,
313
  100,
314
+ f"🎉 Storybook completed! {len(generated_pages)} pages in {generation_time:.2f}s.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
  result
316
  )
317
 
318
+ print(f"🎉 Storybook finished for job {job_id}")
319
 
320
  except Exception as e:
321
+ error_msg = f"Story generation failed: {str(e)}"
322
  print(f"❌ {error_msg}")
323
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
324
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325
  @app.post("/api/generate-storybook")
326
+ async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
327
+ """Simple storybook generation endpoint"""
328
  try:
329
  print(f"📥 Received storybook request: {request.get('story_title', 'Unknown')}")
330
 
 
331
  if 'consistency_seed' not in request or not request['consistency_seed']:
332
  request['consistency_seed'] = random.randint(1000, 9999)
 
333
 
334
+ # Force to working model
335
+ request['model_choice'] = "sd-1.5"
 
 
 
 
 
336
 
 
337
  story_request = StorybookRequest(**request)
338
 
 
339
  if not story_request.story_title or not story_request.scenes:
340
  raise HTTPException(status_code=400, detail="story_title and scenes are required")
341
 
 
342
  job_id = create_job(story_request)
343
+ background_tasks.add_task(generate_storybook_background, job_id)
 
 
 
 
 
 
 
344
 
345
  response_data = {
346
  "status": "success",
347
+ "message": "Storybook generation started successfully",
348
  "job_id": job_id,
349
  "story_title": story_request.story_title,
350
  "total_scenes": len(story_request.scenes),
351
+ "model_used": "sd-1.5",
 
 
 
 
 
352
  "timestamp": datetime.now().isoformat()
353
  }
354
 
355
+ print(f"✅ Job {job_id} started")
356
 
357
  return response_data
358
 
 
363
 
364
  @app.get("/api/job-status/{job_id}")
365
  async def get_job_status_endpoint(job_id: str):
 
366
  job_data = job_storage.get(job_id)
367
  if not job_data:
368
  raise HTTPException(status_code=404, detail="Job not found")
 
379
 
380
@app.get("/api/health")
async def api_health():
    """Liveness probe reporting job-queue and model-cache state."""
    payload = {
        "status": "healthy",
        "service": "storybook-generator",
        "timestamp": datetime.now().isoformat(),
        "active_jobs": len(job_storage),
        "model_loaded": "sd-1.5" in model_cache,
        "oci_api_connected": OCI_API_BASE_URL,
    }
    return payload
390
 
391
# Simple Gradio interface
def create_simple_interface():
    """Build the minimal test UI: one prompt box, one button, one image."""

    def _run_test(prompt_text):
        # Best-effort handler: log failures and show nothing in the UI.
        try:
            return generate_simple_image(prompt_text)
        except Exception as e:
            print(f"Test generation failed: {e}")
            return None

    with gr.Blocks() as ui:
        gr.Markdown("# Storybook Generator")

        with gr.Row():
            with gr.Column():
                prompt_box = gr.Textbox(label="Test Prompt", value="a cute cartoon cat in a garden")
                run_button = gr.Button("Test Generate")
            with gr.Column():
                image_out = gr.Image(label="Generated Image")

        run_button.click(_run_test, inputs=prompt_box, outputs=image_out)

    return ui
414
 
415
# Module-level Gradio app; mounted onto the FastAPI app at /ui in __main__.
demo = create_simple_interface()
 
416
 
 
417
  @app.get("/")
418
  async def root():
419
  return {
420
+ "message": "Storybook Generator API is running!",
421
+ "model": "sd-1.5",
422
+ "status": "ready"
 
 
 
 
 
 
 
 
 
 
423
  }
424
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
def get_app():
    """Return the FastAPI application (entry point for external ASGI servers)."""
    return app
427
 
 
429
  import uvicorn
430
  import os
431
 
 
432
  HF_SPACE = os.environ.get('SPACE_ID') is not None
433
 
434
  if HF_SPACE:
435
+ print("🚀 Running on Hugging Face Spaces")
 
 
 
 
 
 
436
  gr.mount_gradio_app(app, demo, path="/ui")
437
+ uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
 
 
 
 
 
 
 
438
  else:
439
+ print("🚀 Running locally")
440
+ uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")