yukee1992 committed on
Commit
945b0d7
·
verified ·
1 Parent(s): d22980d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -515
app.py CHANGED
@@ -51,18 +51,14 @@ class JobStatus(str, Enum):
51
  COMPLETED = "completed"
52
  FAILED = "failed"
53
 
54
- # Enhanced Story scene model with character consistency
55
  class StoryScene(BaseModel):
56
  visual: str
57
  text: str
58
- characters_present: List[str] = [] # Which characters are in this scene
59
- scene_type: str = "general" # "action", "dialogue", "establishing", etc.
60
 
61
  class CharacterDescription(BaseModel):
62
  name: str
63
  description: str
64
- visual_prompt: str = "" # Detailed visual description for AI
65
- key_features: List[str] = [] # Critical features that must stay consistent
66
 
67
  class StorybookRequest(BaseModel):
68
  story_title: str
@@ -71,7 +67,7 @@ class StorybookRequest(BaseModel):
71
  model_choice: str = "dreamshaper-8"
72
  style: str = "childrens_book"
73
  callback_url: Optional[str] = None
74
- consistency_seed: Optional[int] = None # For consistent character generation
75
 
76
  class JobStatusResponse(BaseModel):
77
  job_id: str
@@ -91,22 +87,6 @@ MODEL_CHOICES = {
91
  "sd-2.1": "stabilityai/stable-diffusion-2-1",
92
  }
93
 
94
- # FALLBACK CHARACTER TEMPLATES (used only if n8n doesn't provide character details)
95
- FALLBACK_CHARACTER_TEMPLATES = {
96
- "Sparkle the Star Cat": {
97
- "visual_prompt": "small white kitten with distinctive silver star-shaped spots on fur, big golden eyes, shiny blue collar with star charm, playful expression",
98
- "key_features": ["star-shaped spots", "blue collar", "golden eyes", "white fur"],
99
- },
100
- "Benny the Bunny": {
101
- "visual_prompt": "fluffy brown rabbit with long ears, bright green eyes, red scarf around neck, cheerful expression",
102
- "key_features": ["red scarf", "long ears", "green eyes", "brown fur"],
103
- },
104
- "Tilly the Turtle": {
105
- "visual_prompt": "gentle green turtle with shiny turquoise shell decorated with swirl patterns, wise expression, slow-moving",
106
- "key_features": ["turquoise shell", "swirl patterns", "green skin", "wise expression"],
107
- }
108
- }
109
-
110
  # GLOBAL STORAGE
111
  job_storage = {}
112
  model_cache = {}
@@ -159,138 +139,9 @@ print("πŸš€ Initializing Storybook Generator API...")
159
  load_model("dreamshaper-8")
160
  print("βœ… Model loaded and ready!")
161
 
162
- # DYNAMIC CHARACTER PROCESSING FUNCTIONS
163
- def process_character_descriptions(characters_from_request):
164
- """Process character descriptions from n8n and create consistency templates"""
165
- character_templates = {}
166
-
167
- for character in characters_from_request:
168
- char_name = character.name
169
-
170
- # Use provided visual_prompt or generate from description
171
- if character.visual_prompt:
172
- visual_prompt = character.visual_prompt
173
- else:
174
- # Generate visual prompt from description
175
- visual_prompt = generate_visual_prompt_from_description(character.description, char_name)
176
-
177
- # Use provided key_features or extract from description
178
- if character.key_features:
179
- key_features = character.key_features
180
- else:
181
- key_features = extract_key_features_from_description(character.description)
182
-
183
- character_templates[char_name] = {
184
- "visual_prompt": visual_prompt,
185
- "key_features": key_features,
186
- "consistency_keywords": f"consistent character, same {char_name.split()[-1].lower()}, maintaining appearance",
187
- "source": "n8n_request" # Track where this template came from
188
- }
189
-
190
- print(f"βœ… Processed {len(character_templates)} characters from n8n request")
191
- return character_templates
192
-
193
- def generate_visual_prompt_from_description(description, character_name):
194
- """Generate a visual prompt from character description"""
195
- # Basic extraction of visual elements
196
- description_lower = description.lower()
197
-
198
- # Extract species/type
199
- species_keywords = ["kitten", "cat", "rabbit", "bunny", "turtle", "dog", "bird", "dragon", "bear", "fox"]
200
- species = "character"
201
- for keyword in species_keywords:
202
- if keyword in description_lower:
203
- species = keyword
204
- break
205
-
206
- # Extract colors
207
- color_keywords = ["white", "black", "brown", "red", "blue", "green", "yellow", "golden", "silver", "orange"]
208
- colors = []
209
- for color in color_keywords:
210
- if color in description_lower:
211
- colors.append(color)
212
-
213
- # Extract distinctive features
214
- feature_keywords = ["spots", "stripes", "collar", "scarf", "shell", "wings", "horn", "tail", "ears", "eyes"]
215
- features = []
216
- for feature in feature_keywords:
217
- if feature in description_lower:
218
- features.append(feature)
219
-
220
- # Build visual prompt
221
- visual_prompt_parts = []
222
- if colors:
223
- visual_prompt_parts.append(f"{' '.join(colors)} {species}")
224
- else:
225
- visual_prompt_parts.append(species)
226
-
227
- visual_prompt_parts.append(character_name)
228
-
229
- if features:
230
- visual_prompt_parts.append(f"with {', '.join(features)}")
231
-
232
- # Add emotional/character traits
233
- trait_keywords = ["playful", "brave", "curious", "kind", "cheerful", "wise", "calm", "friendly"]
234
- traits = [trait for trait in trait_keywords if trait in description_lower]
235
- if traits:
236
- visual_prompt_parts.append(f"{', '.join(traits)} expression")
237
-
238
- visual_prompt = " ".join(visual_prompt_parts)
239
- print(f"πŸ”§ Generated visual prompt for {character_name}: {visual_prompt}")
240
-
241
- return visual_prompt
242
-
243
- def extract_key_features_from_description(description):
244
- """Extract key features from character description"""
245
- description_lower = description.lower()
246
- key_features = []
247
-
248
- # Look for distinctive physical features
249
- feature_patterns = [
250
- r"(\w+)\s+(?:spots|stripes|marks)",
251
- r"(\w+)\s+(?:collar|scarf|ribbon)",
252
- r"(\w+)\s+(?:eyes|fur|skin|shell)",
253
- r"(\w+)\s+(?:ears|tail|wings|horn)"
254
- ]
255
-
256
- for pattern in feature_patterns:
257
- matches = re.findall(pattern, description_lower)
258
- key_features.extend(matches)
259
-
260
- # Remove duplicates and limit to 3 most important features
261
- key_features = list(set(key_features))[:3]
262
-
263
- # If no features found, use some defaults based on character type
264
- if not key_features:
265
- if any(word in description_lower for word in ["kitten", "cat"]):
266
- key_features = ["whiskers", "tail", "paws"]
267
- elif any(word in description_lower for word in ["rabbit", "bunny"]):
268
- key_features = ["long ears", "fluffy tail", "paws"]
269
- elif any(word in description_lower for word in ["turtle"]):
270
- key_features = ["shell", "slow moving", "wise eyes"]
271
- else:
272
- key_features = ["distinctive appearance", "consistent features"]
273
-
274
- print(f"πŸ”§ Extracted key features: {key_features}")
275
- return key_features
276
-
277
- # ENHANCED PROMPT ENGINEERING WITH DYNAMIC CHARACTER CONSISTENCY
278
- def enhance_prompt_with_characters(scene_visual, characters_present, character_templates, style="childrens_book", scene_number=1):
279
- """Create prompts that maintain character consistency using dynamic templates"""
280
-
281
- # Get character descriptions for this scene
282
- character_descriptions = []
283
- consistency_keywords = []
284
-
285
- for char_name in characters_present:
286
- if char_name in character_templates:
287
- char_data = character_templates[char_name]
288
- character_descriptions.append(f"{char_name}: {char_data['visual_prompt']}")
289
- consistency_keywords.append(char_data['consistency_keywords'])
290
- else:
291
- # Fallback if character not in templates
292
- character_descriptions.append(f"{char_name}: distinctive character")
293
- consistency_keywords.append(f"consistent {char_name}")
294
 
295
  # Style templates
296
  style_templates = {
@@ -302,82 +153,28 @@ def enhance_prompt_with_characters(scene_visual, characters_present, character_t
302
 
303
  style_prompt = style_templates.get(style, style_templates["childrens_book"])
304
 
305
- # Build the enhanced prompt
306
- character_context = ". ".join(character_descriptions)
307
- consistency_context = ", ".join(consistency_keywords)
308
 
309
- enhanced_prompt = (
310
- f"{style_prompt}, {scene_visual}. "
311
- f"Featuring: {character_context}. "
312
- f"Maintain character consistency: {consistency_context}. "
313
- f"Scene {scene_number} of storybook series. "
314
- )
315
-
316
- # Quality boosters for consistency
317
- quality_boosters = [
318
- "consistent character design", "maintain identical features",
319
- "same characters throughout", "continuous visual narrative",
320
- "professional storybook illustration", "cohesive art style",
321
- "character continuity", "consistent proportions"
322
- ]
323
-
324
- enhanced_prompt += ", ".join(quality_boosters)
325
-
326
- # Enhanced negative prompt to avoid inconsistencies
327
  negative_prompt = (
328
- "inconsistent characters, different appearances, changing features, "
329
- "multiple versions of same character, inconsistent art style, "
330
  "blurry, low quality, bad anatomy, deformed characters, "
331
- "wrong proportions, mismatched features, different art style"
332
  )
333
 
334
  return enhanced_prompt, negative_prompt
335
 
336
- def extract_characters_from_visual(visual_description, available_characters):
337
- """Extract character names from visual description using available characters"""
338
- characters = []
339
- visual_lower = visual_description.lower()
340
-
341
- # Check for each available character name in the visual description
342
- for char_name in available_characters:
343
- # Use the first word or main identifier from character name
344
- char_identifier = char_name.split()[0].lower()
345
- if char_identifier in visual_lower or char_name.lower() in visual_lower:
346
- characters.append(char_name)
347
 
348
- return characters
349
-
350
- def generate_character_reference_sheet(characters):
351
- """Generate reference descriptions for consistent character generation"""
352
- reference_sheet = {}
353
 
354
- for character in characters:
355
- char_name = character.name
356
- reference_sheet[char_name] = {
357
- "name": char_name,
358
- "base_prompt": character.visual_prompt if character.visual_prompt else generate_visual_prompt_from_description(character.description, char_name),
359
- "key_features": character.key_features if character.key_features else extract_key_features_from_description(character.description),
360
- "must_include": character.key_features[:2] if character.key_features else []
361
- }
362
-
363
- return reference_sheet
364
-
365
- def generate_consistent_image(prompt, model_choice, style, characters_present, character_templates, scene_number, consistency_seed=None):
366
- """Generate image with character consistency measures using dynamic templates"""
367
-
368
- # Enhance prompt with character consistency
369
- enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
370
- prompt, characters_present, character_templates, style, scene_number
371
- )
372
-
373
- # Use a consistent seed for character generation
374
  if consistency_seed:
375
- base_seed = consistency_seed
376
  else:
377
- base_seed = hash("".join(characters_present)) % 1000000 if characters_present else random.randint(1000, 9999)
378
-
379
- # Adjust seed slightly per scene but maintain character consistency
380
- scene_seed = base_seed + scene_number
381
 
382
  try:
383
  pipe = load_model(model_choice)
@@ -385,32 +182,23 @@ def generate_consistent_image(prompt, model_choice, style, characters_present, c
385
  image = pipe(
386
  prompt=enhanced_prompt,
387
  negative_prompt=negative_prompt,
388
- num_inference_steps=35, # Increased for better quality
389
- guidance_scale=7.5, # Slightly lower for more consistency
390
  width=768,
391
- height=768,
392
  generator=torch.Generator(device="cpu").manual_seed(scene_seed)
393
  ).images[0]
394
 
395
- print(f"βœ… Generated consistent image for scene {scene_number}")
396
- print(f"πŸ‘₯ Characters: {characters_present}")
397
  print(f"🌱 Seed used: {scene_seed}")
 
398
 
399
  return image
400
 
401
  except Exception as e:
402
- print(f"❌ Consistent generation failed: {str(e)}")
403
  raise
404
 
405
- # Backward compatibility functions
406
- def enhance_prompt(prompt, style="childrens_book"):
407
- """Legacy function for backward compatibility"""
408
- return enhance_prompt_with_characters(prompt, [], {}, style, 1)
409
-
410
- def generate_high_quality_image(prompt, model_choice="dreamshaper-8", style="childrens_book", negative_prompt=""):
411
- """Legacy function for backward compatibility"""
412
- return generate_consistent_image(prompt, model_choice, style, [], {}, 1)
413
-
414
  # LOCAL FILE MANAGEMENT FUNCTIONS
415
  def save_image_to_local(image, prompt, style="test"):
416
  """Save image to local persistent storage"""
@@ -516,13 +304,13 @@ def save_to_oci_bucket(image, text_content, story_title, page_number, file_type=
516
  'subfolder': f'stories/{story_title}'
517
  }
518
 
519
- # Create session with retry strategy - FIXED PARAMETERS
520
  session = requests.Session()
521
  retry_strategy = Retry(
522
- total=3, # Retry 3 times
523
- status_forcelist=[429, 500, 502, 503, 504], # Retry on these status codes
524
- allowed_methods=["POST"], # CORRECTED: Use 'allowed_methods' instead of 'method_whitelist'
525
- backoff_factor=1 # Wait 1, 2, 4 seconds between retries
526
  )
527
  adapter = HTTPAdapter(max_retries=retry_strategy)
528
  session.mount("http://", adapter)
@@ -566,15 +354,10 @@ def test_oci_connection():
566
  print(f"πŸ”§ Connection test failed: {e}")
567
  return False
568
 
569
-
570
  # JOB MANAGEMENT FUNCTIONS
571
  def create_job(story_request: StorybookRequest) -> str:
572
  job_id = str(uuid.uuid4())
573
 
574
- # Process character descriptions from n8n
575
- character_templates = process_character_descriptions(story_request.characters)
576
- character_references = generate_character_reference_sheet(story_request.characters)
577
-
578
  job_storage[job_id] = {
579
  "status": JobStatus.PENDING,
580
  "progress": 0,
@@ -583,13 +366,11 @@ def create_job(story_request: StorybookRequest) -> str:
583
  "result": None,
584
  "created_at": time.time(),
585
  "updated_at": time.time(),
586
- "pages": [],
587
- "character_templates": character_templates,
588
- "character_references": character_references
589
  }
590
 
591
  print(f"πŸ“ Created job {job_id} for story: {story_request.story_title}")
592
- print(f"πŸ‘₯ Processed {len(character_templates)} characters from n8n request")
593
 
594
  return job_id
595
 
@@ -615,7 +396,7 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
615
  try:
616
  callback_url = request_data["callback_url"]
617
 
618
- # Enhanced callback data
619
  callback_data = {
620
  "job_id": job_id,
621
  "status": status.value,
@@ -623,10 +404,8 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
623
  "message": message,
624
  "story_title": request_data["story_title"],
625
  "total_scenes": len(request_data["scenes"]),
626
- "total_characters": len(request_data["characters"]),
627
  "timestamp": time.time(),
628
- "source": "huggingface-storybook-generator",
629
- "estimated_time_remaining": calculate_remaining_time(job_id, progress)
630
  }
631
 
632
  # Add result data for completed jobs
@@ -635,23 +414,9 @@ def update_job_status(job_id: str, status: JobStatus, progress: int, message: st
635
  "total_pages": result.get("total_pages", 0),
636
  "generation_time": result.get("generation_time", 0),
637
  "oci_bucket_url": result.get("oci_bucket_url", ""),
638
- "pages_generated": result.get("generated_pages", 0),
639
- "characters_used": result.get("characters_used", 0)
640
  }
641
 
642
- # Add current scene info for processing jobs
643
- if status == JobStatus.PROCESSING:
644
- current_scene = progress // (100 // len(request_data["scenes"])) + 1
645
- callback_data["current_scene"] = current_scene
646
- callback_data["total_scenes"] = len(request_data["scenes"])
647
- if current_scene <= len(request_data["scenes"]):
648
- scene_visual = request_data["scenes"][current_scene-1]["visual"]
649
- callback_data["scene_description"] = scene_visual[:100] + "..."
650
-
651
- # Add characters in current scene
652
- if "characters_present" in request_data["scenes"][current_scene-1]:
653
- callback_data["characters_in_scene"] = request_data["scenes"][current_scene-1]["characters_present"]
654
-
655
  headers = {
656
  'Content-Type': 'application/json',
657
  'User-Agent': 'Storybook-Generator/1.0'
@@ -688,26 +453,27 @@ def calculate_remaining_time(job_id, progress):
688
 
689
  return "Unknown"
690
 
691
- # ENHANCED BACKGROUND TASK WITH DYNAMIC CHARACTER CONSISTENCY
692
  def generate_storybook_background(job_id: str):
693
- """Background task to generate complete storybook with dynamic character consistency"""
694
  try:
 
 
 
 
 
 
695
  job_data = job_storage[job_id]
696
  story_request_data = job_data["request"]
697
  story_request = StorybookRequest(**story_request_data)
698
- character_templates = job_data["character_templates"]
699
 
700
- print(f"🎬 Starting DYNAMIC storybook generation for job {job_id}")
701
  print(f"πŸ“– Story: {story_request.story_title}")
702
- print(f"πŸ‘₯ Characters: {len(story_request.characters)} (from n8n)")
703
  print(f"πŸ“„ Scenes: {len(story_request.scenes)}")
 
704
  print(f"🌱 Consistency seed: {story_request.consistency_seed}")
705
 
706
- # Log character details
707
- for char in story_request.characters:
708
- print(f" - {char.name}: {char.description[:50]}...")
709
-
710
- update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation with dynamic character consistency...")
711
 
712
  total_scenes = len(story_request.scenes)
713
  generated_pages = []
@@ -716,37 +482,27 @@ def generate_storybook_background(job_id: str):
716
  for i, scene in enumerate(story_request.scenes):
717
  progress = 5 + int((i / total_scenes) * 90)
718
 
719
- # Extract characters for this scene
720
- characters_present = []
721
- if hasattr(scene, 'characters_present') and scene.characters_present:
722
- characters_present = scene.characters_present
723
- else:
724
- # Fallback: extract from visual description using available characters
725
- available_chars = [char.name for char in story_request.characters]
726
- characters_present = extract_characters_from_visual(scene.visual, available_chars)
727
-
728
  update_job_status(
729
  job_id,
730
  JobStatus.PROCESSING,
731
  progress,
732
- f"Generating page {i+1}/{total_scenes} with {len(characters_present)} characters: {scene.visual[:50]}..."
733
  )
734
 
735
  try:
736
- print(f"πŸ–ΌοΈ Generating page {i+1} with characters: {characters_present}")
 
737
 
738
- # Generate consistent image using dynamic character templates
739
- image = generate_consistent_image(
740
  scene.visual,
741
  story_request.model_choice,
742
  story_request.style,
743
- characters_present,
744
- character_templates,
745
  i + 1,
746
  story_request.consistency_seed
747
  )
748
 
749
- # ADD LOCAL FALLBACK SAVE
750
  local_filepath, local_filename = save_image_to_local(image, scene.visual, story_request.style)
751
  print(f"πŸ’Ύ Image saved locally as backup: {local_filename}")
752
 
@@ -776,13 +532,12 @@ def generate_storybook_background(job_id: str):
776
  "text_url": text_url,
777
  "text_content": scene.text,
778
  "visual_description": scene.visual,
779
- "characters_present": characters_present,
780
- "prompt_used": f"Dynamic consistent generation with {len(characters_present)} characters",
781
- "local_backup_path": local_filepath # Store local path as backup
782
  }
783
  generated_pages.append(page_data)
784
 
785
- print(f"βœ… Page {i+1} completed - Characters: {characters_present}")
786
 
787
  except Exception as upload_error:
788
  # If OCI upload fails, use local file as fallback
@@ -791,12 +546,11 @@ def generate_storybook_background(job_id: str):
791
 
792
  page_data = {
793
  "page_number": i + 1,
794
- "image_url": f"local://{local_filepath}", # Mark as local file
795
  "text_url": f"local://text_content_{i+1}",
796
  "text_content": scene.text,
797
  "visual_description": scene.visual,
798
- "characters_present": characters_present,
799
- "prompt_used": f"Dynamic consistent generation with {len(characters_present)} characters",
800
  "local_backup_path": local_filepath,
801
  "upload_error": str(upload_error)
802
  }
@@ -821,13 +575,11 @@ def generate_storybook_background(job_id: str):
821
  result = {
822
  "story_title": story_request.story_title,
823
  "total_pages": total_scenes,
824
- "characters_used": len(story_request.characters),
825
  "generated_pages": len(generated_pages),
826
  "generation_time": round(generation_time, 2),
827
  "folder_path": f"stories/{story_request.story_title}",
828
  "oci_bucket_url": f"https://oci.com/stories/{story_request.story_title}",
829
  "consistency_seed": story_request.consistency_seed,
830
- "character_names": [char.name for char in story_request.characters],
831
  "pages": generated_pages,
832
  "file_structure": {
833
  "images": [f"page_{i+1:03d}.png" for i in range(total_scenes)],
@@ -840,7 +592,7 @@ def generate_storybook_background(job_id: str):
840
  }
841
  }
842
 
843
- status_message = f"πŸŽ‰ Storybook completed! {len(generated_pages)} pages with {len(story_request.characters)} dynamic characters created in {generation_time:.2f}s."
844
  if local_fallback_count > 0:
845
  status_message += f" {local_fallback_count} pages saved locally due to OCI upload issues."
846
 
@@ -852,19 +604,19 @@ def generate_storybook_background(job_id: str):
852
  result
853
  )
854
 
855
- print(f"πŸŽ‰ DYNAMIC Storybook generation finished for job {job_id}")
856
  print(f"πŸ“ OCI Uploads: {oci_success_count} successful, {local_fallback_count} local fallbacks")
857
- print(f"πŸ‘₯ Dynamic character consistency maintained for {len(story_request.characters)} characters across {total_scenes} scenes")
858
 
859
  except Exception as e:
860
- error_msg = f"Dynamic story generation failed: {str(e)}"
861
  print(f"❌ {error_msg}")
862
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
863
 
864
  # FASTAPI ENDPOINTS (for n8n)
865
  @app.post("/api/generate-storybook")
866
  async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
867
- """Main endpoint for n8n integration - generates complete storybook with dynamic character consistency"""
868
  try:
869
  print(f"πŸ“₯ Received n8n request for story: {request.get('story_title', 'Unknown')}")
870
 
@@ -873,15 +625,6 @@ async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
873
  request['consistency_seed'] = random.randint(1000, 9999)
874
  print(f"🌱 Generated consistency seed: {request['consistency_seed']}")
875
 
876
- # Ensure characters have required fields
877
- if 'characters' in request:
878
- for char in request['characters']:
879
- if 'visual_prompt' not in char or not char['visual_prompt']:
880
- # Generate visual prompt from description if not provided
881
- char['visual_prompt'] = ""
882
- if 'key_features' not in char:
883
- char['key_features'] = []
884
-
885
  # Convert to Pydantic model
886
  story_request = StorybookRequest(**request)
887
 
@@ -892,25 +635,23 @@ async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
892
  # Create job immediately
893
  job_id = create_job(story_request)
894
 
895
- # Start background processing (runs independently of HF idle)
896
  background_tasks.add_task(generate_storybook_background, job_id)
897
 
898
  # Immediate response for n8n
899
  response_data = {
900
  "status": "success",
901
- "message": "Storybook generation with dynamic character consistency started successfully",
902
  "job_id": job_id,
903
  "story_title": story_request.story_title,
904
  "total_scenes": len(story_request.scenes),
905
- "total_characters": len(story_request.characters),
906
- "character_names": [char.name for char in story_request.characters],
907
  "consistency_seed": story_request.consistency_seed,
908
  "callback_url": story_request.callback_url,
909
  "estimated_time_seconds": len(story_request.scenes) * 35,
910
  "timestamp": datetime.now().isoformat()
911
  }
912
 
913
- print(f"βœ… Job {job_id} started with dynamic character consistency for: {story_request.story_title}")
914
 
915
  return response_data
916
 
@@ -945,7 +686,6 @@ async def api_health():
945
  "timestamp": datetime.now().isoformat(),
946
  "active_jobs": len(job_storage),
947
  "models_loaded": list(model_cache.keys()),
948
- "fallback_templates": list(FALLBACK_CHARACTER_TEMPLATES.keys()),
949
  "oci_api_connected": OCI_API_BASE_URL
950
  }
951
 
@@ -965,89 +705,31 @@ async def delete_local_image_api(filename: str):
965
  except Exception as e:
966
  return {"status": "error", "message": str(e)}
967
 
968
- # MISSING HELPER FUNCTIONS FOR GRADIO INTERFACE
969
- def delete_current_image(filepath):
970
- """Delete the currently displayed image"""
971
- if not filepath:
972
- return "❌ No image to delete", None, None, refresh_local_images()
973
-
974
- success, message = delete_local_image(filepath)
975
- updated_files = refresh_local_images()
976
-
977
- if success:
978
- status_msg = f"βœ… {message}"
979
- return status_msg, None, "Image deleted successfully!", updated_files
980
- else:
981
- return f"❌ {message}", None, "Delete failed", updated_files
982
-
983
- def clear_all_images():
984
- """Delete all local images"""
985
- try:
986
- storage_info = get_local_storage_info()
987
- deleted_count = 0
988
-
989
- if "images" in storage_info:
990
- for image_info in storage_info["images"]:
991
- success, _ = delete_local_image(image_info["path"])
992
- if success:
993
- deleted_count += 1
994
-
995
- updated_files = refresh_local_images()
996
- return f"βœ… Deleted {deleted_count} images", updated_files
997
- except Exception as e:
998
- return f"❌ Error: {str(e)}", refresh_local_images()
999
-
1000
- # Enhanced Gradio interface with dynamic character testing
1001
  def create_gradio_interface():
1002
- """Create Gradio interface with dynamic character consistency features"""
1003
 
1004
- def generate_test_image_with_characters(prompt, model_choice, style_choice, character_names_text):
1005
- """Generate a single image for testing character consistency"""
1006
  try:
1007
  if not prompt.strip():
1008
  return None, "❌ Please enter a prompt", None
1009
 
1010
- # Parse character names from text input
1011
- character_names = [name.strip() for name in character_names_text.split(",") if name.strip()]
1012
-
1013
- print(f"🎨 Generating test image with prompt: {prompt}")
1014
- print(f"πŸ‘₯ Character names: {character_names}")
1015
-
1016
- # Create dynamic character templates for testing
1017
- character_templates = {}
1018
- for char_name in character_names:
1019
- character_templates[char_name] = {
1020
- "visual_prompt": f"{char_name}, distinctive appearance, consistent features",
1021
- "key_features": ["consistent appearance", "maintain features"],
1022
- "consistency_keywords": f"consistent {char_name}"
1023
- }
1024
-
1025
- # Enhance the prompt with character consistency
1026
- enhanced_prompt, negative_prompt = enhance_prompt_with_characters(
1027
- prompt, character_names, character_templates, style_choice, 1
1028
- )
1029
 
1030
- # Generate the image
1031
- image = generate_consistent_image(
1032
  prompt,
1033
  model_choice,
1034
  style_choice,
1035
- character_names,
1036
- character_templates,
1037
  1
1038
  )
1039
 
1040
  # Save to local storage
1041
  filepath, filename = save_image_to_local(image, prompt, style_choice)
1042
 
1043
- character_info = f"πŸ‘₯ Characters: {', '.join(character_names)}" if character_names else "πŸ‘₯ No specific characters"
1044
-
1045
  status_msg = f"""βœ… Success! Generated: {prompt}
1046
 
1047
- {character_info}
1048
-
1049
- 🎨 Enhanced prompt: {enhanced_prompt[:200]}...
1050
-
1051
  πŸ“ **Local file:** {filename if filename else 'Not saved'}"""
1052
 
1053
  return image, status_msg, filepath
@@ -1057,9 +739,9 @@ def create_gradio_interface():
1057
  print(error_msg)
1058
  return None, error_msg, None
1059
 
1060
- with gr.Blocks(title="Premium Children's Book Illustrator with Dynamic Character Consistency", theme="soft") as demo:
1061
- gr.Markdown("# 🎨 Premium Children's Book Illustrator")
1062
- gr.Markdown("Generate **studio-quality** storybook images with **dynamic character consistency**")
1063
 
1064
  # Storage info display
1065
  storage_info = gr.Textbox(
@@ -1087,24 +769,16 @@ def create_gradio_interface():
1087
  style_dropdown = gr.Dropdown(
1088
  label="Art Style",
1089
  choices=["childrens_book", "realistic", "fantasy", "anime"],
1090
- value="childrens_book"
1091
- )
1092
-
1093
- # Dynamic character input for testing
1094
- character_names_input = gr.Textbox(
1095
- label="Character Names (comma-separated)",
1096
- placeholder="Enter character names: Sparkle the Star Cat, Benny the Bunny, Tilly the Turtle",
1097
- info="Enter character names to test consistency features",
1098
- lines=2
1099
  )
1100
 
1101
  prompt_input = gr.Textbox(
1102
- label="Scene Description",
1103
- placeholder="Describe your scene with character interactions...\nExample: Sparkle the Star Cat chasing butterflies while Benny the Bunny watches",
1104
  lines=3
1105
  )
1106
 
1107
- generate_btn = gr.Button("✨ Generate Premium Image", variant="primary")
1108
 
1109
  # Current image management
1110
  current_file_path = gr.State()
@@ -1116,65 +790,13 @@ def create_gradio_interface():
1116
  **For complete storybooks (OCI bucket):**
1117
  - Endpoint: `POST /api/generate-storybook`
1118
  - Input: `story_title`, `scenes[]`, `characters[]`
1119
- - Output: Saves to OCI bucket with dynamic character consistency
1120
  """)
1121
 
1122
  with gr.Column(scale=2):
1123
  image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
1124
  status_output = gr.Textbox(label="Status", interactive=False, lines=4)
1125
 
1126
- # Dynamic character guidance section
1127
- with gr.Accordion("πŸ‘₯ Dynamic Character Guidance", open=False):
1128
- gr.Markdown("""
1129
- ### How to Use Dynamic Characters from n8n:
1130
-
1131
- **n8n Payload Structure:**
1132
- ```json
1133
- {
1134
- "story_title": "Your Story Title",
1135
- "characters": [
1136
- {
1137
- "name": "Character Name",
1138
- "description": "Character description...",
1139
- "visual_prompt": "Detailed visual description", // Optional
1140
- "key_features": ["feature1", "feature2"] // Optional
1141
- }
1142
- ],
1143
- "scenes": [
1144
- {
1145
- "visual": "Scene description with characters...",
1146
- "text": "Scene text...",
1147
- "characters_present": ["Character Name"] // Optional
1148
- }
1149
- ]
1150
- }
1151
- ```
1152
-
1153
- **Features:**
1154
- - βœ… Dynamic character processing from n8n
1155
- - βœ… Automatic visual prompt generation
1156
- - βœ… Key feature extraction
1157
- - βœ… Cross-scene consistency
1158
- - βœ… Flexible character numbers and types
1159
- """)
1160
-
1161
- # Examples section
1162
- with gr.Accordion("πŸ’‘ Prompt Examples & Tips", open=False):
1163
- gr.Markdown("""
1164
- ## 🎨 Professional Prompt Examples with Dynamic Characters:
1165
-
1166
- **Best Results with Dynamic Characters:**
1167
- - "Sparkle the Star Cat chasing butterflies in a sunny meadow"
1168
- - "Benny the Bunny and Tilly the Turtle having a picnic"
1169
- - "Multiple characters discovering a magical portal together"
1170
-
1171
- ## ⚑ Dynamic Character Consistency Tips:
1172
- 1. **Always mention character names** in your prompts
1173
- 2. **n8n will send character details** automatically
1174
- 3. **The system processes any number** of characters dynamically
1175
- 4. **Consistency is maintained** across all scenes automatically
1176
- """)
1177
-
1178
  # Local file management section
1179
  with gr.Accordion("πŸ“ Manage Local Test Images", open=True):
1180
  gr.Markdown("### Locally Saved Images")
@@ -1193,27 +815,41 @@ def create_gradio_interface():
1193
 
1194
  clear_status = gr.Textbox(label="Clear Status", interactive=False)
1195
 
1196
- # Debug section
1197
- with gr.Accordion("πŸ”§ Advanced Settings", open=False):
1198
- debug_btn = gr.Button("πŸ”„ Check System Status", variant="secondary")
1199
- debug_output = gr.Textbox(label="System Info", interactive=False, lines=4)
1200
-
1201
- def check_system_status():
1202
- """Check system status"""
1203
- active_jobs = len(job_storage)
1204
- return f"""**System Status:**
1205
- - Model: {current_model_name}
1206
- - Dynamic Character Processing: βœ… Enabled
1207
- - Fallback Templates: {len(FALLBACK_CHARACTER_TEMPLATES)} available
1208
- - OCI API: {OCI_API_BASE_URL}
1209
- - Local Storage: {get_local_storage_info().get('total_files', 0)} images
1210
- - Active Jobs: {active_jobs}
1211
- - Ready for dynamic character consistency generation!"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1212
 
1213
  # Connect buttons to functions
1214
  generate_btn.click(
1215
- fn=generate_test_image_with_characters,
1216
- inputs=[prompt_input, model_dropdown, style_dropdown, character_names_input],
1217
  outputs=[image_output, status_output, current_file_path]
1218
  ).then(
1219
  fn=refresh_local_images,
@@ -1248,26 +884,20 @@ def create_gradio_interface():
1248
  outputs=storage_info
1249
  )
1250
 
1251
- debug_btn.click(
1252
- fn=check_system_status,
1253
- inputs=None,
1254
- outputs=debug_output
1255
- )
1256
-
1257
  # Initialize on load
1258
  demo.load(fn=refresh_local_images, outputs=file_gallery)
1259
  demo.load(fn=update_storage_info, outputs=storage_info)
1260
 
1261
  return demo
1262
 
1263
- # Create enhanced Gradio app
1264
  demo = create_gradio_interface()
1265
 
1266
- # Enhanced root endpoint that explains the API structure
1267
  @app.get("/")
1268
  async def root():
1269
  return {
1270
- "message": "Storybook Generator API with Dynamic Character Consistency is running!",
1271
  "api_endpoints": {
1272
  "health_check": "GET /api/health",
1273
  "generate_storybook": "POST /api/generate-storybook",
@@ -1275,13 +905,10 @@ async def root():
1275
  "local_images": "GET /api/local-images"
1276
  },
1277
  "features": {
1278
- "dynamic_characters": "βœ… Enabled",
1279
- "character_consistency": "βœ… Enabled",
1280
- "flexible_storytelling": "βœ… Enabled",
1281
  "n8n_integration": "βœ… Enabled"
1282
  },
1283
- "web_interface": "GET /ui",
1284
- "note": "Use API endpoints for programmatic access with dynamic characters from n8n"
1285
  }
1286
 
1287
  # Add a simple test endpoint
@@ -1289,9 +916,8 @@ async def root():
1289
  async def test_endpoint():
1290
  return {
1291
  "status": "success",
1292
- "message": "API with dynamic character consistency is working correctly",
1293
- "dynamic_processing": "βœ… Enabled",
1294
- "fallback_templates": len(FALLBACK_CHARACTER_TEMPLATES),
1295
  "timestamp": datetime.now().isoformat()
1296
  }
1297
 
@@ -1310,8 +936,7 @@ if __name__ == "__main__":
1310
  print("πŸš€ Running on Hugging Face Spaces - Integrated Mode")
1311
  print("πŸ“š API endpoints available at: /api/*")
1312
  print("🎨 Web interface available at: /ui")
1313
- print("πŸ‘₯ Dynamic character consistency features enabled")
1314
- print("πŸ”Œ Both API and UI running on same port")
1315
 
1316
  # Mount Gradio without reassigning app
1317
  gr.mount_gradio_app(app, demo, path="/ui")
@@ -1328,7 +953,7 @@ if __name__ == "__main__":
1328
  print("πŸš€ Running locally - Separate API and UI servers")
1329
  print("πŸ“š API endpoints: http://localhost:8000/api/*")
1330
  print("🎨 Web interface: http://localhost:7860/ui")
1331
- print("πŸ‘₯ Dynamic character consistency features enabled")
1332
 
1333
  def run_fastapi():
1334
  """Run FastAPI on port 8000 for API calls"""
@@ -1344,25 +969,4 @@ if __name__ == "__main__":
1344
  """Run Gradio on port 7860 for web interface"""
1345
  demo.launch(
1346
  server_name="0.0.0.0",
1347
- server_port=7860,
1348
- share=False,
1349
- show_error=True,
1350
- quiet=True
1351
- )
1352
-
1353
- # Start both servers in separate threads
1354
- api_thread = threading.Thread(target=run_fastapi, daemon=True)
1355
- ui_thread = threading.Thread(target=run_gradio, daemon=True)
1356
-
1357
- api_thread.start()
1358
- print("βœ… FastAPI server started on port 8000")
1359
-
1360
- ui_thread.start()
1361
- print("βœ… Gradio server started on port 7860")
1362
-
1363
- # Keep the main thread alive
1364
- try:
1365
- while True:
1366
- time.sleep(1)
1367
- except KeyboardInterrupt:
1368
- print("πŸ›‘ Shutting down servers...")
 
51
  COMPLETED = "completed"
52
  FAILED = "failed"
53
 
54
+ # Simple Story scene model
55
  class StoryScene(BaseModel):
56
  visual: str
57
  text: str
 
 
58
 
59
  class CharacterDescription(BaseModel):
60
  name: str
61
  description: str
 
 
62
 
63
  class StorybookRequest(BaseModel):
64
  story_title: str
 
67
  model_choice: str = "dreamshaper-8"
68
  style: str = "childrens_book"
69
  callback_url: Optional[str] = None
70
+ consistency_seed: Optional[int] = None
71
 
72
  class JobStatusResponse(BaseModel):
73
  job_id: str
 
87
  "sd-2.1": "stabilityai/stable-diffusion-2-1",
88
  }
89
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  # GLOBAL STORAGE
91
  job_storage = {}
92
  model_cache = {}
 
139
  load_model("dreamshaper-8")
140
  print("βœ… Model loaded and ready!")
141
 
142
+ # SIMPLE PROMPT ENGINEERING - USE PURE PROMPTS ONLY
143
+ def enhance_prompt_simple(scene_visual, style="childrens_book"):
144
+ """Simple prompt enhancement - uses only the provided visual prompt with style"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
  # Style templates
147
  style_templates = {
 
153
 
154
  style_prompt = style_templates.get(style, style_templates["childrens_book"])
155
 
156
+ # Use only the provided visual prompt with style
157
+ enhanced_prompt = f"{style_prompt}, {scene_visual}"
 
158
 
159
+ # Basic negative prompt for quality
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  negative_prompt = (
 
 
161
  "blurry, low quality, bad anatomy, deformed characters, "
162
+ "wrong proportions, mismatched features"
163
  )
164
 
165
  return enhanced_prompt, negative_prompt
166
 
167
+ def generate_image_simple(prompt, model_choice, style, scene_number, consistency_seed=None):
168
+ """Generate image using pure prompts only"""
 
 
 
 
 
 
 
 
 
169
 
170
+ # Enhance prompt with simple style addition
171
+ enhanced_prompt, negative_prompt = enhance_prompt_simple(prompt, style)
 
 
 
172
 
173
+ # Use seed if provided
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
  if consistency_seed:
175
+ scene_seed = consistency_seed + scene_number
176
  else:
177
+ scene_seed = random.randint(1000, 9999)
 
 
 
178
 
179
  try:
180
  pipe = load_model(model_choice)
 
182
  image = pipe(
183
  prompt=enhanced_prompt,
184
  negative_prompt=negative_prompt,
185
+ num_inference_steps=35,
186
+ guidance_scale=7.5,
187
  width=768,
188
+ height=1024, # Portrait for better full-body
189
  generator=torch.Generator(device="cpu").manual_seed(scene_seed)
190
  ).images[0]
191
 
192
+ print(f"βœ… Generated image for scene {scene_number}")
 
193
  print(f"🌱 Seed used: {scene_seed}")
194
+ print(f"πŸ“ Pure prompt used: {prompt}")
195
 
196
  return image
197
 
198
  except Exception as e:
199
+ print(f"❌ Generation failed: {str(e)}")
200
  raise
201
 
 
 
 
 
 
 
 
 
 
202
  # LOCAL FILE MANAGEMENT FUNCTIONS
203
  def save_image_to_local(image, prompt, style="test"):
204
  """Save image to local persistent storage"""
 
304
  'subfolder': f'stories/{story_title}'
305
  }
306
 
307
+ # Create session with retry strategy
308
  session = requests.Session()
309
  retry_strategy = Retry(
310
+ total=3,
311
+ status_forcelist=[429, 500, 502, 503, 504],
312
+ allowed_methods=["POST"],
313
+ backoff_factor=1
314
  )
315
  adapter = HTTPAdapter(max_retries=retry_strategy)
316
  session.mount("http://", adapter)
 
354
  print(f"πŸ”§ Connection test failed: {e}")
355
  return False
356
 
 
357
  # JOB MANAGEMENT FUNCTIONS
358
  def create_job(story_request: StorybookRequest) -> str:
359
  job_id = str(uuid.uuid4())
360
 
 
 
 
 
361
  job_storage[job_id] = {
362
  "status": JobStatus.PENDING,
363
  "progress": 0,
 
366
  "result": None,
367
  "created_at": time.time(),
368
  "updated_at": time.time(),
369
+ "pages": []
 
 
370
  }
371
 
372
  print(f"πŸ“ Created job {job_id} for story: {story_request.story_title}")
373
+ print(f"πŸ“„ Scenes to generate: {len(story_request.scenes)}")
374
 
375
  return job_id
376
 
 
396
  try:
397
  callback_url = request_data["callback_url"]
398
 
399
+ # Simple callback data
400
  callback_data = {
401
  "job_id": job_id,
402
  "status": status.value,
 
404
  "message": message,
405
  "story_title": request_data["story_title"],
406
  "total_scenes": len(request_data["scenes"]),
 
407
  "timestamp": time.time(),
408
+ "source": "huggingface-storybook-generator"
 
409
  }
410
 
411
  # Add result data for completed jobs
 
414
  "total_pages": result.get("total_pages", 0),
415
  "generation_time": result.get("generation_time", 0),
416
  "oci_bucket_url": result.get("oci_bucket_url", ""),
417
+ "pages_generated": result.get("generated_pages", 0)
 
418
  }
419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
420
  headers = {
421
  'Content-Type': 'application/json',
422
  'User-Agent': 'Storybook-Generator/1.0'
 
453
 
454
  return "Unknown"
455
 
456
+ # SIMPLE BACKGROUND TASK - USES PURE PROMPTS ONLY
457
  def generate_storybook_background(job_id: str):
458
+ """Background task to generate complete storybook using pure prompts only"""
459
  try:
460
+ # Test OCI connection first
461
+ print("πŸ”§ Testing OCI API connection...")
462
+ oci_connected = test_oci_connection()
463
+ if not oci_connected:
464
+ print("⚠️ OCI API connection test failed - will use local fallback")
465
+
466
  job_data = job_storage[job_id]
467
  story_request_data = job_data["request"]
468
  story_request = StorybookRequest(**story_request_data)
 
469
 
470
+ print(f"🎬 Starting storybook generation for job {job_id}")
471
  print(f"πŸ“– Story: {story_request.story_title}")
 
472
  print(f"πŸ“„ Scenes: {len(story_request.scenes)}")
473
+ print(f"🎨 Style: {story_request.style}")
474
  print(f"🌱 Consistency seed: {story_request.consistency_seed}")
475
 
476
+ update_job_status(job_id, JobStatus.PROCESSING, 5, "Starting storybook generation with pure prompts...")
 
 
 
 
477
 
478
  total_scenes = len(story_request.scenes)
479
  generated_pages = []
 
482
  for i, scene in enumerate(story_request.scenes):
483
  progress = 5 + int((i / total_scenes) * 90)
484
 
 
 
 
 
 
 
 
 
 
485
  update_job_status(
486
  job_id,
487
  JobStatus.PROCESSING,
488
  progress,
489
+ f"Generating page {i+1}/{total_scenes}: {scene.visual[:50]}..."
490
  )
491
 
492
  try:
493
+ print(f"πŸ–ΌοΈ Generating page {i+1}")
494
+ print(f"πŸ“ Pure prompt: {scene.visual}")
495
 
496
+ # Generate image using pure prompt only
497
+ image = generate_image_simple(
498
  scene.visual,
499
  story_request.model_choice,
500
  story_request.style,
 
 
501
  i + 1,
502
  story_request.consistency_seed
503
  )
504
 
505
+ # Save locally as backup
506
  local_filepath, local_filename = save_image_to_local(image, scene.visual, story_request.style)
507
  print(f"πŸ’Ύ Image saved locally as backup: {local_filename}")
508
 
 
532
  "text_url": text_url,
533
  "text_content": scene.text,
534
  "visual_description": scene.visual,
535
+ "prompt_used": scene.visual, # Store the pure prompt
536
+ "local_backup_path": local_filepath
 
537
  }
538
  generated_pages.append(page_data)
539
 
540
+ print(f"βœ… Page {i+1} completed")
541
 
542
  except Exception as upload_error:
543
  # If OCI upload fails, use local file as fallback
 
546
 
547
  page_data = {
548
  "page_number": i + 1,
549
+ "image_url": f"local://{local_filepath}",
550
  "text_url": f"local://text_content_{i+1}",
551
  "text_content": scene.text,
552
  "visual_description": scene.visual,
553
+ "prompt_used": scene.visual,
 
554
  "local_backup_path": local_filepath,
555
  "upload_error": str(upload_error)
556
  }
 
575
  result = {
576
  "story_title": story_request.story_title,
577
  "total_pages": total_scenes,
 
578
  "generated_pages": len(generated_pages),
579
  "generation_time": round(generation_time, 2),
580
  "folder_path": f"stories/{story_request.story_title}",
581
  "oci_bucket_url": f"https://oci.com/stories/{story_request.story_title}",
582
  "consistency_seed": story_request.consistency_seed,
 
583
  "pages": generated_pages,
584
  "file_structure": {
585
  "images": [f"page_{i+1:03d}.png" for i in range(total_scenes)],
 
592
  }
593
  }
594
 
595
+ status_message = f"πŸŽ‰ Storybook completed! {len(generated_pages)} pages created in {generation_time:.2f}s using pure prompts."
596
  if local_fallback_count > 0:
597
  status_message += f" {local_fallback_count} pages saved locally due to OCI upload issues."
598
 
 
604
  result
605
  )
606
 
607
+ print(f"πŸŽ‰ Storybook generation finished for job {job_id}")
608
  print(f"πŸ“ OCI Uploads: {oci_success_count} successful, {local_fallback_count} local fallbacks")
609
+ print(f"πŸ“ All prompts used exactly as provided from Telegram")
610
 
611
  except Exception as e:
612
+ error_msg = f"Story generation failed: {str(e)}"
613
  print(f"❌ {error_msg}")
614
  update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
615
 
616
  # FASTAPI ENDPOINTS (for n8n)
617
  @app.post("/api/generate-storybook")
618
  async def generate_storybook(request: dict, background_tasks: BackgroundTasks):
619
+ """Main endpoint for n8n integration - generates complete storybook using pure prompts"""
620
  try:
621
  print(f"πŸ“₯ Received n8n request for story: {request.get('story_title', 'Unknown')}")
622
 
 
625
  request['consistency_seed'] = random.randint(1000, 9999)
626
  print(f"🌱 Generated consistency seed: {request['consistency_seed']}")
627
 
 
 
 
 
 
 
 
 
 
628
  # Convert to Pydantic model
629
  story_request = StorybookRequest(**request)
630
 
 
635
  # Create job immediately
636
  job_id = create_job(story_request)
637
 
638
+ # Start background processing
639
  background_tasks.add_task(generate_storybook_background, job_id)
640
 
641
  # Immediate response for n8n
642
  response_data = {
643
  "status": "success",
644
+ "message": "Storybook generation with pure prompts started successfully",
645
  "job_id": job_id,
646
  "story_title": story_request.story_title,
647
  "total_scenes": len(story_request.scenes),
 
 
648
  "consistency_seed": story_request.consistency_seed,
649
  "callback_url": story_request.callback_url,
650
  "estimated_time_seconds": len(story_request.scenes) * 35,
651
  "timestamp": datetime.now().isoformat()
652
  }
653
 
654
+ print(f"βœ… Job {job_id} started with pure prompts for: {story_request.story_title}")
655
 
656
  return response_data
657
 
 
686
  "timestamp": datetime.now().isoformat(),
687
  "active_jobs": len(job_storage),
688
  "models_loaded": list(model_cache.keys()),
 
689
  "oci_api_connected": OCI_API_BASE_URL
690
  }
691
 
 
705
  except Exception as e:
706
  return {"status": "error", "message": str(e)}
707
 
708
+ # SIMPLE GRADIO INTERFACE
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
709
  def create_gradio_interface():
710
+ """Create simple Gradio interface for testing"""
711
 
712
+ def generate_test_image_simple(prompt, model_choice, style_choice):
713
+ """Generate a single image using pure prompt only"""
714
  try:
715
  if not prompt.strip():
716
  return None, "❌ Please enter a prompt", None
717
 
718
+ print(f"🎨 Generating test image with pure prompt: {prompt}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
719
 
720
+ # Generate the image using pure prompt
721
+ image = generate_image_simple(
722
  prompt,
723
  model_choice,
724
  style_choice,
 
 
725
  1
726
  )
727
 
728
  # Save to local storage
729
  filepath, filename = save_image_to_local(image, prompt, style_choice)
730
 
 
 
731
  status_msg = f"""βœ… Success! Generated: {prompt}
732
 
 
 
 
 
733
  πŸ“ **Local file:** {filename if filename else 'Not saved'}"""
734
 
735
  return image, status_msg, filepath
 
739
  print(error_msg)
740
  return None, error_msg, None
741
 
742
+ with gr.Blocks(title="Simple Image Generator", theme="soft") as demo:
743
+ gr.Markdown("# 🎨 Simple Image Generator")
744
+ gr.Markdown("Generate images using **pure prompts only** - no automatic enhancements")
745
 
746
  # Storage info display
747
  storage_info = gr.Textbox(
 
769
  style_dropdown = gr.Dropdown(
770
  label="Art Style",
771
  choices=["childrens_book", "realistic", "fantasy", "anime"],
772
+ value="anime"
 
 
 
 
 
 
 
 
773
  )
774
 
775
  prompt_input = gr.Textbox(
776
+ label="Pure Prompt",
777
+ placeholder="Enter your exact prompt...",
778
  lines=3
779
  )
780
 
781
+ generate_btn = gr.Button("✨ Generate Image", variant="primary")
782
 
783
  # Current image management
784
  current_file_path = gr.State()
 
790
  **For complete storybooks (OCI bucket):**
791
  - Endpoint: `POST /api/generate-storybook`
792
  - Input: `story_title`, `scenes[]`, `characters[]`
793
+ - Output: Uses pure prompts only from your script
794
  """)
795
 
796
  with gr.Column(scale=2):
797
  image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
798
  status_output = gr.Textbox(label="Status", interactive=False, lines=4)
799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
800
  # Local file management section
801
  with gr.Accordion("πŸ“ Manage Local Test Images", open=True):
802
  gr.Markdown("### Locally Saved Images")
 
815
 
816
  clear_status = gr.Textbox(label="Clear Status", interactive=False)
817
 
818
+ def delete_current_image(filepath):
819
+ """Delete the currently displayed image"""
820
+ if not filepath:
821
+ return "❌ No image to delete", None, None, refresh_local_images()
822
+
823
+ success, message = delete_local_image(filepath)
824
+ updated_files = refresh_local_images()
825
+
826
+ if success:
827
+ status_msg = f"βœ… {message}"
828
+ return status_msg, None, "Image deleted successfully!", updated_files
829
+ else:
830
+ return f"❌ {message}", None, "Delete failed", updated_files
831
+
832
+ def clear_all_images():
833
+ """Delete all local images"""
834
+ try:
835
+ storage_info = get_local_storage_info()
836
+ deleted_count = 0
837
+
838
+ if "images" in storage_info:
839
+ for image_info in storage_info["images"]:
840
+ success, _ = delete_local_image(image_info["path"])
841
+ if success:
842
+ deleted_count += 1
843
+
844
+ updated_files = refresh_local_images()
845
+ return f"βœ… Deleted {deleted_count} images", updated_files
846
+ except Exception as e:
847
+ return f"❌ Error: {str(e)}", refresh_local_images()
848
 
849
  # Connect buttons to functions
850
  generate_btn.click(
851
+ fn=generate_test_image_simple,
852
+ inputs=[prompt_input, model_dropdown, style_dropdown],
853
  outputs=[image_output, status_output, current_file_path]
854
  ).then(
855
  fn=refresh_local_images,
 
884
  outputs=storage_info
885
  )
886
 
 
 
 
 
 
 
887
  # Initialize on load
888
  demo.load(fn=refresh_local_images, outputs=file_gallery)
889
  demo.load(fn=update_storage_info, outputs=storage_info)
890
 
891
  return demo
892
 
893
+ # Create simple Gradio app
894
  demo = create_gradio_interface()
895
 
896
+ # Simple root endpoint
897
  @app.get("/")
898
  async def root():
899
  return {
900
+ "message": "Simple Storybook Generator API is running!",
901
  "api_endpoints": {
902
  "health_check": "GET /api/health",
903
  "generate_storybook": "POST /api/generate-storybook",
 
905
  "local_images": "GET /api/local-images"
906
  },
907
  "features": {
908
+ "pure_prompts": "βœ… Enabled - No automatic enhancements",
 
 
909
  "n8n_integration": "βœ… Enabled"
910
  },
911
+ "web_interface": "GET /ui"
 
912
  }
913
 
914
  # Add a simple test endpoint
 
916
  async def test_endpoint():
917
  return {
918
  "status": "success",
919
+ "message": "API with pure prompts is working correctly",
920
+ "pure_prompts": "βœ… Enabled - Using exact prompts from Telegram",
 
921
  "timestamp": datetime.now().isoformat()
922
  }
923
 
 
936
  print("πŸš€ Running on Hugging Face Spaces - Integrated Mode")
937
  print("πŸ“š API endpoints available at: /api/*")
938
  print("🎨 Web interface available at: /ui")
939
+ print("πŸ“ PURE PROMPTS enabled - no automatic enhancements")
 
940
 
941
  # Mount Gradio without reassigning app
942
  gr.mount_gradio_app(app, demo, path="/ui")
 
953
  print("πŸš€ Running locally - Separate API and UI servers")
954
  print("πŸ“š API endpoints: http://localhost:8000/api/*")
955
  print("🎨 Web interface: http://localhost:7860/ui")
956
+ print("πŸ“ PURE PROMPTS enabled - no automatic enhancements")
957
 
958
  def run_fastapi():
959
  """Run FastAPI on port 8000 for API calls"""
 
969
  """Run Gradio on port 7860 for web interface"""
970
  demo.launch(
971
  server_name="0.0.0.0",
972
+