yukee1992 committed on
Commit
f9587af
·
verified ·
1 Parent(s): dbad725

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -15
app.py CHANGED
@@ -60,7 +60,7 @@ MODEL_CHOICES = {
60
  # GLOBAL MODEL CACHE - Load once, reuse forever
61
  model_cache = {}
62
  current_model_name = None
63
- pipe = None
64
 
65
  # Character consistency tracking
66
  character_descriptions = {}
@@ -68,12 +68,14 @@ character_seeds = {} # Store seeds for consistent character generation
68
 
69
  def load_model(model_name="dreamshaper-8"):
70
  """Load model into global cache - runs only once per model"""
71
- global model_cache, current_model_name, pipe
72
 
73
  # Return cached model if already loaded
74
  if model_name in model_cache:
75
  print(f"βœ… Using cached model: {model_name}")
76
- return model_cache[model_name]
 
 
77
 
78
  print(f"πŸ”„ Loading model for the first time: {model_name}")
79
  try:
@@ -92,6 +94,7 @@ def load_model(model_name="dreamshaper-8"):
92
 
93
  # Cache the model for future use
94
  model_cache[model_name] = pipe
 
95
  current_model_name = model_name
96
 
97
  print(f"βœ… Model loaded and cached: {model_name}")
@@ -100,16 +103,19 @@ def load_model(model_name="dreamshaper-8"):
100
  except Exception as e:
101
  print(f"❌ Model loading failed: {e}")
102
  # Fallback to SD 1.5
103
- return StableDiffusionPipeline.from_pretrained(
104
  "runwayml/stable-diffusion-v1-5",
105
  torch_dtype=torch.float32,
106
  safety_checker=None,
107
  requires_safety_checker=False
108
  ).to("cpu")
 
 
 
109
 
110
  # Load the default model once at startup
111
  print("πŸš€ Initializing Storybook Generator...")
112
- pipe = load_model("dreamshaper-8")
113
  print("βœ… Default model loaded and ready!")
114
 
115
  # PROFESSIONAL PROMPT ENGINEERING
@@ -220,13 +226,13 @@ def get_character_seed(story_title, character_name):
220
 
221
  def generate_storybook_page(scene_visual, story_title, sequence_number, scene_text, characters, model_choice="dreamshaper-8", style="childrens_book"):
222
  """Generate a storybook page with character consistency"""
223
- global pipe, current_model_name
224
 
225
  try:
226
- # Switch model if different from current
227
  if model_choice != current_model_name:
228
  print(f"πŸ”„ Switching to model: {model_choice}")
229
- pipe = load_model(model_choice)
230
 
231
  # ENHANCE PROMPT WITH CHARACTER CONTEXT
232
  enhanced_visual = enhance_with_character_context(scene_visual, story_title, characters)
@@ -247,11 +253,14 @@ def generate_storybook_page(scene_visual, story_title, sequence_number, scene_te
247
  # Use seed from main character for consistency
248
  main_char_seed = get_character_seed(story_title, characters[0].name)
249
  generator.manual_seed(main_char_seed)
 
250
  else:
251
- generator.manual_seed(int(time.time()))
 
 
252
 
253
- # Generate high-quality image
254
- image = pipe(
255
  prompt=enhanced_prompt,
256
  negative_prompt=negative_prompt,
257
  num_inference_steps=30,
@@ -271,7 +280,7 @@ def generate_storybook_page(scene_visual, story_title, sequence_number, scene_te
271
 
272
  def batch_generate_complete_storybook(story_title, scenes_data, characters, model_choice="dreamshaper-8", style="childrens_book"):
273
  """Generate complete storybook with images and text - MODEL LOADS ONLY ONCE"""
274
- global character_descriptions
275
 
276
  results = []
277
  status_messages = []
@@ -286,9 +295,9 @@ def batch_generate_complete_storybook(story_title, scenes_data, characters, mode
286
  character_descriptions[story_title] = characters
287
  print(f"βœ… Character context stored for {story_title}")
288
 
289
- # Load model once at the beginning
290
- global pipe
291
- pipe = load_model(model_choice)
292
 
293
  start_time = time.time()
294
 
@@ -324,6 +333,8 @@ async def api_generate_storybook(request: StorybookRequest):
324
  for char in request.characters:
325
  print(f" - {char.name}: {char.description[:50]}...")
326
 
 
 
327
  # Convert to scene data format
328
  scenes_data = [{"visual": scene.visual, "text": scene.text} for scene in request.scenes]
329
 
@@ -336,12 +347,15 @@ async def api_generate_storybook(request: StorybookRequest):
336
  request.style
337
  )
338
 
 
 
339
  return {
340
  "status": "success",
341
  "story_title": request.story_title,
342
  "total_pages": len(request.scenes),
343
  "characters_used": len(request.characters),
344
  "generated_pages": len(results),
 
345
  "message": status,
346
  "folder_path": f"storybook-library/stories/{request.story_title.replace(' ', '_')}/",
347
  "pages": [
@@ -379,6 +393,11 @@ def generate_single_page(prompt, story_title, scene_text, model_choice, style):
379
  if not prompt or not story_title:
380
  return None, "❌ Please enter both scene description and story title"
381
 
 
 
 
 
 
382
  image, status = generate_storybook_page(
383
  prompt, story_title, 1, scene_text or "", [], model_choice, style
384
  )
 
60
  # GLOBAL MODEL CACHE - Load once, reuse forever
61
  model_cache = {}
62
  current_model_name = None
63
+ current_pipe = None
64
 
65
  # Character consistency tracking
66
  character_descriptions = {}
 
68
 
69
  def load_model(model_name="dreamshaper-8"):
70
  """Load model into global cache - runs only once per model"""
71
+ global model_cache, current_model_name, current_pipe
72
 
73
  # Return cached model if already loaded
74
  if model_name in model_cache:
75
  print(f"βœ… Using cached model: {model_name}")
76
+ current_pipe = model_cache[model_name]
77
+ current_model_name = model_name
78
+ return current_pipe
79
 
80
  print(f"πŸ”„ Loading model for the first time: {model_name}")
81
  try:
 
94
 
95
  # Cache the model for future use
96
  model_cache[model_name] = pipe
97
+ current_pipe = pipe
98
  current_model_name = model_name
99
 
100
  print(f"βœ… Model loaded and cached: {model_name}")
 
103
  except Exception as e:
104
  print(f"❌ Model loading failed: {e}")
105
  # Fallback to SD 1.5
106
+ pipe = StableDiffusionPipeline.from_pretrained(
107
  "runwayml/stable-diffusion-v1-5",
108
  torch_dtype=torch.float32,
109
  safety_checker=None,
110
  requires_safety_checker=False
111
  ).to("cpu")
112
+ model_cache[model_name] = pipe
113
+ current_pipe = pipe
114
+ return pipe
115
 
116
  # Load the default model once at startup
117
  print("πŸš€ Initializing Storybook Generator...")
118
+ current_pipe = load_model("dreamshaper-8")
119
  print("βœ… Default model loaded and ready!")
120
 
121
  # PROFESSIONAL PROMPT ENGINEERING
 
226
 
227
  def generate_storybook_page(scene_visual, story_title, sequence_number, scene_text, characters, model_choice="dreamshaper-8", style="childrens_book"):
228
  """Generate a storybook page with character consistency"""
229
+ global current_pipe, current_model_name
230
 
231
  try:
232
+ # Switch model if different from current - BUT DON'T RELOAD UNLESS NECESSARY
233
  if model_choice != current_model_name:
234
  print(f"πŸ”„ Switching to model: {model_choice}")
235
+ current_pipe = load_model(model_choice) # This uses cached version if available
236
 
237
  # ENHANCE PROMPT WITH CHARACTER CONTEXT
238
  enhanced_visual = enhance_with_character_context(scene_visual, story_title, characters)
 
253
  # Use seed from main character for consistency
254
  main_char_seed = get_character_seed(story_title, characters[0].name)
255
  generator.manual_seed(main_char_seed)
256
+ print(f"🌱 Using seed {main_char_seed} for character consistency")
257
  else:
258
+ seed = int(time.time())
259
+ generator.manual_seed(seed)
260
+ print(f"🌱 Using timestamp seed {seed}")
261
 
262
+ # Generate high-quality image - USE THE GLOBAL current_pipe
263
+ image = current_pipe(
264
  prompt=enhanced_prompt,
265
  negative_prompt=negative_prompt,
266
  num_inference_steps=30,
 
280
 
281
  def batch_generate_complete_storybook(story_title, scenes_data, characters, model_choice="dreamshaper-8", style="childrens_book"):
282
  """Generate complete storybook with images and text - MODEL LOADS ONLY ONCE"""
283
+ global character_descriptions, current_pipe
284
 
285
  results = []
286
  status_messages = []
 
295
  character_descriptions[story_title] = characters
296
  print(f"βœ… Character context stored for {story_title}")
297
 
298
+ # Load model once at the beginning - THIS IS THE KEY FIX
299
+ print(f"πŸ”§ Loading model for this storybook...")
300
+ current_pipe = load_model(model_choice)
301
 
302
  start_time = time.time()
303
 
 
333
  for char in request.characters:
334
  print(f" - {char.name}: {char.description[:50]}...")
335
 
336
+ start_time = time.time()
337
+
338
  # Convert to scene data format
339
  scenes_data = [{"visual": scene.visual, "text": scene.text} for scene in request.scenes]
340
 
 
347
  request.style
348
  )
349
 
350
+ generation_time = time.time() - start_time
351
+
352
  return {
353
  "status": "success",
354
  "story_title": request.story_title,
355
  "total_pages": len(request.scenes),
356
  "characters_used": len(request.characters),
357
  "generated_pages": len(results),
358
+ "generation_time": round(generation_time, 2),
359
  "message": status,
360
  "folder_path": f"storybook-library/stories/{request.story_title.replace(' ', '_')}/",
361
  "pages": [
 
393
  if not prompt or not story_title:
394
  return None, "❌ Please enter both scene description and story title"
395
 
396
+ # Ensure model is loaded
397
+ global current_pipe
398
+ if current_model_name != model_choice:
399
+ current_pipe = load_model(model_choice)
400
+
401
  image, status = generate_storybook_page(
402
  prompt, story_title, 1, scene_text or "", [], model_choice, style
403
  )