yukee1992 committed on
Commit
b272d86
Β·
verified Β·
1 Parent(s): 345b158

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -18
app.py CHANGED
@@ -17,7 +17,7 @@ from pydantic import BaseModel
17
  import random
18
 
19
  # External OCI API URL
20
- OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
21
 
22
  # Initialize FastAPI app
23
  app = FastAPI(title="Storybook Generator API")
@@ -52,10 +52,21 @@ MODEL_CHOICES = {
52
  "sd-2.1": "stabilityai/stable-diffusion-2-1",
53
  }
54
 
55
- # Initialize the Stable Diffusion model
 
 
 
 
56
  def load_model(model_name="dreamshaper-8"):
57
- """Load and return a high-quality Stable Diffusion model"""
58
- print(f"πŸ”„ Loading {model_name} model...")
 
 
 
 
 
 
 
59
  try:
60
  model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
61
 
@@ -70,8 +81,13 @@ def load_model(model_name="dreamshaper-8"):
70
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
71
  pipe = pipe.to("cpu")
72
 
73
- print(f"βœ… {model_name} loaded successfully!")
 
 
 
 
74
  return pipe
 
75
  except Exception as e:
76
  print(f"❌ Model loading failed: {e}")
77
  # Fallback to SD 1.5
@@ -82,8 +98,10 @@ def load_model(model_name="dreamshaper-8"):
82
  requires_safety_checker=False
83
  ).to("cpu")
84
 
85
- # Load the model once at startup
86
- pipe = load_model()
 
 
87
 
88
  # PROFESSIONAL PROMPT ENGINEERING
89
  def enhance_prompt(prompt, style="childrens_book"):
@@ -113,24 +131,19 @@ def enhance_prompt(prompt, style="childrens_book"):
113
  ]
114
  }
115
 
116
- # Choose style template
117
  templates = style_templates.get(style, style_templates["childrens_book"])
118
- style_prompt = templates[0] # Use the first template
119
 
120
- # Enhanced prompt construction
121
  enhanced = f"{style_prompt}, {prompt}"
122
 
123
- # Add quality boosters
124
  quality_boosters = [
125
  "intricate details", "beautiful composition", "perfect lighting",
126
  "professional artwork", "award winning", "trending on artstation"
127
  ]
128
 
129
- # Add 2-3 random quality boosters
130
  boosters = random.sample(quality_boosters, 2)
131
  enhanced += ", " + ", ".join(boosters)
132
 
133
- # Negative prompt to avoid bad quality
134
  negative_prompt = (
135
  "blurry, low quality, low resolution, ugly, deformed, poorly drawn, "
136
  "bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, "
@@ -178,7 +191,14 @@ def save_complete_storybook_page(image, story_title, sequence_number, scene_text
178
 
179
  def generate_storybook_page(scene_visual, story_title, sequence_number, scene_text, model_choice="dreamshaper-8", style="childrens_book"):
180
  """Generate a storybook page with both image and text"""
 
 
181
  try:
 
 
 
 
 
182
  # Enhanced prompt for better quality
183
  enhanced_prompt, negative_prompt = enhance_prompt(scene_visual, style)
184
 
@@ -204,14 +224,25 @@ def generate_storybook_page(scene_visual, story_title, sequence_number, scene_te
204
  return None, f"❌ Generation failed: {str(e)}"
205
 
206
  def batch_generate_complete_storybook(story_title, scenes_data, model_choice="dreamshaper-8", style="childrens_book"):
207
- """Generate complete storybook with images and text"""
208
  results = []
209
  status_messages = []
210
 
 
 
 
 
 
 
 
 
 
 
211
  for i, scene_data in enumerate(scenes_data, 1):
212
  scene_visual = scene_data.get('visual', '')
213
  scene_text = scene_data.get('text', '')
214
 
 
215
  image, status = generate_storybook_page(
216
  scene_visual, story_title, i, scene_text, model_choice, style
217
  )
@@ -220,20 +251,26 @@ def batch_generate_complete_storybook(story_title, scenes_data, model_choice="dr
220
  results.append((f"Page {i}", image, scene_text))
221
  status_messages.append(f"Page {i}: {status}")
222
 
 
 
 
 
223
  return results, "\n".join(status_messages)
224
 
225
  # FastAPI endpoint for n8n
226
  @app.post("/api/generate-storybook")
227
  async def api_generate_storybook(request: StorybookRequest):
228
- """API endpoint for n8n automation"""
229
  try:
230
  print(f"πŸ“š Received storybook request: {request.story_title}")
231
  print(f"πŸ“– Pages to generate: {len(request.scenes)}")
 
 
232
 
233
  # Convert to scene data format
234
  scenes_data = [{"visual": scene.visual, "text": scene.text} for scene in request.scenes]
235
 
236
- # Generate storybook
237
  results, status = batch_generate_complete_storybook(
238
  request.story_title,
239
  scenes_data,
@@ -258,7 +295,9 @@ async def api_generate_storybook(request: StorybookRequest):
258
  }
259
 
260
  except Exception as e:
261
- raise HTTPException(status_code=500, detail=f"Storybook generation failed: {str(e)}")
 
 
262
 
263
  # Health check endpoint
264
  @app.get("/api/health")
@@ -267,9 +306,13 @@ async def health_check():
267
  "status": "healthy",
268
  "service": "Storybook Generator API",
269
  "timestamp": datetime.now().isoformat(),
270
- "models_available": list(MODEL_CHOICES.keys())
 
 
271
  }
272
 
 
 
273
  # Gradio Interface Functions
274
  def generate_single_page(prompt, story_title, scene_text, model_choice, style):
275
  """Generate a single page for Gradio interface"""
 
17
  import random
18
 
19
  # External OCI API URL
20
+ OCI_API_BASE_URL = "https://yukee1992-oci-video-storage.hf.space"
21
 
22
  # Initialize FastAPI app
23
  app = FastAPI(title="Storybook Generator API")
 
52
  "sd-2.1": "stabilityai/stable-diffusion-2-1",
53
  }
54
 
55
+ # GLOBAL MODEL CACHE - Load once, reuse forever
56
+ model_cache = {}
57
+ current_model_name = None
58
+ pipe = None
59
+
60
  def load_model(model_name="dreamshaper-8"):
61
+ """Load model into global cache - runs only once per model"""
62
+ global model_cache, current_model_name, pipe
63
+
64
+ # Return cached model if already loaded
65
+ if model_name in model_cache:
66
+ print(f"βœ… Using cached model: {model_name}")
67
+ return model_cache[model_name]
68
+
69
+ print(f"πŸ”„ Loading model for the first time: {model_name}")
70
  try:
71
  model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
72
 
 
81
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
82
  pipe = pipe.to("cpu")
83
 
84
+ # Cache the model for future use
85
+ model_cache[model_name] = pipe
86
+ current_model_name = model_name
87
+
88
+ print(f"βœ… Model loaded and cached: {model_name}")
89
  return pipe
90
+
91
  except Exception as e:
92
  print(f"❌ Model loading failed: {e}")
93
  # Fallback to SD 1.5
 
98
  requires_safety_checker=False
99
  ).to("cpu")
100
 
101
+ # Load the default model once at startup
102
+ print("πŸš€ Initializing Storybook Generator...")
103
+ pipe = load_model("dreamshaper-8")
104
+ print("βœ… Default model loaded and ready!")
105
 
106
  # PROFESSIONAL PROMPT ENGINEERING
107
  def enhance_prompt(prompt, style="childrens_book"):
 
131
  ]
132
  }
133
 
 
134
  templates = style_templates.get(style, style_templates["childrens_book"])
135
+ style_prompt = templates[0]
136
 
 
137
  enhanced = f"{style_prompt}, {prompt}"
138
 
 
139
  quality_boosters = [
140
  "intricate details", "beautiful composition", "perfect lighting",
141
  "professional artwork", "award winning", "trending on artstation"
142
  ]
143
 
 
144
  boosters = random.sample(quality_boosters, 2)
145
  enhanced += ", " + ", ".join(boosters)
146
 
 
147
  negative_prompt = (
148
  "blurry, low quality, low resolution, ugly, deformed, poorly drawn, "
149
  "bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, "
 
191
 
192
  def generate_storybook_page(scene_visual, story_title, sequence_number, scene_text, model_choice="dreamshaper-8", style="childrens_book"):
193
  """Generate a storybook page with both image and text"""
194
+ global pipe, current_model_name
195
+
196
  try:
197
+ # Switch model if different from current
198
+ if model_choice != current_model_name:
199
+ print(f"πŸ”„ Switching to model: {model_choice}")
200
+ pipe = load_model(model_choice)
201
+
202
  # Enhanced prompt for better quality
203
  enhanced_prompt, negative_prompt = enhance_prompt(scene_visual, style)
204
 
 
224
  return None, f"❌ Generation failed: {str(e)}"
225
 
226
  def batch_generate_complete_storybook(story_title, scenes_data, model_choice="dreamshaper-8", style="childrens_book"):
227
+ """Generate complete storybook with images and text - MODEL LOADS ONLY ONCE"""
228
  results = []
229
  status_messages = []
230
 
231
+ print(f"πŸ“š Starting batch generation for: {story_title}")
232
+ print(f"πŸ“– Total pages: {len(scenes_data)}")
233
+ print(f"🎨 Using model: {model_choice}")
234
+
235
+ # Load model once at the beginning
236
+ global pipe
237
+ pipe = load_model(model_choice)
238
+
239
+ start_time = time.time()
240
+
241
  for i, scene_data in enumerate(scenes_data, 1):
242
  scene_visual = scene_data.get('visual', '')
243
  scene_text = scene_data.get('text', '')
244
 
245
+ print(f"πŸ”„ Generating page {i}/{len(scenes_data)}...")
246
  image, status = generate_storybook_page(
247
  scene_visual, story_title, i, scene_text, model_choice, style
248
  )
 
251
  results.append((f"Page {i}", image, scene_text))
252
  status_messages.append(f"Page {i}: {status}")
253
 
254
+ total_time = time.time() - start_time
255
+ print(f"βœ… Batch generation completed in {total_time:.2f} seconds")
256
+ print(f"πŸ“Š Average: {total_time/len(scenes_data):.2f} seconds per page")
257
+
258
  return results, "\n".join(status_messages)
259
 
260
  # FastAPI endpoint for n8n
261
  @app.post("/api/generate-storybook")
262
  async def api_generate_storybook(request: StorybookRequest):
263
+ """API endpoint for n8n automation - OPTIMIZED"""
264
  try:
265
  print(f"πŸ“š Received storybook request: {request.story_title}")
266
  print(f"πŸ“– Pages to generate: {len(request.scenes)}")
267
+ print(f"🎨 Model: {request.model_choice}")
268
+ print(f"🎨 Style: {request.style}")
269
 
270
  # Convert to scene data format
271
  scenes_data = [{"visual": scene.visual, "text": scene.text} for scene in request.scenes]
272
 
273
+ # Generate storybook (model loads only once)
274
  results, status = batch_generate_complete_storybook(
275
  request.story_title,
276
  scenes_data,
 
295
  }
296
 
297
  except Exception as e:
298
+ error_msg = f"Storybook generation failed: {str(e)}"
299
+ print(f"❌ {error_msg}")
300
+ raise HTTPException(status_code=500, detail=error_msg)
301
 
302
  # Health check endpoint
303
  @app.get("/api/health")
 
306
  "status": "healthy",
307
  "service": "Storybook Generator API",
308
  "timestamp": datetime.now().isoformat(),
309
+ "models_loaded": list(model_cache.keys()),
310
+ "current_model": current_model_name,
311
+ "cached_models_count": len(model_cache)
312
  }
313
 
314
+ # ... (keep the rest of your Gradio interface code the same) ...
315
+
316
  # Gradio Interface Functions
317
  def generate_single_page(prompt, story_title, scene_text, model_choice, style):
318
  """Generate a single page for Gradio interface"""