yukee1992 committed on
Commit
83dc960
·
verified ·
1 Parent(s): 4148061

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +410 -279
app.py CHANGED
@@ -10,11 +10,14 @@ import re
10
  import time
11
  import json
12
  from typing import List, Optional, Dict
13
- from fastapi import FastAPI, HTTPException
14
  from pydantic import BaseModel
15
  import gc
16
  import psutil
17
  import threading
 
 
 
18
 
19
  # External OCI API URL
20
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
@@ -32,6 +35,13 @@ app.add_middleware(
32
  allow_headers=["*"],
33
  )
34
 
 
 
 
 
 
 
 
35
  # Story scene model
36
  class StoryScene(BaseModel):
37
  visual: str
@@ -47,6 +57,7 @@ class StorybookRequest(BaseModel):
47
  characters: List[CharacterDescription] = []
48
  model_choice: str = "dreamshaper-8"
49
  style: str = "childrens_book"
 
50
 
51
  class StorybookResponse(BaseModel):
52
  status: str
@@ -60,22 +71,35 @@ class StorybookResponse(BaseModel):
60
  pages: List[dict]
61
  request_id: str
62
 
63
- # MODEL SELECTION
 
 
 
 
 
 
 
 
 
64
  MODEL_CHOICES = {
65
- "dreamshaper-8": "lykon/dreamshaper-8",
66
- "realistic-vision": "SG161222/Realistic_Vision_V5.1",
 
 
 
67
  }
68
 
 
 
 
 
 
69
  # GLOBAL MODEL CACHE
70
  model_cache = {}
71
  current_model_name = None
72
  current_pipe = None
73
  model_lock = threading.Lock()
74
 
75
- # Character consistency tracking
76
- character_seeds = {}
77
- active_requests = {}
78
-
79
  def monitor_memory():
80
  try:
81
  process = psutil.Process()
@@ -89,7 +113,7 @@ def cleanup_memory():
89
  torch.cuda.empty_cache()
90
 
91
  def load_model(model_name="dreamshaper-8"):
92
- """Thread-safe model loading"""
93
  global model_cache, current_model_name, current_pipe
94
 
95
  with model_lock:
@@ -98,10 +122,11 @@ def load_model(model_name="dreamshaper-8"):
98
  current_model_name = model_name
99
  return current_pipe
100
 
101
- print(f"🔄 Loading model: {model_name}")
102
  try:
103
  model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
104
 
 
105
  pipe = StableDiffusionPipeline.from_pretrained(
106
  model_id,
107
  torch_dtype=torch.float32,
@@ -109,6 +134,7 @@ def load_model(model_name="dreamshaper-8"):
109
  requires_safety_checker=False
110
  )
111
 
 
112
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
113
  pipe = pipe.to("cpu")
114
 
@@ -116,352 +142,457 @@ def load_model(model_name="dreamshaper-8"):
116
  current_pipe = pipe
117
  current_model_name = model_name
118
 
119
- print(f"✅ Model loaded: {model_name}")
120
  return pipe
121
 
122
  except Exception as e:
123
  print(f"❌ Model loading failed: {e}")
124
- raise
 
 
 
 
 
 
125
 
126
  # Initialize default model
127
- print("🚀 Initializing Storybook Generator...")
128
  load_model("dreamshaper-8")
129
- print("✅ Model loaded and ready!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
- # PROMPT OPTIMIZATION - PRESERVE FULL DESCRIPTIONS
132
  def optimize_prompt(scene_visual, characters, style="childrens_book", page_number=1):
133
  """
134
- Create a prompt that PRESERVES all visual descriptions while fitting 77 tokens
135
  """
136
- # 1. PRESERVE THE ENTIRE SCENE VISUAL DESCRIPTION (most important)
137
- scene_prompt = scene_visual
138
 
139
- # 2. Extract only ESSENTIAL character features (not full descriptions)
140
  character_essence = ""
141
  if characters:
142
  char_names = []
143
  for char in characters:
144
  char_name = char.get('name', '') if isinstance(char, dict) else getattr(char, 'name', '')
145
- char_names.append(char_name.split()[0]) # Just first name
146
 
147
  character_essence = f" featuring {', '.join(char_names)}"
148
 
149
- # 3. Add style context briefly
150
- style_context = {
151
- "childrens_book": "children's book illustration style",
152
- "realistic": "photorealistic style",
153
- "fantasy": "fantasy art style",
154
- "anime": "anime style"
155
- }.get(style, "children's book illustration style")
156
-
157
- # 4. Build the final prompt - SCENE DESCRIPTION COMES FIRST
158
  continuity = f"Scene {page_number}, " if page_number > 1 else ""
159
- final_prompt = f"{continuity}{scene_prompt}{character_essence}. {style_context}. high quality, detailed"
160
-
161
- # 5. If still too long, prioritize scene description over style
162
- words = final_prompt.split()
163
- if len(words) > 60:
164
- # Keep the scene description intact, trim the end
165
- scene_words = scene_visual.split()
166
- if len(scene_words) > 45:
167
- # If scene itself is too long, keep first 40 words of scene
168
- scene_part = ' '.join(scene_words[:40])
169
- final_prompt = f"{continuity}{scene_part}...{character_essence}. {style_context}"
170
- else:
171
- # Keep entire scene, trim style part
172
- final_prompt = f"{continuity}{scene_visual}{character_essence}. high quality"
173
-
174
- print(f"📝 Final prompt: {final_prompt}")
175
- print(f"📏 Length: {len(final_prompt.split())} words")
176
 
 
177
  return final_prompt
178
 
179
- def enhance_prompt(scene_visual, characters, style="childrens_book", page_number=1):
180
- """Create optimized prompt that preserves visual descriptions"""
181
- main_prompt = optimize_prompt(scene_visual, characters, style, page_number)
182
-
183
- negative_prompt = (
184
- "blurry, low quality, ugly, deformed, bad anatomy, "
185
- "watermark, text, username, multiple people, inconsistent, "
186
- "missing limbs, extra limbs, disfigured, malformed"
187
- )
188
-
189
- return main_prompt, negative_prompt
 
 
 
 
 
 
 
 
 
 
 
 
190
 
191
- def save_complete_storybook_page(image, story_title, sequence_number, scene_text):
 
192
  try:
 
193
  img_bytes = io.BytesIO()
194
  image.save(img_bytes, format='PNG')
195
  img_data = img_bytes.getvalue()
196
 
197
- clean_title = re.sub(r'[^a-zA-Z0-9_\-]', '', story_title.strip().replace(' ', '_'))
198
- image_filename = f"page_{sequence_number:03d}_{clean_title}.png"
199
- text_filename = f"page_{sequence_number:03d}_{clean_title}.txt"
 
 
 
 
200
 
201
- # Save image to OCI
202
- image_api_url = f"{OCI_API_BASE_URL}/api/upload"
203
- files = {'file': (image_filename, img_data, 'image/png')}
204
- data = {'project_id': 'storybook-library', 'subfolder': f'stories/{clean_title}'}
205
- response = requests.post(image_api_url, files=files, data=data, timeout=30)
 
206
 
207
- # Save text to OCI
208
- text_api_url = f"{OCI_API_BASE_URL}/api/upload-text"
209
- text_data = {
210
  'project_id': 'storybook-library',
211
- 'subfolder': f'stories/{clean_title}',
212
- 'filename': text_filename,
213
- 'content': scene_text
214
  }
215
- text_response = requests.post(text_api_url, data=text_data, timeout=30)
216
 
217
- return True, f"✅ Page {sequence_number}: Image & Text saved"
 
 
 
 
 
 
 
 
 
 
 
 
218
 
219
  except Exception as e:
220
- return False, f" Save failed: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
 
222
- def get_character_seed(story_title, character_name, page_number):
223
- if story_title not in character_seeds:
224
- character_seeds[story_title] = {}
 
225
 
226
- seed_key = f"{character_name}_{page_number}"
227
- if seed_key not in character_seeds[story_title]:
228
- base_seed = hash(f"{story_title}_{character_name}") % 1000000
229
- page_variation = (page_number * 13) % 1000
230
- seed_value = (base_seed + page_variation) % 1000000
231
- character_seeds[story_title][seed_key] = seed_value
232
 
233
- return character_seeds[story_title][seed_key]
 
 
 
 
 
 
 
 
234
 
235
- def process_storybook_generation(request_id, request_data):
236
- """Process generation in background and store results"""
 
237
  try:
238
- print(f"🔧 Processing request {request_id} in background...")
239
-
240
- # Load model
241
- load_model(request_data["model_choice"])
242
 
243
- # Convert characters to dict
244
- characters_dict = []
245
- for char in request_data["characters"]:
246
- characters_dict.append({
247
- "name": char["name"],
248
- "description": char["description"]
249
- })
250
 
251
- results = []
252
- status_messages = []
253
  start_time = time.time()
254
 
255
- # Process each page
256
- for i, scene in enumerate(request_data["scenes"], 1):
 
 
 
 
 
 
 
257
  try:
258
- print(f"🔄 Generating page {i}...")
 
 
259
 
260
- enhanced_prompt, negative_prompt = enhance_prompt(
261
- scene["visual"], characters_dict, request_data["style"], i
 
 
 
 
262
  )
263
 
264
- # Get character name for seed
265
- main_char_name = "default"
266
- if characters_dict:
267
- main_char_name = characters_dict[0]["name"]
268
-
269
- # Use consistent seed
270
- generator = torch.Generator(device="cpu")
271
- main_char_seed = get_character_seed(request_data["story_title"], main_char_name, i)
272
- generator.manual_seed(main_char_seed)
273
-
274
- # Generate image
275
- global current_pipe
276
- image = current_pipe(
277
- prompt=enhanced_prompt,
278
- negative_prompt=negative_prompt,
279
- num_inference_steps=25,
280
- guidance_scale=7.0,
281
- width=512,
282
- height=512,
283
- generator=generator
284
- ).images[0]
285
-
286
  # Save to OCI
287
- success, save_status = save_complete_storybook_page(
288
- image, request_data["story_title"], i, scene["text"]
289
- )
290
 
291
- if success:
292
- results.append({"page_number": i, "status": "success"})
293
- status_messages.append(f"Page {i}: {save_status}")
294
- print(f" Page {i} completed")
295
- else:
296
- results.append({"page_number": i, "status": "error", "message": save_status})
297
- status_messages.append(f"Page {i}: {save_status}")
 
298
 
299
- cleanup_memory()
300
 
301
- if i < len(request_data["scenes"]):
302
- time.sleep(1)
303
-
304
  except Exception as e:
305
- error_msg = f"Page {i} failed: {str(e)}"
306
- results.append({"page_number": i, "status": "error", "message": error_msg})
307
- status_messages.append(error_msg)
308
  print(f"❌ {error_msg}")
 
 
309
 
310
- total_time = time.time() - start_time
311
-
312
- # Store results
313
- active_requests[request_id] = {
314
- "status": "completed",
315
- "results": results,
316
- "message": "\n".join(status_messages),
317
- "generation_time": total_time,
318
- "completed_at": datetime.now().isoformat()
 
319
  }
320
 
321
- print(f"✅ Request {request_id} completed in {total_time:.2f} seconds")
 
 
 
 
 
 
 
 
322
 
323
  except Exception as e:
324
- active_requests[request_id] = {
325
- "status": "error",
326
- "message": f"Processing failed: {str(e)}"
327
- }
328
- print(f"❌ Request {request_id} failed: {e}")
329
 
330
- # FastAPI endpoint - IMMEDIATE RESPONSE
331
- @app.post("/api/generate-storybook", response_model=StorybookResponse)
332
- async def api_generate_storybook(request: StorybookRequest):
333
- """API endpoint that returns immediately"""
334
  try:
335
- print(f"📚 Received request: {request.story_title}")
336
- print(f"📖 Pages: {len(request.scenes)}")
337
-
338
- # Create request ID
339
- request_id = f"{request.story_title}_{int(time.time())}"
340
-
341
- # Convert to dict for background processing
342
- request_data = {
343
- "story_title": request.story_title,
344
- "scenes": [{"visual": scene.visual, "text": scene.text} for scene in request.scenes],
345
- "characters": [{"name": char.name, "description": char.description} for char in request.characters],
346
- "model_choice": request.model_choice,
347
- "style": request.style
348
- }
349
-
350
- # Store initial request state
351
- active_requests[request_id] = {
352
- "status": "processing",
353
- "started_at": datetime.now().isoformat(),
354
- "total_pages": len(request.scenes)
355
- }
356
 
357
- # Start background processing in a thread
358
- import threading
359
- thread = threading.Thread(
360
- target=process_storybook_generation,
361
- args=(request_id, request_data)
362
- )
363
- thread.daemon = True
364
- thread.start()
365
 
366
- # IMMEDIATE RESPONSE to n8n
367
- response_data = {
368
- "status": "processing",
 
 
369
  "story_title": request.story_title,
370
- "total_pages": len(request.scenes),
371
- "characters_used": len(request.characters),
372
- "generated_pages": 0,
373
- "generation_time": 0,
374
- "message": f"Generation started for {len(request.scenes)} pages. Request ID: {request_id}",
375
- "folder_path": f"storybook-library/stories/{request.story_title.replace(' ', '_')}/",
376
- "pages": [
377
- {
378
- "page_number": i+1,
379
- "image_file": f"page_{i+1:03d}_{request.story_title.replace(' ', '_')}.png",
380
- "text_file": f"page_{i+1:03d}_{request.story_title.replace(' ', '_')}.txt"
381
- } for i in range(len(request.scenes))
382
- ],
383
- "request_id": request_id
384
  }
385
 
386
- return response_data
387
-
388
  except Exception as e:
389
- error_msg = f"Request failed: {str(e)}"
390
- print(f"❌ {error_msg}")
391
- raise HTTPException(status_code=500, detail=error_msg)
392
 
393
- # Status check endpoint for n8n
394
- @app.get("/api/status/{request_id}")
395
- async def check_status(request_id: str):
396
- """Check status of a generation request"""
397
- if request_id not in active_requests:
398
- return {"status": "not_found", "message": "Request ID not found"}
399
-
400
- request_data = active_requests[request_id]
401
- return {
402
- "status": request_data["status"],
403
- "message": request_data.get("message", ""),
404
- "generation_time": request_data.get("generation_time", 0),
405
- "completed_at": request_data.get("completed_at", ""),
406
- "total_pages": request_data.get("total_pages", 0)
407
- }
408
 
409
- @app.get("/api/health")
410
- async def health_check():
 
411
  return {
412
- "status": "healthy",
413
- "service": "Storybook Generator API",
414
- "timestamp": datetime.now().isoformat(),
415
- "memory_usage_mb": monitor_memory(),
416
- "models_loaded": list(model_cache.keys()),
417
- "current_model": current_model_name,
418
- "active_requests": len(active_requests)
 
 
 
419
  }
420
 
421
- # Simple Gradio interface
422
- with gr.Blocks(title="Storybook Generator", theme="soft") as demo:
423
- gr.Markdown("# 📚 Storybook Generator")
424
-
425
- with gr.Row():
426
- story_title = gr.Textbox(label="Story Title", value="Test Story")
427
- prompt_input = gr.Textbox(label="Scene Description", lines=3, value="A beautiful sunset over mountains with vibrant colors")
428
- generate_btn = gr.Button("Generate Test Page")
429
- output_image = gr.Image()
430
- status = gr.Textbox()
431
-
432
- def generate_test_page(prompt, title):
433
- try:
434
- # Test with a simple generation
435
- enhanced_prompt, negative_prompt = enhance_prompt(prompt, [], "childrens_book", 1)
436
-
437
- generator = torch.Generator(device="cpu")
438
- generator.manual_seed(123)
439
-
440
- global current_pipe
441
- image = current_pipe(
442
- prompt=enhanced_prompt,
443
- negative_prompt=negative_prompt,
444
- num_inference_steps=20,
445
- guidance_scale=7.0,
446
- width=512,
447
- height=512,
448
- generator=generator
449
- ).images[0]
450
-
451
- return image, f"✅ Generated: {enhanced_prompt}"
 
 
 
 
 
 
 
452
 
453
- except Exception as e:
454
- return None, f"Error: {str(e)}"
455
-
456
- generate_btn.click(
457
- fn=generate_test_page,
458
- inputs=[prompt_input, story_title],
459
- outputs=[output_image, status]
460
- )
 
 
 
 
 
 
 
 
 
 
 
 
461
 
462
- app = gr.mount_gradio_app(app, demo, path="/")
 
 
463
 
464
  if __name__ == "__main__":
465
- print("🚀 Starting Storybook Generator API...")
466
  import uvicorn
 
 
 
 
 
 
467
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
10
  import time
11
  import json
12
  from typing import List, Optional, Dict
13
+ from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from pydantic import BaseModel
15
  import gc
16
  import psutil
17
  import threading
18
+ import uuid
19
+ import hashlib
20
+ from enum import Enum
21
 
22
  # External OCI API URL
23
  OCI_API_BASE_URL = "https://yukee1992-oci-story-book.hf.space"
 
35
  allow_headers=["*"],
36
  )
37
 
38
+ # Job Status Enum
39
+ class JobStatus(str, Enum):
40
+ PENDING = "pending"
41
+ PROCESSING = "processing"
42
+ COMPLETED = "completed"
43
+ FAILED = "failed"
44
+
45
  # Story scene model
46
  class StoryScene(BaseModel):
47
  visual: str
 
57
  characters: List[CharacterDescription] = []
58
  model_choice: str = "dreamshaper-8"
59
  style: str = "childrens_book"
60
+ callback_url: Optional[str] = None # For n8n webhook
61
 
62
  class StorybookResponse(BaseModel):
63
  status: str
 
71
  pages: List[dict]
72
  request_id: str
73
 
74
+ class JobStatusResponse(BaseModel):
75
+ job_id: str
76
+ status: JobStatus
77
+ progress: int # 0-100
78
+ message: str
79
+ result: Optional[dict] = None
80
+ created_at: float
81
+ updated_at: float
82
+
83
+ # HIGH-QUALITY MODEL SELECTION (from your high-quality app.py)
84
  MODEL_CHOICES = {
85
+ "dreamshaper-8": "lykon/dreamshaper-8", # Excellent for fantasy/artistic
86
+ "realistic-vision": "SG161222/Realistic_Vision_V5.1", # Photorealistic
87
+ "anything-v5": "andite/anything-v5.0", # Anime/illustration style
88
+ "openjourney": "prompthero/openjourney", # Artistic/Midjourney style
89
+ "sd-2.1": "stabilityai/stable-diffusion-2-1", # General purpose
90
  }
91
 
92
+ # GLOBAL STORAGE FOR JOBS AND STATUS
93
+ job_storage = {}
94
+ active_requests = {}
95
+ character_seeds = {}
96
+
97
  # GLOBAL MODEL CACHE
98
  model_cache = {}
99
  current_model_name = None
100
  current_pipe = None
101
  model_lock = threading.Lock()
102
 
 
 
 
 
103
  def monitor_memory():
104
  try:
105
  process = psutil.Process()
 
113
  torch.cuda.empty_cache()
114
 
115
  def load_model(model_name="dreamshaper-8"):
116
+ """Thread-safe model loading with HIGH-QUALITY settings"""
117
  global model_cache, current_model_name, current_pipe
118
 
119
  with model_lock:
 
122
  current_model_name = model_name
123
  return current_pipe
124
 
125
+ print(f"🔄 Loading HIGH-QUALITY model: {model_name}")
126
  try:
127
  model_id = MODEL_CHOICES.get(model_name, "lykon/dreamshaper-8")
128
 
129
+ # HIGH-QUALITY LOADING (from your high-quality app.py)
130
  pipe = StableDiffusionPipeline.from_pretrained(
131
  model_id,
132
  torch_dtype=torch.float32,
 
134
  requires_safety_checker=False
135
  )
136
 
137
+ # Use better scheduler for quality
138
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
139
  pipe = pipe.to("cpu")
140
 
 
142
  current_pipe = pipe
143
  current_model_name = model_name
144
 
145
+ print(f"✅ HIGH-QUALITY Model loaded: {model_name}")
146
  return pipe
147
 
148
  except Exception as e:
149
  print(f"❌ Model loading failed: {e}")
150
+ # Fallback to SD 1.5
151
+ return StableDiffusionPipeline.from_pretrained(
152
+ "runwayml/stable-diffusion-v1-5",
153
+ torch_dtype=torch.float32,
154
+ safety_checker=None,
155
+ requires_safety_checker=False
156
+ ).to("cpu")
157
 
158
  # Initialize default model
159
+ print("🚀 Initializing HIGH-QUALITY Storybook Generator...")
160
  load_model("dreamshaper-8")
161
+ print("✅ HIGH-QUALITY Model loaded and ready!")
162
+
163
+ # PROFESSIONAL PROMPT ENGINEERING (from your high-quality app.py)
164
+ def enhance_prompt(prompt, style="childrens_book"):
165
+ """Transform basic prompts into professional-grade prompts"""
166
+
167
+ style_templates = {
168
+ "childrens_book": [
169
+ "masterpiece, best quality, 4K, ultra detailed, children's book illustration",
170
+ "watercolor painting, whimsical, cute, charming, storybook style",
171
+ "vibrant colors, soft lighting, magical, enchanting, dreamlike",
172
+ "Pixar style, Disney animation, high detail, professional artwork"
173
+ ],
174
+ "realistic": [
175
+ "photorealistic, 8K, ultra detailed, professional photography",
176
+ "sharp focus, studio lighting, high resolution, intricate details",
177
+ "realistic textures, natural lighting, cinematic quality"
178
+ ],
179
+ "fantasy": [
180
+ "epic fantasy art, digital painting, concept art, trending on artstation",
181
+ "magical, mystical, ethereal, otherworldly, fantasy illustration",
182
+ "dynamic composition, dramatic lighting, highly detailed"
183
+ ],
184
+ "anime": [
185
+ "anime style, Japanese animation, high quality, detailed artwork",
186
+ "beautiful anime illustration, vibrant colors, clean lines",
187
+ "studio ghibli style, makoto shinkai, professional anime art"
188
+ ]
189
+ }
190
+
191
+ # Choose style template
192
+ templates = style_templates.get(style, style_templates["childrens_book"])
193
+ style_prompt = templates[0] # Use the first template
194
+
195
+ # Enhanced prompt construction
196
+ enhanced = f"{style_prompt}, {prompt}"
197
+
198
+ # Add quality boosters
199
+ quality_boosters = [
200
+ "intricate details", "beautiful composition", "perfect lighting",
201
+ "professional artwork", "award winning", "trending on artstation"
202
+ ]
203
+
204
+ # Add 2-3 random quality boosters
205
+ import random
206
+ boosters = random.sample(quality_boosters, 2)
207
+ enhanced += ", " + ", ".join(boosters)
208
+
209
+ # Negative prompt to avoid bad quality
210
+ negative_prompt = (
211
+ "blurry, low quality, low resolution, ugly, deformed, poorly drawn, "
212
+ "bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, "
213
+ "disconnected limbs, mutation, mutated, ugly, disgusting, bad art, "
214
+ "beginner, amateur, distorted, watermark, signature, text, username"
215
+ )
216
+
217
+ return enhanced, negative_prompt
218
 
 
219
  def optimize_prompt(scene_visual, characters, style="childrens_book", page_number=1):
220
  """
221
+ Create HIGH-QUALITY prompt that preserves visual descriptions
222
  """
223
+ # First, enhance the base prompt using professional engineering
224
+ base_enhanced, _ = enhance_prompt(scene_visual, style)
225
 
226
+ # Extract character essence
227
  character_essence = ""
228
  if characters:
229
  char_names = []
230
  for char in characters:
231
  char_name = char.get('name', '') if isinstance(char, dict) else getattr(char, 'name', '')
232
+ char_names.append(char_name.split()[0])
233
 
234
  character_essence = f" featuring {', '.join(char_names)}"
235
 
236
+ # Build final HIGH-QUALITY prompt
 
 
 
 
 
 
 
 
237
  continuity = f"Scene {page_number}, " if page_number > 1 else ""
238
+ final_prompt = f"{continuity}{base_enhanced}{character_essence}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
+ print(f"🎨 HIGH-QUALITY Prompt: {final_prompt}")
241
  return final_prompt
242
 
243
+ def generate_high_quality_image(prompt, model_choice="dreamshaper-8", style="childrens_book", negative_prompt=""):
244
+ """Generate HIGH-QUALITY image with professional settings"""
245
+ try:
246
+ # Load the appropriate model
247
+ pipe = load_model(model_choice)
248
+
249
+ # Generate HIGH-QUALITY image with better settings
250
+ image = pipe(
251
+ prompt=prompt,
252
+ negative_prompt=negative_prompt,
253
+ num_inference_steps=30, # More steps = better quality
254
+ guidance_scale=8.5, # Higher guidance = better prompt following
255
+ width=768, # Higher resolution
256
+ height=768,
257
+ generator=torch.Generator(device="cpu").manual_seed(int(time.time()))
258
+ ).images[0]
259
+
260
+ print("✅ HIGH-QUALITY Image generated successfully!")
261
+ return image
262
+
263
+ except Exception as e:
264
+ print(f"❌ HQ Generation failed: {str(e)}")
265
+ raise
266
 
267
+ def save_to_oci_via_api(image, prompt, story_title, page_number):
268
+ """Save image using the EXTERNAL OCI API endpoint"""
269
  try:
270
+ # Convert image to bytes
271
  img_bytes = io.BytesIO()
272
  image.save(img_bytes, format='PNG')
273
  img_data = img_bytes.getvalue()
274
 
275
+ # Create filename
276
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
277
+ safe_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (' ', '-', '_')).rstrip()
278
+ filename = f"page_{page_number:03d}_{safe_prompt}_{timestamp}.png"
279
+
280
+ # Use the EXTERNAL API URL
281
+ api_url = f"{OCI_API_BASE_URL}/api/upload"
282
 
283
+ print(f"🌐 Calling EXTERNAL OCI API: {api_url}")
284
+
285
+ # Prepare form data for API request
286
+ files = {
287
+ 'file': (filename, img_data, 'image/png')
288
+ }
289
 
290
+ data = {
 
 
291
  'project_id': 'storybook-library',
292
+ 'subfolder': f'stories/{story_title}'
 
 
293
  }
 
294
 
295
+ # Make the API request with timeout
296
+ response = requests.post(api_url, files=files, data=data, timeout=30)
297
+
298
+ print(f"📨 API Response: {response.status_code}")
299
+
300
+ if response.status_code == 200:
301
+ result = response.json()
302
+ if result['status'] == 'success':
303
+ return result.get('file_url', 'Unknown URL')
304
+ else:
305
+ raise Exception(f"API Error: {result.get('message', 'Unknown error')}")
306
+ else:
307
+ raise Exception(f"HTTP Error: {response.status_code}")
308
 
309
  except Exception as e:
310
+ raise Exception(f"API upload failed: {str(e)}")
311
+
312
+ # JOB MANAGEMENT FUNCTIONS
313
+ def create_job(story_request: StorybookRequest) -> str:
314
+ """Create a new job and return job ID"""
315
+ job_id = str(uuid.uuid4())
316
+
317
+ job_storage[job_id] = {
318
+ "status": JobStatus.PENDING,
319
+ "progress": 0,
320
+ "message": "Job created and queued",
321
+ "request": story_request.dict(),
322
+ "result": None,
323
+ "created_at": time.time(),
324
+ "updated_at": time.time(),
325
+ "pages": []
326
+ }
327
+
328
+ print(f"📝 Created job {job_id} for story: {story_request.story_title}")
329
+ return job_id
330
+
331
+ def update_job_status(job_id: str, status: JobStatus, progress: int, message: str, result=None):
332
+ """Update job status and send notification if callback URL exists"""
333
+ if job_id not in job_storage:
334
+ return False
335
+
336
+ job_storage[job_id].update({
337
+ "status": status,
338
+ "progress": progress,
339
+ "message": message,
340
+ "updated_at": time.time()
341
+ })
342
+
343
+ if result:
344
+ job_storage[job_id]["result"] = result
345
+ if "pages" in result:
346
+ job_storage[job_id]["pages"] = result["pages"]
347
+
348
+ # Send webhook notification if callback URL exists
349
+ job_data = job_storage[job_id]
350
+ request_data = job_data["request"]
351
+
352
+ if request_data.get("callback_url"):
353
+ try:
354
+ callback_data = {
355
+ "job_id": job_id,
356
+ "status": status.value,
357
+ "progress": progress,
358
+ "message": message,
359
+ "story_title": request_data["story_title"],
360
+ "timestamp": time.time()
361
+ }
362
+
363
+ if status == JobStatus.COMPLETED and result:
364
+ callback_data["result"] = result
365
+
366
+ response = requests.post(
367
+ request_data["callback_url"],
368
+ json=callback_data,
369
+ timeout=10
370
+ )
371
+ print(f"📢 Callback sent to {request_data['callback_url']}: Status {response.status_code}")
372
+ except Exception as e:
373
+ print(f"⚠️ Callback failed: {str(e)}")
374
+
375
+ return True
376
 
377
+ def get_job_status(job_id: str) -> Optional[JobStatusResponse]:
378
+ """Get current job status"""
379
+ if job_id not in job_storage:
380
+ return None
381
 
382
+ job_data = job_storage[job_id]
 
 
 
 
 
383
 
384
+ return JobStatusResponse(
385
+ job_id=job_id,
386
+ status=job_data["status"],
387
+ progress=job_data["progress"],
388
+ message=job_data["message"],
389
+ result=job_data["result"],
390
+ created_at=job_data["created_at"],
391
+ updated_at=job_data["updated_at"]
392
+ )
393
 
394
+ # BACKGROUND TASK FOR STORY GENERATION
395
+ def generate_storybook_background(job_id: str):
396
+ """Background task to generate storybook pages"""
397
  try:
398
+ job_data = job_storage[job_id]
399
+ story_request_data = job_data["request"]
400
+ story_request = StorybookRequest(**story_request_data)
 
401
 
402
+ print(f"🎬 Starting story generation for job {job_id}")
403
+ update_job_status(job_id, JobStatus.PROCESSING, 10, "Starting storybook generation...")
 
 
 
 
 
404
 
405
+ total_scenes = len(story_request.scenes)
406
+ generated_pages = []
407
  start_time = time.time()
408
 
409
+ for i, scene in enumerate(story_request.scenes):
410
+ progress = 10 + int((i / total_scenes) * 80)
411
+ update_job_status(
412
+ job_id,
413
+ JobStatus.PROCESSING,
414
+ progress,
415
+ f"Generating page {i+1}/{total_scenes}: {scene.visual[:50]}..."
416
+ )
417
+
418
  try:
419
+ # Create HIGH-QUALITY prompt
420
+ prompt, negative_prompt = enhance_prompt(scene.visual, story_request.style)
421
+ prompt = optimize_prompt(scene.visual, story_request.characters, story_request.style, i+1)
422
 
423
+ # Generate HIGH-QUALITY image
424
+ image = generate_high_quality_image(
425
+ prompt,
426
+ story_request.model_choice,
427
+ story_request.style,
428
+ negative_prompt
429
  )
430
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
431
  # Save to OCI
432
+ image_url = save_to_oci_via_api(image, prompt, story_request.story_title, i+1)
 
 
433
 
434
+ # Store page data
435
+ page_data = {
436
+ "page_number": i + 1,
437
+ "image_url": image_url,
438
+ "text": scene.text,
439
+ "prompt": prompt
440
+ }
441
+ generated_pages.append(page_data)
442
 
443
+ print(f"✅ Page {i+1} completed successfully")
444
 
 
 
 
445
  except Exception as e:
446
+ error_msg = f"Failed to generate page {i+1}: {str(e)}"
 
 
447
  print(f"❌ {error_msg}")
448
+ update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
449
+ return
450
 
451
+ # Complete the job
452
+ generation_time = time.time() - start_time
453
+ result = {
454
+ "story_title": story_request.story_title,
455
+ "total_pages": total_scenes,
456
+ "characters_used": len(story_request.characters),
457
+ "generated_pages": len(generated_pages),
458
+ "generation_time": round(generation_time, 2),
459
+ "folder_path": f"stories/{story_request.story_title}",
460
+ "pages": generated_pages
461
  }
462
 
463
+ update_job_status(
464
+ job_id,
465
+ JobStatus.COMPLETED,
466
+ 100,
467
+ f"Storybook generation completed! {len(generated_pages)} pages created in {generation_time:.2f}s",
468
+ result
469
+ )
470
+
471
+ print(f"🎉 Storybook generation completed for job {job_id}")
472
 
473
  except Exception as e:
474
+ error_msg = f"Story generation failed: {str(e)}"
475
+ print(f" {error_msg}")
476
+ update_job_status(job_id, JobStatus.FAILED, 0, error_msg)
 
 
477
 
478
+ # FASTAPI ENDPOINTS
479
+ @app.post("/api/generate-storybook", response_model=dict)
480
+ async def generate_storybook(request: StorybookRequest, background_tasks: BackgroundTasks):
481
+ """Endpoint to start storybook generation - immediately returns job ID"""
482
  try:
483
+ # Create job immediately
484
+ job_id = create_job(request)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
485
 
486
+ # Start background task
487
+ background_tasks.add_task(generate_storybook_background, job_id)
 
 
 
 
 
 
488
 
489
+ # Return job ID immediately for status tracking
490
+ return {
491
+ "status": "success",
492
+ "message": "Storybook generation started",
493
+ "job_id": job_id,
494
  "story_title": request.story_title,
495
+ "total_scenes": len(request.scenes),
496
+ "callback_url": request.callback_url
 
 
 
 
 
 
 
 
 
 
 
 
497
  }
498
 
 
 
499
  except Exception as e:
500
+ raise HTTPException(status_code=500, detail=f"Failed to start generation: {str(e)}")
 
 
501
 
502
+ @app.get("/api/job-status/{job_id}", response_model=JobStatusResponse)
503
+ async def get_job_status_endpoint(job_id: str):
504
+ """Endpoint to check job status"""
505
+ job_status = get_job_status(job_id)
506
+ if not job_status:
507
+ raise HTTPException(status_code=404, detail="Job not found")
508
+ return job_status
 
 
 
 
 
 
 
 
509
 
510
+ @app.get("/api/jobs")
511
+ async def list_jobs():
512
+ """Endpoint to list all jobs"""
513
  return {
514
+ "total_jobs": len(job_storage),
515
+ "jobs": {
516
+ job_id: {
517
+ "status": data["status"].value,
518
+ "progress": data["progress"],
519
+ "story_title": data["request"]["story_title"],
520
+ "created_at": data["created_at"]
521
+ }
522
+ for job_id, data in job_storage.items()
523
+ }
524
  }
525
 
526
+
527
+ # GRADIO INTERFACE (Optional - for manual testing)
528
+ def create_gradio_interface():
529
+ """Create Gradio interface for manual testing"""
530
+ with gr.Blocks(title="High-Quality Storybook Generator", theme="soft") as demo:
531
+ gr.Markdown("# 🎨 High-Quality Storybook Generator")
532
+ gr.Markdown("Generate **studio-quality** storybook images with professional results")
533
+
534
+ with gr.Row():
535
+ with gr.Column(scale=1):
536
+ gr.Markdown("### 🎯 Quality Settings")
537
+
538
+ model_choice = gr.Dropdown(
539
+ label="AI Model",
540
+ choices=list(MODEL_CHOICES.keys()),
541
+ value="dreamshaper-8",
542
+ info="Choose the best model for your style"
543
+ )
544
+
545
+ style_choice = gr.Dropdown(
546
+ label="Art Style",
547
+ choices=["childrens_book", "realistic", "fantasy", "anime"],
548
+ value="childrens_book",
549
+ info="Select the artistic style"
550
+ )
551
+
552
+ story_title = gr.Textbox(
553
+ label="Story Title",
554
+ placeholder="Enter your story title..."
555
+ )
556
+
557
+ scene_input = gr.Textbox(
558
+ label="Scene Description",
559
+ placeholder="Describe your scene in detail...",
560
+ lines=3
561
+ )
562
+
563
+ generate_btn = gr.Button("✨ Generate Premium Image", variant="primary")
564
 
565
+ with gr.Column(scale=2):
566
+ image_output = gr.Image(label="Generated Image", height=500, show_download_button=True)
567
+ status_output = gr.Textbox(label="Status", interactive=False, lines=3)
568
+
569
+ def generate_single_image(story_title, scene, model, style):
570
+ """Generate a single high-quality image for testing"""
571
+ try:
572
+ prompt, negative_prompt = enhance_prompt(scene, style)
573
+ image = generate_high_quality_image(prompt, model, style, negative_prompt)
574
+ return image, f"✅ High-quality image generated for: {story_title}"
575
+ except Exception as e:
576
+ return None, f"❌ Error: {str(e)}"
577
+
578
+ generate_btn.click(
579
+ fn=generate_single_image,
580
+ inputs=[story_title, scene_input, model_choice, style_choice],
581
+ outputs=[image_output, status_output]
582
+ )
583
+
584
+ return demo
585
 
586
+ # For Hugging Face Spaces deployment
587
+ def get_app():
588
+ return app
589
 
590
  if __name__ == "__main__":
 
591
  import uvicorn
592
+ print("🚀 Starting High-Quality Storybook Generator API...")
593
+ print("🎨 Available models:")
594
+ for model in MODEL_CHOICES:
595
+ print(f" - {model}")
596
+
597
+ # Start both FastAPI and Gradio (if needed)
598
  uvicorn.run(app, host="0.0.0.0", port=7860)