Spaces:
Runtime error
Runtime error
| import asyncio | |
| import base64 | |
| import importlib | |
| import io | |
| import logging | |
| import os | |
| from datetime import datetime | |
| import gradio as gr | |
| import httpx | |
| from PIL import Image | |
| import config | |
| importlib.reload(config) | |
| from config import CONFIG, get_api_headers, get_api_url | |
| from models import ( | |
| TaskSubmission, ImageToImageSubmission, PhotoStyleSubmission, | |
| InteriorDesignRenderingSubmission, WatermarkRemovalSubmission, | |
| LineArtConversionSubmission, AnimeToRealSubmission, RealToAnimeSubmission, | |
| ImageOutpaintingSubmission, FiveViewGenerationSubmission, Figure3DSubmission, | |
| CharacterFigureCollaborationSubmission | |
| ) | |
| from examples_config import ( | |
| TEXT_TO_IMAGE_EXAMPLES_WITH_RESULTS, | |
| FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS, | |
| FIGURE_3D_EXAMPLES_WITH_RESULTS, | |
| CHARACTER_FIGURE_COLLABORATION_EXAMPLES_WITH_RESULTS, | |
| IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS, | |
| LINE_ART_CONVERSION_EXAMPLES_WITH_RESULTS, | |
| ANIME_TO_REAL_EXAMPLES_WITH_RESULTS, | |
| REAL_TO_ANIME_EXAMPLES_WITH_RESULTS, | |
| INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS | |
| ) | |
| logging.basicConfig(level=logging.INFO) | |
| logger = logging.getLogger(__name__) | |
# Maps internal photo-style preset keys to the labels shown in the UI.
PHOTO_STYLE_DISPLAY_MAPPING = {
    "camera_movement": "๐น Camera Movement",
    "relighting": "๐ก Relighting",
    "camera_zoom": "๐ Camera Zoom",
    "product_photo": "๐ฆ Professional Product Photography",
    "miniature": "๐ Tilt-Shift Miniature",
    "reflection": "๐ช Reflection Addition",
    "pose_change": "๐ญ Pose & Viewpoint Change"
}


def preset_key_to_display_name(preset_key: str) -> str:
    """Return the UI label for a preset key, or the key itself if unknown."""
    try:
        return PHOTO_STYLE_DISPLAY_MAPPING[preset_key]
    except KeyError:
        return preset_key


def display_name_to_preset_key(display_name: str) -> str:
    """Reverse lookup: UI label -> preset key (echoes the input if unknown)."""
    return next(
        (key for key, label in PHOTO_STYLE_DISPLAY_MAPPING.items() if label == display_name),
        display_name,
    )


# (label, value) pairs for the Gradio dropdown component.
PHOTO_STYLE_CHOICES = [
    (label, key) for key, label in PHOTO_STYLE_DISPLAY_MAPPING.items()
]
# Maps internal interior-design style keys to the labels shown in the UI.
INTERIOR_DESIGN_STYLE_MAPPING = {
    "japanese_wabi_sabi": "๐ฏ Japanese Wabi-Sabi",
    "nordic_cozy": "๐๏ธ Nordic Cozy",
    "italian_luxury": "๐ฎ๐น Italian Luxury",
    "parisian_apartment": "๐ผ Parisian Apartment"
}


def interior_style_key_to_display_name(style_key: str) -> str:
    """Return the UI label for an interior style key, or the key if unknown."""
    try:
        return INTERIOR_DESIGN_STYLE_MAPPING[style_key]
    except KeyError:
        return style_key


def display_name_to_interior_style_key(display_name: str) -> str:
    """Reverse lookup: UI label -> style key (echoes the input if unknown)."""
    return next(
        (key for key, label in INTERIOR_DESIGN_STYLE_MAPPING.items() if label == display_name),
        display_name,
    )


# (label, value) pairs for the Gradio dropdown component.
INTERIOR_DESIGN_STYLE_CHOICES = [
    (label, key) for key, label in INTERIOR_DESIGN_STYLE_MAPPING.items()
]
# 3D Figure Style Mapping
# Maps internal 3D-figure scene keys to the labels shown in the UI.
FIGURE_3D_STYLE_MAPPING = {
    "professional_lighting": "๐ก Professional Lighting Scene",
    "collector_shelf": "๐ Collector's Display Scene",
    "desktop_display": "๐ป Desktop Display Scene"
}
# (label, value) pairs for the Gradio dropdown component.
FIGURE_3D_STYLE_CHOICES = [
    (display_name, key) for key, display_name in FIGURE_3D_STYLE_MAPPING.items()
]
| # ============================================================================ | |
| # Utility Functions | |
| # ============================================================================ | |
def pil_to_base64(pil_image: Image.Image) -> str:
    """Encode *pil_image* as PNG and return it as a base64 ASCII string."""
    with io.BytesIO() as buffer:
        pil_image.save(buffer, format='PNG')
        encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode('utf-8')
def resize_image_if_needed(image: Image.Image, max_size: int = 1536, min_size: int = 512) -> Image.Image:
    """Scale *image* so its longest side is <= max_size; upscale tiny images.

    Returns the original image untouched when it already fits both bounds.
    Downscaling (longest side > max_size) takes priority over upscaling.
    Note: for extreme aspect ratios the two constraints cannot both be met;
    this mirrors the original behavior of satisfying only one of them.

    Args:
        image: Source PIL image.
        max_size: Maximum allowed length of the longest side, in pixels.
        min_size: Minimum desired length of the shortest side, in pixels.

    Returns:
        The original image, or a LANCZOS-resampled copy.
    """
    width, height = image.size
    # Fast path: already within both bounds.
    if max(width, height) <= max_size and min(width, height) >= min_size:
        return image
    if max(width, height) > max_size:
        ratio = max_size / max(width, height)
    else:
        ratio = min_size / min(width, height)
    # max(1, ...): int() truncation could yield a 0-pixel dimension for
    # extreme aspect ratios (e.g. 1x2000 downscaled), which Image.resize rejects.
    new_width = max(1, int(width * ratio))
    new_height = max(1, int(height * ratio))
    return image.resize((new_width, new_height), Image.Resampling.LANCZOS)
async def submit_task_with_retry(endpoint: str, payload: dict, task_name: str, max_retries: int = 60) -> dict:
    """
    Submit task with intelligent retry mechanism for 429 errors

    Args:
        endpoint: API endpoint
        payload: Request payload
        task_name: Task name for user-friendly messages
        max_retries: Maximum retry attempts (default: 60 for 5 minutes)
    Returns:
        API response dict
    Raises:
        Exception: User-friendly error messages only

    Fix over the previous version: user-facing Exceptions raised for HTTP
    status codes were being caught by a broad ``except Exception`` whose
    substring filter did not match the actual messages, so they were
    re-wrapped into a generic error. Status handling now happens OUTSIDE
    the network try-block so those messages propagate unchanged.
    """
    base_delay = 5.0  # Start with 5 seconds
    max_delay = 30.0  # Cap at 30 seconds
    for attempt in range(max_retries + 1):
        # Only network-level failures are translated here.
        try:
            async with httpx.AsyncClient(timeout=CONFIG.API_TIMEOUT) as client:
                response = await client.post(
                    get_api_url(endpoint),
                    json=payload,
                    headers=get_api_headers()
                )
        except httpx.TimeoutException:
            logger.error(f"Timeout error for {task_name}")
            raise Exception("Network timeout, please check your connection")
        except httpx.ConnectError:
            logger.error(f"Connection error for {task_name}")
            raise Exception("Unable to connect to the server, please try again later")
        except Exception as e:
            # Log the technical error but surface only a user-friendly message.
            logger.error(f"Unexpected error submitting {task_name}: {e}")
            raise Exception("An error occurred while submitting the task, please try again later")
        if response.status_code == 429:
            if attempt < max_retries:
                # Exponential backoff (x1.5 per attempt), capped at max_delay.
                delay = min(base_delay * (1.5 ** attempt), max_delay)
                logger.info(f"System busy, retrying {task_name} in {delay:.1f}s (attempt {attempt + 1}/{max_retries + 1})")
                await asyncio.sleep(delay)
                continue
            raise Exception("The system is currently busy, please try again later")
        if response.status_code >= 500:
            logger.error(f"Server error {response.status_code} for {task_name}")
            raise Exception("Service temporarily unavailable, please try again later")
        if response.status_code >= 400:
            logger.error(f"Client error {response.status_code} for {task_name}")
            raise Exception("Invalid request parameters, please check your input")
        # Success: parse the JSON body, translating decode failures as well.
        try:
            return response.json()
        except Exception as e:
            logger.error(f"Unexpected error submitting {task_name}: {e}")
            raise Exception("An error occurred while submitting the task, please try again later")
    # Defensive: the loop always returns or raises before reaching here.
    raise Exception("The system is currently busy, please try again later")
async def submit_text_to_image_task(prompt: str, resolution: str) -> dict:
    """Submit text-to-image task to backend API with intelligent retry"""
    # Resolution tokens mapped to (width, height); checked in declaration
    # order, defaulting to a 1024x1024 square when no token matches.
    dimension_options = {
        "1024x1024": (1024, 1024),
        "1152x896": (1152, 896),
        "896x1152": (896, 1152),
        "1344x768": (1344, 768),
        "768x1344": (768, 1344),
        "1216x832": (1216, 832),
        "1536x640": (1536, 640),
    }
    width, height = 1024, 1024
    for token, dims in dimension_options.items():
        if token in resolution:
            width, height = dims
            break
    submission = TaskSubmission(
        prompt=prompt,
        width=width,
        height=height
    )
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/",
        payload=submission.to_api_payload(),
        task_name="Text to Image"
    )
async def get_task_status(task_id: str) -> dict:
    """Fetch the current status record for *task_id* from the backend API."""
    try:
        async with httpx.AsyncClient(timeout=30) as http_client:
            status_response = await http_client.get(
                get_api_url(f"api/v1/tasks/{task_id}"),
                headers=get_api_headers(),
            )
            status_response.raise_for_status()
            return status_response.json()
    except Exception as e:
        # Log and propagate so callers can handle/report the failure.
        logger.error(f"Error getting task status: {e}")
        raise
async def get_task_result(task_id: str) -> dict:
    """Fetch the final result payload for *task_id* from the backend API."""
    try:
        async with httpx.AsyncClient(timeout=30) as http_client:
            result_response = await http_client.get(
                get_api_url(f"api/v1/tasks/{task_id}/result"),
                headers=get_api_headers(),
            )
            result_response.raise_for_status()
            return result_response.json()
    except Exception as e:
        # Log and propagate so callers can handle/report the failure.
        logger.error(f"Error getting task result: {e}")
        raise
async def load_image_from_result(result_data: dict) -> Image.Image:
    """Download the task's result image (URL-based only) as a PIL Image.

    Raises ValueError when the result data is missing, lacks a result_url,
    the URL is malformed, or the downloaded payload is implausibly small.
    """
    if not result_data:
        raise ValueError("No result data received")
    result_url = result_data.get("result_url")
    logger.info(f"๐ Result URL: {result_url}")
    if not result_url:
        logger.error(f"โ No result_url in response. Available fields: {list(result_data.keys())}")
        raise ValueError("No result_url found in result")
    if not result_url.startswith("http"):
        raise ValueError(f"Invalid URL format: {result_url}")
    try:
        logger.info(f"๐ฅ Downloading image from: {result_url}")
        async with httpx.AsyncClient(timeout=60.0) as http_client:
            download = await http_client.get(result_url)
            logger.info(f"๐ HTTP response status: {download.status_code}")
            download.raise_for_status()
            blob = download.content
            logger.info(f"๐ฆ Content size: {len(blob)} bytes")
            # Anything under 1 KB is almost certainly an error page, not an image.
            if len(blob) < 1024:
                raise ValueError("Downloaded content too small to be a valid image")
            return Image.open(io.BytesIO(blob))
    except Exception as e:
        logger.error(f"Error downloading image from {result_url}: {e}")
        raise
def create_placeholder_image(prompt: str, resolution: str) -> Image.Image:
    """Create a plain near-white placeholder image sized from *resolution*."""
    try:
        # Recognize the two supported sizes; anything else gets the default square.
        if "1024x1024" in resolution:
            dims = (1024, 1024)
        elif "1280x1024" in resolution:
            dims = (1280, 1024)
        else:
            dims = (1024, 1024)
        return Image.new('RGB', dims, color='#f5f5f5')
    except Exception as e:
        logger.error(f"Error creating placeholder image: {e}")
        return Image.new('RGB', (1024, 1024), color='#f5f5f5')
def create_placeholder_image_inline(prompt: str, resolution: str) -> Image.Image:
    """Create a simple lightgray placeholder image for example galleries."""
    # Known size tokens, checked in order; default is a 1024x1024 square.
    known_sizes = (
        ("1024x1024", (1024, 1024)),
        ("1152x896", (1152, 896)),
        ("896x1152", (896, 1152)),
    )
    try:
        dims = (1024, 1024)
        for token, size in known_sizes:
            if token in resolution:
                dims = size
                break
        return Image.new('RGB', dims, color='lightgray')
    except Exception:
        # Fall back to the default square size on any failure.
        return Image.new('RGB', (1024, 1024), color='lightgray')
def load_example_result(prompt: str, resolution: str, result_path: str = None):
    """Load example result image for text-to-image examples.

    Matches (prompt, resolution) against TEXT_TO_IMAGE_EXAMPLES_WITH_RESULTS
    and returns the pre-generated result image, or a placeholder when the
    example or its file cannot be found.
    """
    try:
        for ex_prompt, ex_resolution, ex_path in TEXT_TO_IMAGE_EXAMPLES_WITH_RESULTS:
            if (ex_prompt, ex_resolution) != (prompt, resolution):
                continue
            if os.path.exists(ex_path):
                logger.info(f"Loading example result: {ex_path}")
                return Image.open(ex_path)
            logger.warning(f"Example image not found: {ex_path}")
            return create_placeholder_image_inline(prompt, resolution)
        # No configured example matched this prompt/resolution pair.
        logger.warning(f"No matching example found for: {prompt[:50]}...")
        return create_placeholder_image_inline(prompt, resolution)
    except Exception as e:
        logger.error(f"Error loading example result: {e}")
        return create_placeholder_image_inline(prompt, resolution)
def load_line_art_example_result(input_image_path, result_path = None):
    """Load line art conversion example images.

    Resolves the (input, result) image pair for a line-art example in two modes:
    runtime mode (``input_image_path`` is a file-system path, matched against
    LINE_ART_CONVERSION_EXAMPLES_WITH_RESULTS) and Gradio cache mode
    (``input_image_path`` is a PIL.Image, identified by its pixel size).

    Returns:
        tuple: (input_image, result_image); either element may be a
        placeholder when the corresponding file cannot be found.
    """
    try:
        # 1. Resolve the input image: accept either a PIL.Image (cache mode)
        # or a string path (runtime mode); anything else gets a placeholder.
        input_image = None
        if isinstance(input_image_path, Image.Image):
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for line art input (cache mode): {input_image.size}")
        elif isinstance(input_image_path, str):
            if os.path.exists(input_image_path):
                logger.info(f"Loading line art input image: {input_image_path}")
                input_image = Image.open(input_image_path)
            else:
                logger.warning(f"Line art input image not found: {input_image_path}")
                input_image = create_placeholder_image_inline("Input Image", "1024x1024")
        else:
            logger.warning(f"Unexpected input_image_path type: {type(input_image_path)}")
            input_image = create_placeholder_image_inline("Input Image", "1024x1024")
        # 2. If the caller already supplied the result as a PIL.Image
        # (cache-generation mode), return the pair immediately.
        result_image = None
        if isinstance(result_path, Image.Image):
            result_image = result_path
            logger.info(f"Using PIL.Image object for line art result (cache mode): {result_image.size}")
            return (input_image, result_image)
        # NOTE(review): re-imported locally although the module already imports
        # this name at top level; harmless (Python caches modules).
        from examples_config import LINE_ART_CONVERSION_EXAMPLES_WITH_RESULTS
        if isinstance(input_image_path, str):
            # Runtime mode: exact path match against the examples table.
            search_path = input_image_path
            for example_input, example_path in LINE_ART_CONVERSION_EXAMPLES_WITH_RESULTS:
                if example_input == search_path:
                    if os.path.exists(example_path):
                        logger.info(f"Loading line art example result: {example_path}")
                        result_image = Image.open(example_path)
                        return (input_image, result_image)
        else:
            # Cache mode: no path is available, so identify the example by the
            # input image's exact pixel dimensions.
            image_size = input_image.size
            logger.info(f"Cache mode: identifying example by image size: {image_size}")
            size_to_result = {
                (474, 845): "examples/results/line_art_example1.jpg",
                (720, 1104): "examples/results/line_art_example2.jpg",
                (736, 1308): "examples/results/line_art_example3.jpg",
            }
            result_path = size_to_result.get(image_size)
            if result_path and os.path.exists(result_path):
                logger.info(f"Loading line art result by size mapping: {result_path}")
                result_image = Image.open(result_path)
                return (input_image, result_image)
        # If no matching example is found, create a placeholder
        logger.warning(f"No matching line art example found")
        result_image = create_placeholder_image_inline("Line Art Result", "1024x1024")
        return (input_image, result_image)
    except Exception as e:
        logger.error(f"Error loading line art example: {e}")
        input_placeholder = create_placeholder_image_inline("Input Image", "1024x1024")
        result_placeholder = create_placeholder_image_inline("Line Art Result", "1024x1024")
        return (input_placeholder, result_placeholder)
def load_anime_to_real_example_result(input_image_path, result_path=None):
    """
    Load example for Anime to Real: input image and pre-generated result image
    Simplified version using fixed paths (since only one example exists)
    """
    try:
        logger.info(f"=== ANIME FUNCTION CALLED ===")
        logger.info(f"Args: {input_image_path}, {result_path}")
        # Simplified version: directly use fixed paths; the incoming arguments
        # are logged but otherwise ignored because only one example exists.
        fixed_input = "examples/anime_input/example1.jpg"
        fixed_result = "examples/results/anime_to_real_example1.jpg"
        logger.info(f"Loading fixed paths: {fixed_input}, {fixed_result}")
        if os.path.exists(fixed_input):
            input_image = Image.open(fixed_input)
            logger.info(f"Input loaded: {input_image.size}")
        else:
            logger.error(f"Input not found: {fixed_input}")
            input_image = create_placeholder_image_inline("Input Error", "512x512")
        if os.path.exists(fixed_result):
            result_image = Image.open(fixed_result)
            logger.info(f"Result loaded: {result_image.size}")
        else:
            logger.error(f"Result not found: {fixed_result}")
            result_image = create_placeholder_image_inline("Result Error", "512x512")
        logger.info(f"=== RETURNING: {input_image.size}, {result_image.size} ===")
        return (input_image, result_image)
    except Exception as e:
        logger.error(f"Error loading anime to real example: {e}", exc_info=True)
        input_placeholder = create_placeholder_image_inline("Anime Input", "1024x1024")
        result_placeholder = create_placeholder_image_inline("Real Person Result", "1024x1024")
        logger.info(f"=== Returning with error placeholders ===")
        return (input_placeholder, result_placeholder)
def load_real_to_anime_example_result(input_image_path, result_path=None):
    """Load example for Real to Anime conversion.

    Works in two modes: runtime mode (``input_image_path`` is a file path,
    matched against REAL_TO_ANIME_EXAMPLES_WITH_RESULTS) and Gradio cache
    mode (``input_image_path`` is a PIL.Image, identified by its pixel size).

    Returns:
        tuple: (input_image, result_image); placeholders on failure.
    """
    try:
        # Resolve the input image from a PIL object (cache mode) or a path.
        input_image = None
        if isinstance(input_image_path, Image.Image):
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for real to anime input (cache mode): {input_image.size}")
        elif isinstance(input_image_path, str):
            if os.path.exists(input_image_path):
                logger.info(f"Loading real to anime input image: {input_image_path}")
                input_image = Image.open(input_image_path)
            else:
                logger.warning(f"Real to anime input image not found: {input_image_path}")
                input_image = create_placeholder_image_inline("Input Image", "1024x1024")
        else:
            logger.warning(f"Unexpected input_image_path type: {type(input_image_path)}")
            input_image = create_placeholder_image_inline("Input Image", "1024x1024")
        # A pre-supplied PIL result (cache-generation mode) short-circuits the lookup.
        result_image = None
        if isinstance(result_path, Image.Image):
            result_image = result_path
            logger.info(f"Using PIL.Image object for real to anime result (cache mode): {result_image.size}")
            return (input_image, result_image)
        from examples_config import REAL_TO_ANIME_EXAMPLES_WITH_RESULTS
        if isinstance(input_image_path, str):
            # Runtime mode: exact path match against the examples table.
            search_path = input_image_path
            for example_input, example_result in REAL_TO_ANIME_EXAMPLES_WITH_RESULTS:
                if example_input == search_path:
                    if os.path.exists(example_result):
                        logger.info(f"Loading real to anime example result: {example_result}")
                        result_image = Image.open(example_result)
                        return (input_image, result_image)
        else:
            # Cache mode: identify the example by the input's pixel dimensions.
            image_size = input_image.size
            logger.info(f"Cache mode: identifying real to anime example by image size: {image_size}")
            size_to_result = {
                (736, 1104): "examples/results/real_to_anime_example1.jpg",
                (736, 946): "examples/results/real_to_anime_example2.jpg",
                (1206, 796): "examples/results/real_to_anime_example3.jpg",
            }
            result_path_mapped = size_to_result.get(image_size)
            if result_path_mapped and os.path.exists(result_path_mapped):
                logger.info(f"Loading real to anime result by size mapping: {result_path_mapped}")
                result_image = Image.open(result_path_mapped)
                return (input_image, result_image)
        # If no matching example is found, create a placeholder
        logger.warning(f"No matching real to anime example found")
        result_image = create_placeholder_image_inline("Anime Style Result", "1024x1024")
        return (input_image, result_image)
    except Exception as e:
        logger.error(f"Error loading real to anime example: {e}", exc_info=True)
        input_placeholder = create_placeholder_image_inline("Real Photo Input", "1024x1024")
        result_placeholder = create_placeholder_image_inline("Anime Style Result", "1024x1024")
        return (input_placeholder, result_placeholder)
def load_dual_output_example(input_path, param1=None, param2=None):
    """Load example with both input and result images - enhanced version for different use cases"""
    try:
        # Resolve the input image from either a PIL object or a file path.
        if isinstance(input_path, Image.Image):
            input_image = input_path
        elif isinstance(input_path, str) and os.path.exists(input_path):
            input_image = Image.open(input_path)
        else:
            input_image = create_placeholder_image_inline("Input", "1024x1024")
        result_image = None
        if isinstance(input_path, str):
            from examples_config import (
                IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS,
                INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS
            )
            # Outpainting examples keep their result at index 3, interior-design
            # examples at index 2; search the tables in that order.
            for examples, result_idx, label in (
                (IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS, 3, "outpainting"),
                (INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS, 2, "interior design"),
            ):
                for example in examples:
                    if example[0] == input_path and len(example) > result_idx:
                        candidate = example[result_idx]
                        if os.path.exists(candidate):
                            result_image = Image.open(candidate)
                            logger.info(f"Found {label} result: {candidate}")
                            break
                if result_image:
                    break
        if not result_image:
            result_image = create_placeholder_image_inline("Result", "1024x1024")
        return input_image, result_image
    except Exception as e:
        logger.error(f"Error loading dual output example: {e}")
        placeholder = create_placeholder_image_inline("Error", "1024x1024")
        return placeholder, placeholder
def load_five_view_example(input_path):
    """Load five view generation example with input and result images"""
    try:
        # Resolve the input image from either a PIL object or a file path.
        if isinstance(input_path, Image.Image):
            input_image = input_path
        elif isinstance(input_path, str) and os.path.exists(input_path):
            input_image = Image.open(input_path)
        else:
            input_image = create_placeholder_image_inline("Input", "1024x1024")
        from examples_config import FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS
        result_image = None
        # Gradio's example cache passes PIL objects instead of paths, so the
        # PIL branch falls back to the first configured result.
        if isinstance(input_path, Image.Image):
            if FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS:
                candidate = FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS[0][1]
                if os.path.exists(candidate):
                    result_image = Image.open(candidate)
                    logger.info(f"Found five view result: {candidate}")
        elif isinstance(input_path, str):
            # Runtime mode: exact path match against the examples table.
            for example in FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS:
                if example[0] == input_path and len(example) > 1:
                    candidate = example[1]
                    if os.path.exists(candidate):
                        result_image = Image.open(candidate)
                        logger.info(f"Found five view result: {candidate}")
                        break
        if not result_image:
            result_image = create_placeholder_image_inline("Five View Result", "1024x1024")
        return input_image, result_image
    except Exception as e:
        logger.error(f"Error loading five view example: {e}")
        placeholder = create_placeholder_image_inline("Error", "1024x1024")
        return placeholder, placeholder
def load_figure_3d_example(input_image_path, figure_style: str, resolution: str = "square - 1024x1024 (1:1)", result_path=None):
    """
    Load 3D figure generation example input image and pre-generated result image
    Supports two modes:
    1. Runtime mode: input_image_path is a string path, result_path is a string path
    2. Cache generation mode: input_image_path is a PIL.Image object, result_path is a PIL.Image object

    Args:
        input_image_path: input image path or PIL.Image object
        figure_style: style key (passed through unchanged in the return value)
        resolution: resolution label (passed through unchanged in the return value)
        result_path: result image path / PIL.Image, or None to look the result
            up in FIGURE_3D_EXAMPLES_WITH_RESULTS

    Returns:
        tuple: (input_image, figure_style, resolution, result_image)
    """
    try:
        # 1. Handle input image - supports string path and PIL.Image object
        input_image = None
        if isinstance(input_image_path, Image.Image):
            # Cache mode: directly use PIL.Image object
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for input (cache mode): {input_image.size}")
        elif isinstance(input_image_path, str):
            # Runtime mode: load the image from its path
            if os.path.exists(input_image_path):
                logger.info(f"Loading figure 3D input image: {input_image_path}")
                input_image = Image.open(input_image_path)
            else:
                logger.warning(f"Figure 3D input image not found: {input_image_path}")
                input_image = create_placeholder_image("Input Image", "1024x1024")
        else:
            logger.warning(f"Unexpected input_image_path type: {type(input_image_path)}")
            input_image = create_placeholder_image("Input Image", "1024x1024")
        # 2. Handle result image - supports string path and PIL.Image object
        result_image = None
        if isinstance(result_path, Image.Image):
            # Cache mode: directly use PIL.Image object
            result_image = result_path
            logger.info(f"Using PIL.Image object for result (cache mode): {result_image.size}")
        elif isinstance(result_path, str):
            # Runtime mode: load the result image from its path
            if os.path.exists(result_path):
                logger.info(f"Loading figure 3D result image: {result_path}")
                result_image = Image.open(result_path)
            else:
                logger.warning(f"Figure 3D result image not found: {result_path}")
                result_image = create_placeholder_image("Result Image", "1024x1024")
        else:
            # Try to find a matching result in FIGURE_3D_EXAMPLES_WITH_RESULTS
            from examples_config import FIGURE_3D_EXAMPLES_WITH_RESULTS
            result_image = None
            # When Gradio caches examples it passes PIL Image objects, not file
            # paths, so we match by style (cache mode) or by exact path (runtime).
            if isinstance(input_image_path, Image.Image):
                # For PIL Image input (cache mode): look up the result by style
                for example in FIGURE_3D_EXAMPLES_WITH_RESULTS:
                    if len(example) >= 3 and example[1] == figure_style:
                        result_path_found = example[2]
                        if os.path.exists(result_path_found):
                            result_image = Image.open(result_path_found)
                            logger.info(f"Found figure 3D result by style: {result_path_found}")
                            break
            elif isinstance(input_image_path, str):
                # For string path input: exact match on both path and style
                for example in FIGURE_3D_EXAMPLES_WITH_RESULTS:
                    if len(example) >= 3 and example[0] == input_image_path and example[1] == figure_style:
                        result_path_found = example[2]
                        if os.path.exists(result_path_found):
                            result_image = Image.open(result_path_found)
                            logger.info(f"Found figure 3D result: {result_path_found}")
                            break
        if not result_image:
            result_image = create_placeholder_image("Figure 3D Result", "1024x1024")
        return input_image, figure_style, resolution, result_image
    except Exception as e:
        # NOTE(review): the error path uses create_placeholder_image_inline while
        # the branches above use create_placeholder_image - confirm whether the
        # mixed helpers are intentional.
        logger.error(f"Error loading figure 3D example: {e}")
        placeholder = create_placeholder_image_inline("Error", "1024x1024")
        return placeholder, figure_style, resolution, placeholder
def load_character_figure_collaboration_example(input_image_path, result_path=None):
    """
    Load character figure collaboration example input image and pre-generated result image
    Args:
        input_image_path: Path to input image or PIL.Image object
        result_path: Path to result image or PIL.Image object
    Returns:
        tuple: (input_image, result_image)
    """
    try:
        # 1. Handle input image - supports string path and PIL.Image object
        input_image = None
        if isinstance(input_image_path, Image.Image):
            # Cache mode: directly use PIL.Image object
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for input (cache mode): {input_image.size}")
        elif isinstance(input_image_path, str):
            # Runtime mode: load the image from its path
            if os.path.exists(input_image_path):
                logger.info(f"Loading character figure collaboration input image: {input_image_path}")
                input_image = Image.open(input_image_path)
            else:
                logger.warning(f"Character figure collaboration input image not found: {input_image_path}")
                input_image = create_placeholder_image("Input Image", "1024x1024")
        else:
            logger.warning(f"Unexpected input_image_path type: {type(input_image_path)}")
            input_image = create_placeholder_image("Input Image", "1024x1024")
        # 2. Handle result image - follows the Line Art loader's pattern exactly
        result_image = None
        if isinstance(result_path, Image.Image):
            # Cache mode: directly use PIL.Image object
            result_image = result_path
            logger.info(f"Using PIL.Image object for result (cache mode): {result_image.size}")
            return input_image, result_image
        from examples_config import CHARACTER_FIGURE_COLLABORATION_EXAMPLES_WITH_RESULTS
        if isinstance(input_image_path, str):
            # Runtime mode: find the matching result via the input path
            search_path = input_image_path
            for example_input, example_result in CHARACTER_FIGURE_COLLABORATION_EXAMPLES_WITH_RESULTS:
                if example_input == search_path:
                    if os.path.exists(example_result):
                        logger.info(f"Loading character figure collaboration example result: {example_result}")
                        result_image = Image.open(example_result)
                        return input_image, result_image
        else:
            # Cache mode: match by image size (same approach as the Line Art loader)
            image_size = input_image.size
            logger.info(f"Cache mode: identifying character figure collaboration example by image size: {image_size}")
            # Map input-image pixel sizes to their pre-generated result files
            size_to_result = {
                (1206, 776): "examples/results/character_figure_collaboration_example1.jpg",
                (1320, 1920): "examples/results/character_figure_collaboration_example2.jpg",
                (1536, 2200): "examples/results/character_figure_collaboration_example3.jpg",
                (1206, 1787): "examples/results/character_figure_collaboration_example4.jpg",
            }
            result_path = size_to_result.get(image_size)
            if result_path and os.path.exists(result_path):
                logger.info(f"Loading character figure collaboration result by size mapping: {result_path}")
                result_image = Image.open(result_path)
                return input_image, result_image
        # If no matching example is found, create a placeholder
        logger.warning(f"No matching character figure collaboration example found")
        result_image = create_placeholder_image_inline("Character Figure Collaboration Result", "1024x1024")
        return input_image, result_image
    except Exception as e:
        logger.error(f"Error loading character figure collaboration example: {e}")
        placeholder = create_placeholder_image_inline("Error", "1024x1024")
        return placeholder, placeholder
def load_outpainting_example_result(input_image_path, expand_height, expand_width, result_path=None):
    """
    Load an outpainting example's input image, parameters, and pre-generated result.

    Two modes are supported:
      * Runtime mode: ``input_image_path``/``result_path`` are filesystem paths.
      * Cache-generation mode: they are ``PIL.Image`` objects used directly.

    Args:
        input_image_path: input image path or PIL.Image object
        expand_height: outpainting height percentage
        expand_width: outpainting width percentage
        result_path: result image path or PIL.Image object
    Returns:
        Tuple[Image.Image, float, float, Image.Image]:
            (input image, expand height, expand width, result image)
    """
    try:
        # Coerce parameters to numeric types (guards against cache
        # serialization turning them into strings).
        expand_height = float(expand_height)
        expand_width = float(expand_width)
        # Resolve the input image from either a path or a PIL.Image.
        if isinstance(input_image_path, str) and os.path.exists(input_image_path):
            input_image = Image.open(input_image_path)
            logger.info(f"Loading outpainting input image: {input_image_path}")
        elif isinstance(input_image_path, Image.Image):
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for outpainting input (cache mode): {input_image.size}")
        else:
            input_image = create_placeholder_image_inline("Input Image", "1024x1024")
        # Handle result image
        if isinstance(result_path, Image.Image):
            # Cache-generation mode: use the PIL.Image object directly.
            result_image = result_path
            logger.info(f"Using PIL.Image object for outpainting result (cache mode): {result_image.size}")
            return (input_image, expand_height, expand_width, result_image)
        elif isinstance(result_path, str) and os.path.exists(result_path):
            result_image = Image.open(result_path)
            logger.info(f"Loading outpainting result from path: {result_path}")
            return (input_image, expand_height, expand_width, result_image)
        # Runtime mode: find result image from IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS
        from examples_config import IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS
        if isinstance(input_image_path, str):
            # Runtime mode: exact path match plus parameter match.
            search_path = input_image_path
            for example_input, example_height, example_width, example_result in IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS:
                # 0.01 tolerance absorbs float rounding from the UI sliders.
                if example_input == search_path and abs(example_height - expand_height) < 0.01 and abs(example_width - expand_width) < 0.01:
                    if os.path.exists(example_result):
                        logger.info(f"Loading outpainting example result: {example_result}")
                        result_image = Image.open(example_result)
                        return (input_image, expand_height, expand_width, result_image)
        else:
            # Cache mode: identify example by image size and parameters
            image_size = input_image.size
            logger.info(f"Cache mode: identifying outpainting example by image size: {image_size} and params: height={expand_height}, width={expand_width}")
            # Predefined ((width, height), expand_height, expand_width) -> result
            # path mapping, keyed by the actual example image dimensions.
            size_param_to_result = {
                ((1070, 1906), 0.2, 0.3): "examples/results/outpainting_example1.jpg",  # example1
                ((960, 1200), 0.2, 0.4): "examples/results/outpainting_example2.jpg",  # example2
                ((641, 1200), 0.5, 0.2): "examples/results/outpainting_example3.jpg",  # example3
            }
            # Find matching result
            # NOTE(review): this lookup uses exact float equality, unlike the
            # tolerant runtime branch above — confirm cached params round-trip exactly.
            key = (image_size, expand_height, expand_width)
            if key in size_param_to_result:
                result_path_mapped = size_param_to_result[key]
                if os.path.exists(result_path_mapped):
                    logger.info(f"Loading outpainting result by size and param mapping: {result_path_mapped}")
                    result_image = Image.open(result_path_mapped)
                    return (input_image, expand_height, expand_width, result_image)
        # If no matching result is found, use the first available result
        for example_input, example_height, example_width, example_result in IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS:
            if os.path.exists(example_result):
                logger.info(f"Loading first available outpainting result: {example_result}")
                result_image = Image.open(example_result)
                return (input_image, expand_height, expand_width, result_image)
        # As a fallback: create a placeholder
        logger.info("No outpainting results found, creating placeholder")
        result_image = create_placeholder_image_inline("Outpainting Result", "1536x1024")
        return (input_image, expand_height, expand_width, result_image)
    except Exception as e:
        logger.error(f"Error loading outpainting example: {e}", exc_info=True)
        # Placeholder images plus safe default parameters on any failure.
        input_placeholder = create_placeholder_image_inline("Input Image", "1024x1024")
        result_placeholder = create_placeholder_image_inline("Outpainting Result", "1536x1024")
        return (input_placeholder, 0.2, 0.2, result_placeholder)
def load_outpainting_example_for_gradio(input_image_path, expand_height, expand_width, result_path):
    """
    Adapter for Gradio Examples that guarantees exactly four outputs.

    Args:
        input_image_path: input image path or PIL.Image object
        expand_height: outpainting height percentage
        expand_width: outpainting width percentage
        result_path: result image path (from examples config)
    Returns:
        Tuple[Image.Image, float, float, Image.Image]:
            (input image, outpaint height, outpaint width, result image)
    """
    try:
        # Coerce parameters to floats (cache serialization may yield strings).
        expand_height = float(expand_height)
        expand_width = float(expand_width)
        result = load_outpainting_example_result(input_image_path, expand_height, expand_width, result_path)
        # Gradio binds four outputs; anything else would misalign components.
        if len(result) == 4:
            return result
        logger.error(f"load_outpainting_example_result returned {len(result)} values, expected 4")
    except Exception as e:
        logger.error(f"Error in load_outpainting_example_for_gradio: {e}", exc_info=True)
    # Fallback path. BUGFIX: the old handler called float(expand_height) here,
    # which re-raised if the float conversion was what failed in the first place.
    try:
        safe_height = float(expand_height)
        safe_width = float(expand_width)
    except (TypeError, ValueError):
        safe_height, safe_width = 0.2, 0.2
    input_placeholder = create_placeholder_image_inline("Input Image", "1024x1024")
    result_placeholder = create_placeholder_image_inline("Outpainting Result", "1536x1024")
    return (input_placeholder, safe_height, safe_width, result_placeholder)
def load_interior_design_example_result(input_image_path, design_style: str, result_path=None):
    """
    Load interior design rendering example input image and pre-generated result image.

    Supports two modes:
    1. Runtime mode: input_image_path is a string path, result_path is a string path
    2. Cache generation mode: input_image_path is a PIL.Image object, result_path is a PIL.Image object

    Args:
        input_image_path: input image path or PIL.Image object.
        design_style: style key used to match an entry in the examples config.
        result_path: result image path or PIL.Image object (cache mode).
    Returns:
        Tuple[Image.Image, Image.Image]: (input image, result image);
        placeholders are substituted for anything that cannot be loaded.
    """
    try:
        # 1. Resolve the input image — accepts a string path or a PIL.Image object.
        input_image = None
        if isinstance(input_image_path, Image.Image):
            # Cache mode: directly use PIL.Image object
            input_image = input_image_path
            logger.info(f"Using PIL.Image object for input (cache mode): {input_image.size}")
        elif isinstance(input_image_path, str):
            # Runtime mode: load the image from its path.
            if os.path.exists(input_image_path):
                logger.info(f"Loading interior design input image: {input_image_path}")
                input_image = Image.open(input_image_path)
            else:
                logger.warning(f"Interior design input image not found: {input_image_path}")
                input_image = create_placeholder_image("Input Image", "1024x1024")
        else:
            logger.warning(f"Unexpected input_image_path type: {type(input_image_path)}")
            input_image = create_placeholder_image("Input Image", "1024x1024")
        # 2. Handle result image - supports string path and PIL.Image object
        result_image = None
        if isinstance(result_path, Image.Image):
            # Cache-generation mode: use the PIL.Image object directly.
            result_image = result_path
            logger.info(f"Using PIL.Image object for result (cache mode): {result_image.size}")
            return (input_image, result_image)
        # Runtime mode: look up the result image in INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS.
        from examples_config import INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS
        # Runtime mode matches by string path; fall back to the default example path
        # when a PIL.Image was supplied without a cached result.
        search_path = input_image_path if isinstance(input_image_path, str) else "examples/interior_input.png"
        for example_input, example_style, example_path in INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS:
            if example_input == search_path and example_style == design_style:
                if os.path.exists(example_path):
                    logger.info(f"Loading interior design example result: {example_path}")
                    result_image = Image.open(example_path)
                    return (input_image, result_image)
                else:
                    logger.warning(f"Interior design example result not found: {example_path}")
                    result_image = create_placeholder_image("Interior Design Result", "1280x1024")
                    return (input_image, result_image)
        # If no matching example is found, create a placeholder
        logger.warning(f"No matching interior design example found for: {search_path}, {design_style}")
        result_image = create_placeholder_image("Interior Design Result", "1280x1024")
        return (input_image, result_image)
    except Exception as e:
        logger.error(f"Error loading interior design example: {e}")
        input_placeholder = create_placeholder_image("Input Image", "1024x1024")
        result_placeholder = create_placeholder_image("Interior Design Result", "1280x1024")
        return (input_placeholder, result_placeholder)
| # Task submission functions for all AI features | |
async def submit_image_to_image_task(pil_image: Image.Image) -> dict:
    """Encode the image and queue an image-to-image conversion task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    payload = ImageToImageSubmission(image_data=pil_to_base64(resized)).to_api_payload()
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/image-to-image",
        payload=payload,
        task_name="Image Conversion",
    )
async def submit_photo_style_task(pil_image: Image.Image, style_preset: str) -> dict:
    """Encode the image and queue a photo style transfer task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    submission = PhotoStyleSubmission(
        image_data=pil_to_base64(resized),
        style_preset=style_preset,
    )
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/photo-style",
        payload=submission.to_api_payload(),
        task_name="Photo Style Transfer",
    )
async def submit_interior_design_task(pil_image: Image.Image, design_style: str) -> dict:
    """Encode the image and queue an interior design rendering task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    submission = InteriorDesignRenderingSubmission(
        image_data=pil_to_base64(resized),
        design_style=design_style,
    )
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/interior-design-rendering",
        payload=submission.to_api_payload(),
        task_name="Interior Design Rendering",
    )
async def submit_watermark_removal_task(pil_image: Image.Image) -> dict:
    """Encode the image and queue a watermark removal task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    payload = WatermarkRemovalSubmission(image_data=pil_to_base64(resized)).to_api_payload()
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/watermark-removal",
        payload=payload,
        task_name="Watermark Removal",
    )
async def submit_line_art_task(pil_image: Image.Image) -> dict:
    """Encode the image and queue a line art conversion task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    payload = LineArtConversionSubmission(image_data=pil_to_base64(resized)).to_api_payload()
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/line-art-conversion",
        payload=payload,
        task_name="Line Art Conversion",
    )
async def submit_image_outpainting_task(pil_image: Image.Image, expand_height: float, expand_width: float) -> dict:
    """Encode the image and queue an outpainting task with the given expansion ratios."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    submission = ImageOutpaintingSubmission(
        image_data=pil_to_base64(resized),
        expand_height=expand_height,
        expand_width=expand_width,
    )
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/image-outpainting",
        payload=submission.to_api_payload(),
        task_name="Image Outpainting",
    )
async def submit_anime_to_real_task(pil_image: Image.Image) -> dict:
    """Encode the image and queue an anime-to-real conversion task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    payload = AnimeToRealSubmission(image_data=pil_to_base64(resized)).to_api_payload()
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/anime-to-real",
        payload=payload,
        task_name="Anime to Real",
    )
async def submit_real_to_anime_task(pil_image: Image.Image) -> dict:
    """Encode the image and queue a real-to-anime conversion task on the backend."""
    resized = resize_image_if_needed(pil_image, max_size=1536, min_size=512)
    payload = RealToAnimeSubmission(image_data=pil_to_base64(resized)).to_api_payload()
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/real-to-anime",
        payload=payload,
        task_name="Real to Anime",
    )
async def submit_five_view_generation_task(input_image: Image.Image) -> dict:
    """Queue a five-view generation task; the image is sent as base64 RGB."""
    rgb_image = input_image.convert("RGB")
    submission = FiveViewGenerationSubmission(image_data=pil_to_base64(rgb_image))
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/five-view-generation",
        payload=submission.to_api_payload(),
        task_name="Five-View Generation",
    )
async def submit_figure_3d_generation_task(input_image: Image.Image, figure_style: str, resolution: str) -> dict:
    """Queue a 2D-to-3D figure generation task with the chosen style and resolution."""
    rgb_image = input_image.convert("RGB")
    submission = Figure3DSubmission(
        image_data=pil_to_base64(rgb_image),
        figure_style=figure_style,
        resolution=resolution,
    )
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/figure-3d-generation",
        payload=submission.to_api_payload(),
        task_name="2D to 3D Figure",
    )
async def submit_character_figure_collaboration_task(input_image: Image.Image) -> dict:
    """Queue a character figure collaboration task; the image is sent as base64 RGB."""
    rgb_image = input_image.convert("RGB")
    submission = CharacterFigureCollaborationSubmission(image_data=pil_to_base64(rgb_image))
    return await submit_task_with_retry(
        endpoint="api/v1/tasks/character-figure-collaboration",
        payload=submission.to_api_payload(),
        task_name="Character Figure Collaboration",
    )
| # ============================================================================ | |
| # UI Configuration and Theme Setup | |
| # ============================================================================ | |
def create_custom_theme():
    """Build the app's clean light theme on top of Gradio's Default base theme."""
    sans_stack = [gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"]
    mono_stack = [gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "Consolas", "monospace"]
    return gr.themes.Default(
        primary_hue=gr.themes.colors.blue,
        secondary_hue=gr.themes.colors.gray,
        neutral_hue=gr.themes.colors.slate,
        font=sans_stack,
        font_mono=mono_stack,
    )
| # ============================================================================ | |
| # AI Processing Functions | |
| # ============================================================================ | |
# Global task tracking
# CURRENT_TASK_ID: id of the most recently submitted backend task (written by
# the generation coroutines below after a successful submission).
CURRENT_TASK_ID = None
# TASK_CANCELLED: cooperative cancellation flag — set by cancel_current_task()
# and polled inside every generation loop between status checks.
TASK_CANCELLED = False
def cancel_current_task():
    """Set the cooperative cancellation flag and restore the button states."""
    global TASK_CANCELLED  # pylint: disable=global-statement
    TASK_CANCELLED = True
    logger.info("Task cancellation requested")
    show_generate = gr.update(visible=True)
    hide_cancel = gr.update(visible=False)
    # Order matches the bound outputs: (generate_btn, cancel_btn, status_info).
    return (show_generate, hide_cancel, "Task cancelled")
async def generate_text_to_image(prompt: str, resolution: str, progress=None):
    """Generate an image from a text prompt via the backend task API.

    Async generator yielding 5-tuples bound to the Gradio outputs:
    (result image or None, status markdown, info/tip markdown,
     generate-button visible, cancel-button visible).

    Args:
        prompt: user-entered image description; blank input short-circuits.
        resolution: resolution string from the dropdown.
        progress: optional Gradio progress callback.
    """
    global CURRENT_TASK_ID, TASK_CANCELLED  # pylint: disable=global-statement
    try:
        TASK_CANCELLED = False
        if not prompt.strip():
            yield None, "โ Please enter an image description", "", True, False
            return
        logger.info(f"Starting text-to-image generation: {prompt[:50]}...")
        start_time = datetime.now()
        # Initial UI state: hide Generate, show Cancel while the task runs.
        yield (
            None,
            "๐ Submitting task to AI server...",
            "**Tip**: The AI is processing your description and preparing to create...",
            False,
            True
        )
        try:
            task_data = await submit_text_to_image_task(prompt, resolution)
            task_id = task_data.get("task_id")
            CURRENT_TASK_ID = task_id
            if not task_id:
                raise Exception("Server did not return a task ID")
            logger.info(f"Task submitted successfully: {task_id}")
        except Exception as e:
            logger.error(f"Task submission failed: {e}")
            # Show known, user-friendly server errors verbatim; mask anything else
            # behind a generic retry message.
            error_message = str(e) if any(msg in str(e) for msg in ["System is currently busy", "Service temporarily unavailable", "Invalid request parameters", "Network connection timeout", "Unable to connect to server", "Task submission failed"]) else "Task submission failed, please try again later"
            yield (
                None,
                f"โ {error_message}",
                "**Tip**: If the system is busy, please wait a moment and try again",
                True,
                False
            )
            return
        # Poll for completion
        max_attempts = CONFIG.MAX_POLL_ATTEMPTS
        poll_interval = CONFIG.POLL_INTERVAL
        for attempt in range(max_attempts):
            # Cooperative cancellation set by cancel_current_task().
            if TASK_CANCELLED:
                yield (
                    None,
                    "Task cancelled",
                    "",
                    True,
                    False
                )
                return
            try:
                status_data = await get_task_status(task_id)
                status = status_data.get("status", "unknown")
                elapsed_time = (datetime.now() - start_time).total_seconds()
                # Cap displayed progress at 95% until completion is confirmed.
                progress_percent = min(95, (attempt / max_attempts) * 100)
                if progress:
                    progress(progress_percent / 100, f"Generating... ({elapsed_time:.0f}s)")
                if status == "completed":
                    # Get result
                    result_data = await get_task_result(task_id)
                    result_image = await load_image_from_result(result_data)
                    logger.info(f"Generation completed in {elapsed_time:.1f}s")
                    yield (
                        result_image,
                        "โ Image generated!",
                        f"Generation time: {elapsed_time:.1f} s",
                        True,
                        False
                    )
                    return
                elif status == "failed":
                    error_msg = status_data.get("error", "Unknown error")
                    yield (
                        None,
                        f"โ Generation failed: {error_msg}",
                        "",
                        True,
                        False
                    )
                    return
                # Update status with helpful tips
                tips = [
                    "๐ก Tip: More detailed descriptions lead to more accurate images",
                    "๐จ Creating: The AI is drawing a unique image based on your description",
                    "โฑ๏ธ Please wait: Complex images take more time to refine",
                    "๐ฅ Almost done: The AI is adding final touches"
                ]
                # Advance one tip every 5 polls, clamped to the last one.
                tip_index = min(attempt // 5, len(tips) - 1)
                yield (
                    None,
                    f"๐จ AI is generating the image... ({elapsed_time:.0f}s)",
                    tips[tip_index],
                    False,
                    True
                )
                await asyncio.sleep(poll_interval)
            except Exception as e:
                logger.error(f"Error polling task status: {e}")
                yield (
                    None,
                    "โ Status query failed. Please try again later.",
                    "",
                    True,
                    False
                )
                return
        # Timeout: polling budget exhausted without a terminal status.
        yield (
            None,
            "Timeout: Generation timed out, please try again later",
            "",
            True,
            False
        )
    except Exception as e:
        logger.error(f"Unexpected error in text-to-image generation: {e}")
        yield (
            None,
            "โ An unexpected error occurred during generation. Please try again later.",
            "",
            True,
            False
        )
async def generic_image_processing(
    input_image: Image.Image,
    task_name: str,
    submit_func,
    submit_args: tuple = (),
    progress=None
):
    """Shared submit-and-poll loop used by every image-based AI feature.

    Async generator yielding 5-tuples bound to the Gradio outputs:
    (result image or None, status markdown, tip markdown,
     generate-button visible, cancel-button visible).

    Args:
        input_image: user-supplied PIL image; None yields an error and stops.
        task_name: human-readable feature name used in messages and the tip table.
        submit_func: async submission coroutine (e.g. submit_line_art_task).
        submit_args: extra positional args forwarded to submit_func after the image.
        progress: optional Gradio progress callback.
    """
    global CURRENT_TASK_ID, TASK_CANCELLED  # pylint: disable=global-statement
    try:
        TASK_CANCELLED = False
        if input_image is None:
            yield None, f"โ Please upload an image", "", True, False
            return
        logger.info(f"Starting {task_name} processing...")
        start_time = datetime.now()
        # Initial UI state: hide Generate, show Cancel while the task runs.
        yield (
            None,
            f"๐ Submitting {task_name} task to the AI server...",
            f"**Tip**: The AI is analyzing your image and preparing to start {task_name}...",
            False,
            True
        )
        try:
            task_data = await submit_func(input_image, *submit_args)
            task_id = task_data.get("task_id")
            CURRENT_TASK_ID = task_id
            if not task_id:
                raise Exception("Server did not return a task ID")
            logger.info(f"{task_name} task submitted successfully: {task_id}")
        except Exception as e:
            logger.error(f"{task_name} task submission failed: {e}")
            # Show user-friendly error message: known server errors pass through
            # verbatim, everything else becomes a generic retry message.
            error_message = str(e) if any(msg in str(e) for msg in ["System is currently busy", "Service temporarily unavailable", "Invalid request parameters", "Network connection timeout", "Unable to connect to server", "Task submission failed"]) else "Task submission failed, please try again later"
            yield (
                None,
                f"โ {error_message}",
                "**Tip**: If the system is busy, please wait a moment and try again",
                True,
                False
            )
            return
        # Poll for completion
        max_attempts = CONFIG.MAX_POLL_ATTEMPTS
        poll_interval = CONFIG.POLL_INTERVAL
        for attempt in range(max_attempts):
            # Cooperative cancellation set by cancel_current_task().
            if TASK_CANCELLED:
                yield (
                    None,
                    "Task cancelled",
                    "",
                    True,
                    False
                )
                return
            try:
                status_data = await get_task_status(task_id)
                status = status_data.get("status", "unknown")
                elapsed_time = (datetime.now() - start_time).total_seconds()
                # Cap displayed progress at 95% until completion is confirmed.
                progress_percent = min(95, (attempt / max_attempts) * 100)
                if progress:
                    progress(progress_percent / 100, f"Processing... ({elapsed_time:.0f}s)")
                if status == "completed":
                    # Get result
                    result_data = await get_task_result(task_id)
                    result_image = await load_image_from_result(result_data)
                    logger.info(f"{task_name} completed in {elapsed_time:.1f}s")
                    yield (
                        result_image,
                        f"โ {task_name} completed!",
                        f"Processing time: {elapsed_time:.1f} s",
                        True,
                        False
                    )
                    return
                elif status == "failed":
                    error_msg = status_data.get("error", "Unknown error")
                    yield (
                        None,
                        f"โ {task_name} failed: {error_msg}",
                        "",
                        True,
                        False
                    )
                    return
                # Update status with task-specific tips
                task_tips = {
                    "Image Conversion": ["๐ Processing: analyzing image features", "๐ฏ Optimizing: applying advanced image processing"],
                    "Five-View Generation": ["๐๏ธ Analyzing: understanding facial and pose features", "๐ Generating: creating multiple viewpoints"],
                    "Photo Style Transfer": ["๐ธ Stylizing: applying professional photography techniques", "โจ Enhancing: optimizing lighting and colors"],
                    "Interior Design Rendering": ["๐ Designing: composing interior layout", "๐จ Rendering: adding furniture and decor"],
                    "Watermark Removal": ["๐ซ Detecting: locating watermark areas", "๐ง Repairing: intelligently inpainting background"],
                    "Line Art Conversion": ["โ๏ธ Outlining: extracting contours", "๐จ Refining: improving line details"],
                    "Image Outpainting": ["๐ Extending: adding coherent border content", "๐ Blending: ensuring seamless continuity"],
                    "Anime to Real": ["๐ค Converting: mapping anime features to realistic ones", "๐ญ Refining: adjusting facial details"],
                    "Real to Anime": ["๐ Stylizing: applying anime art style", "โจ Enhancing: optimizing anime effects"]
                }
                tips = task_tips.get(task_name, ["๐ Processing: AI is working hard", "โฑ๏ธ Please wait: almost done"])
                # Advance one tip every 8 polls, clamped to the last one.
                tip_index = min(attempt // 8, len(tips) - 1)
                yield (
                    None,
                    f"๐จ AI is processing {task_name}... ({elapsed_time:.0f}s)",
                    tips[tip_index],
                    False,
                    True
                )
                await asyncio.sleep(poll_interval)
            except Exception as e:
                logger.error(f"Error polling {task_name} task status: {e}")
                yield (
                    None,
                    "โ Status query failed. Please try again later.",
                    "",
                    True,
                    False
                )
                return
        # Timeout: polling budget exhausted without a terminal status.
        yield (
            None,
            f"Timeout: {task_name} timed out, please try again later",
            "",
            True,
            False
        )
    except Exception as e:
        logger.error(f"Unexpected error in {task_name}: {e}")
        yield (
            None,
            f"โ An unexpected error occurred during {task_name}. Please try again later.",
            "",
            True,
            False
        )
| # Specific AI processing functions | |
async def generate_image_to_image(input_image: Image.Image, progress=None):
    """Run the image-to-image pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Image to Image", submit_image_to_image_task, (), progress)
    async for update in stream:
        yield update
async def generate_photo_style(input_image: Image.Image, style_preset: str, progress=None):
    """Run the photo style transfer pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Photo Style Transfer", submit_photo_style_task, (style_preset,), progress)
    async for update in stream:
        yield update
async def generate_interior_design(input_image: Image.Image, design_style: str, progress=None):
    """Run the interior design rendering pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Interior Design Rendering", submit_interior_design_task, (design_style,), progress)
    async for update in stream:
        yield update
async def generate_watermark_removal(input_image: Image.Image, progress=None):
    """Run the watermark removal pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Watermark Removal", submit_watermark_removal_task, (), progress)
    async for update in stream:
        yield update
async def generate_line_art(input_image: Image.Image, progress=None):
    """Run the line art conversion pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Line Art Conversion", submit_line_art_task, (), progress)
    async for update in stream:
        yield update
async def generate_image_outpainting(
    input_image: Image.Image, expand_height: float, expand_width: float, progress=None
):
    """Run the outpainting pipeline with the given ratios, re-yielding each UI update."""
    stream = generic_image_processing(
        input_image, "Image Outpainting", submit_image_outpainting_task,
        (expand_height, expand_width), progress
    )
    async for update in stream:
        yield update
async def generate_anime_to_real(input_image: Image.Image, progress=None):
    """Run the anime-to-real pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Anime to Real", submit_anime_to_real_task, (), progress)
    async for update in stream:
        yield update
async def generate_real_to_anime(input_image: Image.Image, progress=None):
    """Run the real-to-anime pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Real to Anime", submit_real_to_anime_task, (), progress)
    async for update in stream:
        yield update
| # Five view generation needs special handling | |
async def generate_five_view(input_image: Image.Image, progress=None):
    """Run the five-view generation pipeline, re-yielding each UI update."""
    stream = generic_image_processing(input_image, "Five-View Generation", submit_five_view_generation_task, (), progress)
    async for update in stream:
        yield update
async def generate_figure_3d(input_image: Image.Image, figure_style: str, resolution: str, progress=None):
    """Run the 2D-to-3D figure pipeline, re-yielding each UI update."""
    stream = generic_image_processing(
        input_image, "2D to 3D Figure", submit_figure_3d_generation_task, (figure_style, resolution), progress
    )
    async for update in stream:
        yield update
| # Character figure collaboration generation | |
async def generate_character_figure_collaboration(input_image: Image.Image, progress=None):
    """Run the character figure collaboration pipeline, re-yielding each UI update."""
    stream = generic_image_processing(
        input_image, "Character Figure Collaboration", submit_character_figure_collaboration_task, (), progress
    )
    async for update in stream:
        yield update
def placeholder_handler(*args):
    """Stub handler for features that are not wired up yet."""
    del args  # intentionally unused
    return "Feature under development, stay tuned!", gr.update(visible=True)
def create_text_to_image_interface():
    """Create the text-to-image tab: prompt + resolution on the left, result on the right."""
    with gr.Column():
        gr.Markdown("## ๐ Text to Image")
        gr.Markdown("Enter a textual description and let AI generate an image for you")
        with gr.Row():
            with gr.Column(scale=2):
                # Left pane: prompt entry and generation controls.
                prompt_input = gr.Textbox(
                    label="Image Description",
                    placeholder="Describe the image you want in detail, e.g., a cute cat playing in a garden, anime style, high-quality details",
                    lines=4,
                    max_lines=6
                )
                resolution_input = gr.Dropdown(
                    label="Resolution",
                    choices=[
                        "portrait - 896x1152 (3:4)",
                        "square - 1024x1024 (1:1)",
                        "landscape - 1152x896 (4:3)"
                    ],
                    value="square - 1024x1024 (1:1)",
                    interactive=True
                )
                with gr.Row():
                    generate_btn = gr.Button("๐ Generate Image", variant="primary", scale=2)
                    # Cancel starts hidden; generate_text_to_image toggles visibility.
                    cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False)
                status_info = gr.Markdown("")
            with gr.Column(scale=3):
                # Right pane: generated image plus a status line underneath.
                result_image = gr.Image(
                    label="Result",
                    show_label=True,
                    show_download_button=True,
                    show_share_button=True
                )
                image_info = gr.Markdown("Waiting for image generation...")
        # Bind events
        # generate_text_to_image is an async generator; each yield updates the
        # five outputs in this exact order.
        generate_btn.click(
            fn=generate_text_to_image,
            inputs=[prompt_input, resolution_input],
            outputs=[result_image, status_info, image_info, generate_btn, cancel_btn],
            show_progress=True
        )
        # queue=False lets the cancel click bypass the queued generation job.
        cancel_btn.click(
            fn=cancel_current_task,
            outputs=[generate_btn, cancel_btn, status_info],
            queue=False
        )
    # NOTE(review): this returns a fresh empty Column, not the one built above —
    # confirm callers ignore the return value.
    return gr.Column()
def switch_to_function(function_name: str):
    """Switch the main panel to a specific function and build its interface HTML.

    Args:
        function_name: internal feature key (e.g. "text_to_image", "five_view").
    Returns:
        3-tuple of Gradio updates: (hide welcome_content, show dynamic_content,
        interface HTML string).
    """
    if function_name == "text_to_image":
        # Full usage instructions are only available for text-to-image.
        interface_html = """
        <div id="text-to-image-interface">
            <h2>๐ Text to Image</h2>
            <p>Enter a textual description and let AI generate an image for you</p>
            <div>
                <p><strong>How to use:</strong></p>
                <ul>
                    <li>Describe the image you want in detail in the input box below</li>
                    <li>Select an appropriate resolution</li>
                    <li>Click the "Generate Image" button to start</li>
                    <li>Generation typically takes 3โ10 minutes; please wait patiently</li>
                </ul>
            </div>
        </div>
        """
    else:
        # One-line descriptions for every other feature key.
        function_info = {
            "image_convert": "Processing: Image to Image - Upload an image for intelligent transformation",
            "five_view": "๐๏ธ Five-View Generation - Upload a portrait to generate 5 viewpoints",
            "photo_style": "Photo Style Transfer - Apply professional photography styles",
            "interior": "Interior Design Rendering - Upload a white model to generate design renders",
            "watermark": "Watermark Removal - Detect and remove watermarks intelligently",
            "line_art": "Line Art Conversion - Convert photos into clean line art",
            "expand": "Image Outpainting - Extend image boundaries coherently",
            "anime_to_real": "Anime to Real - Convert anime characters to realistic humans",
            "real_to_anime": "Real to Anime - Convert real photos to anime style"
        }
        description = function_info.get(function_name, "Unknown function")
        interface_html = (
            f"<div class='welcome-container'>"
            f"<h2 class='welcome-title'>{description}</h2><p class='welcome-subtitle'>Feature under development, stay tuned!</p></div>"
        )
    # Hide welcome content and show dynamic content
    return (
        gr.update(visible=False),  # welcome_content
        gr.update(visible=True),   # dynamic_content
        interface_html
    )
| # ============================================================================ | |
| # Main UI Creation | |
| # ============================================================================ | |
| def create_main_interface(): | |
| """Create the main Gradio interface with sidebar layout""" | |
| # Create custom theme | |
| custom_theme = create_custom_theme() | |
| # No custom CSS or JavaScript - use Gradio's default styling | |
| with gr.Blocks( | |
| title=CONFIG.APP_TITLE, | |
| theme=custom_theme, | |
| fill_width=True | |
| ) as interface: | |
| # Main layout with sidebar using Row and Column | |
| with gr.Row(): | |
| # Sidebar column | |
| with gr.Column(scale=1, min_width=250): | |
| gr.Markdown("# AI Toolbox") | |
| gr.Markdown("Choose a feature below to get started") | |
| # Creation Tools group | |
| gr.Markdown("## Creation Tools") | |
| with gr.Group(): | |
| text_to_image_btn = gr.Button("Text to Image", size="sm", variant="secondary") | |
| image_convert_btn = gr.Button("Image to Image", size="sm", variant="secondary") | |
| five_view_btn_sidebar = gr.Button("Five-View Generation", size="sm", variant="secondary") | |
| figure_3d_btn_sidebar = gr.Button("2D to 3D Figure", size="sm", variant="secondary") | |
| character_figure_btn_sidebar = gr.Button("Character Figure Collaboration", size="sm", variant="secondary") | |
| gr.Markdown("---") | |
| # Style Transfer group | |
| gr.Markdown("## Style Transfer") | |
| with gr.Group(): | |
| photo_style_btn_sidebar = gr.Button("Photo Style", size="sm", variant="secondary") | |
| interior_btn_sidebar = gr.Button("Interior Design", size="sm", variant="secondary") | |
| gr.Markdown("---") | |
| # Image Processing group | |
| gr.Markdown("## Image Processing") | |
| with gr.Group(): | |
| watermark_btn_sidebar = gr.Button("Watermark Removal", size="sm", variant="secondary") | |
| line_art_btn_sidebar = gr.Button("Line Art Conversion", size="sm", variant="secondary") | |
| expand_btn_sidebar = gr.Button("Image Outpainting", size="sm", variant="secondary") | |
| gr.Markdown("---") | |
| # Anime Conversion group | |
| gr.Markdown("## Anime Conversion") | |
| with gr.Group(): | |
| anime_to_real_btn_sidebar = gr.Button("Anime to Real", size="sm", variant="secondary") | |
| real_to_anime_btn_sidebar = gr.Button("Real to Anime", size="sm", variant="secondary") | |
| # Main content area | |
| with gr.Column(scale=4): | |
                # Welcome page - uses the recommended elem_id approach
| welcome_content = gr.HTML(""" | |
| <div id=\"welcome-page\"> | |
| <h1 class=\"app-title\">AI Image Generator</h1> | |
| <p class=\"app-subtitle\">Select a feature on the left to start your AI creation journey</p> | |
| <div class=\"feature-grid\"> | |
| <div class=\"feature-card\"> | |
| <h3>Creation Tools</h3> | |
| <p>Text to Image, Image to Image, Multi-view Generation</p> | |
| </div> | |
| <div class=\"feature-card\"> | |
| <h3>Style Transfer</h3> | |
| <p>Photo Style, Interior Design</p> | |
| </div> | |
| <div class=\"feature-card\"> | |
| <h3>Image Processing</h3> | |
| <p>Watermark Removal, Line Art, Outpainting</p> | |
| </div> | |
| <div class=\"feature-card\"> | |
| <h3>Anime Conversion</h3> | |
| <p>Anime to Real, Real to Anime</p> | |
| </div> | |
| </div> | |
| </div> | |
| """, elem_id="welcome-container") | |
| # Dynamic content area (initially hidden) | |
| dynamic_content = gr.HTML(visible=False) | |
                # Text-to-image feature area (initially hidden)
| with gr.Column(visible=False) as text_to_image_interface: | |
| gr.Markdown("## ๐ Text to Image") | |
| gr.Markdown("Enter a textual description and let AI generate an image for you") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| prompt_input = gr.Textbox( | |
| label="Image Description", | |
| placeholder="Describe the image you want in detail, e.g., a cute cat playing in a garden, anime style, high-quality details", | |
| lines=4, | |
| max_lines=6 | |
| ) | |
| resolution_input = gr.Dropdown( | |
| label="Resolution", | |
| choices=[ | |
| "portrait - 768x1344 (9:16)", | |
| "portrait - 896x1152 (3:4)", | |
| "square - 1024x1024 (1:1)", | |
| "landscape - 1152x896 (4:3)", | |
| "landscape - 1216x832 (3:2)", | |
| "landscape - 1344x768 (16:9)", | |
| "landscape - 1536x640 (21:9)" | |
| ], | |
| value="square - 1024x1024 (1:1)", | |
| interactive=True | |
| ) | |
| with gr.Row(): | |
| generate_btn = gr.Button("Generate Image", variant="primary", scale=2) | |
| cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| status_info = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| result_image = gr.Image( | |
| label="Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| image_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Be descriptive: the more detailed the description, the better the results | |
| - Style keywords: e.g., \"high-definition photography\", \"anime style\", \"oil painting style\" | |
| - Composition: describe subject pose, scene layout, and lighting | |
| - Quality hints: e.g., \"4K quality\", \"high-detail\", \"professional photography\" | |
| - Processing time: typically 3โ10 minutes. Please wait patiently | |
| """, visible=True) | |
                    # Add text-to-image examples
| text_to_image_examples_input_only = [[example[0], example[1]] for example in TEXT_TO_IMAGE_EXAMPLES_WITH_RESULTS] | |
| gr.Examples( | |
| examples=text_to_image_examples_input_only, | |
| inputs=[prompt_input, resolution_input], | |
| outputs=result_image, | |
| fn=load_example_result, | |
| label="Example Prompts - Click to preview", | |
| examples_per_page=6, | |
| cache_examples=True | |
| ) | |
                # Image-to-image feature area (initially hidden)
| with gr.Column(visible=False) as image_convert_interface: | |
| gr.Markdown("## Image to Image") | |
| gr.Markdown("Upload an image and let AI intelligently transform it") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| convert_input = gr.Image( | |
| label="Upload Image", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| convert_btn = gr.Button("Transform Image", variant="primary", scale=2) | |
| convert_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| convert_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| convert_result = gr.Image( | |
| label="Transformed Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| convert_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Supported formats: PNG, JPEG, JPG, WEBP | |
| - Recommended size: 512x512 to 1536x1536 pixels | |
| - File size: recommended under 10MB | |
| - Image quality: higher clarity yields better results | |
| - Processing time: typically 1โ3 minutes | |
| """, visible=True) | |
                # Five-view generation feature area (initially hidden)
| with gr.Column(visible=False) as five_view_interface: | |
| gr.Markdown("## Five-View Generation") | |
| gr.Markdown("Upload a portrait to generate 5 different viewpoints") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| five_view_input = gr.Image( | |
| label="Upload Portrait", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| five_view_btn = gr.Button("Generate Five Views", variant="primary", scale=2) | |
| five_view_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| five_view_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| five_view_result = gr.Image( | |
| label="Five-View Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| five_view_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Portrait: upload a clear portrait (front-facing preferred) | |
| - Supported types: real persons, anime/game characters | |
| - Recommended size: 512x512 to 1024x1024 pixels | |
| - Background: simple backgrounds work better | |
| - Processing time: typically 1โ3 minutes. Please wait patiently | |
| """, visible=True) | |
                    # Add five-view generation examples
| five_view_examples_input_only = [[example[0]] for example in FIVE_VIEW_GENERATION_EXAMPLES_WITH_RESULTS] | |
| gr.Examples( | |
| examples=five_view_examples_input_only, | |
| inputs=[five_view_input], | |
| outputs=[five_view_input, five_view_result], | |
| fn=load_five_view_example, | |
| label="๐ก Five-View Examples - Click to preview", | |
| examples_per_page=3, | |
| cache_examples=True | |
| ) | |
                # 2D-to-3D figure feature area (initially hidden)
| with gr.Column(visible=False) as figure_3d_interface: | |
| gr.Markdown("## 2D to 3D Figure Generation") | |
| gr.Markdown("Convert 2D character images into 3D figure renders with various scene styles") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| figure_3d_input = gr.Image( | |
| label="Upload 2D Character Image", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| figure_3d_style = gr.Dropdown( | |
| label="Select Figure Style", | |
| choices=FIGURE_3D_STYLE_CHOICES, | |
| value="professional_lighting", | |
| info="Choose the 3D figure scene style" | |
| ) | |
| figure_3d_resolution = gr.Dropdown( | |
| label="Resolution", | |
| choices=[ | |
| "portrait - 768x1344 (9:16)", | |
| "portrait - 896x1152 (3:4)", | |
| "square - 1024x1024 (1:1)", | |
| "landscape - 1152x896 (4:3)", | |
| "landscape - 1216x832 (3:2)", | |
| "landscape - 1344x768 (16:9)", | |
| "landscape - 1536x640 (21:9)" | |
| ], | |
| value="square - 1024x1024 (1:1)", | |
| info="Choose the output image resolution" | |
| ) | |
| with gr.Row(): | |
| figure_3d_btn = gr.Button("Generate 3D Figure", variant="primary", scale=2) | |
| figure_3d_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| figure_3d_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| figure_3d_result = gr.Image( | |
| label="3D Figure Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| figure_3d_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Character: upload clear 2D character images (anime, game characters, illustrations) | |
| - Supported formats: PNG, JPEG, JPG, WEBP | |
| - Recommended size: 512x512 to 1536x1536 pixels | |
| - File size: under 10MB recommended | |
| - Processing time: typically 1โ3 minutes | |
| - Scene styles: professional lighting, collector shelf, desktop display, miniature adventure, Alice's tea party | |
| """, visible=True) | |
                    # Add 3D figure examples - different resolutions showcase output variety
| figure_3d_examples_input_only = [ | |
| [FIGURE_3D_EXAMPLES_WITH_RESULTS[0][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[0][1], "square - 1024x1024 (1:1)"], | |
| [FIGURE_3D_EXAMPLES_WITH_RESULTS[1][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[1][1], "landscape - 1152x896 (4:3)"] if len(FIGURE_3D_EXAMPLES_WITH_RESULTS) > 1 else [FIGURE_3D_EXAMPLES_WITH_RESULTS[0][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[0][1], "landscape - 1152x896 (4:3)"], | |
| [FIGURE_3D_EXAMPLES_WITH_RESULTS[2][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[2][1], "portrait - 896x1152 (3:4)"] if len(FIGURE_3D_EXAMPLES_WITH_RESULTS) > 2 else [FIGURE_3D_EXAMPLES_WITH_RESULTS[0][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[0][1], "portrait - 896x1152 (3:4)"], | |
| [FIGURE_3D_EXAMPLES_WITH_RESULTS[3][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[3][1], "landscape - 1344x768 (16:9)"] if len(FIGURE_3D_EXAMPLES_WITH_RESULTS) > 3 else [FIGURE_3D_EXAMPLES_WITH_RESULTS[0][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[0][1], "landscape - 1344x768 (16:9)"], | |
| [FIGURE_3D_EXAMPLES_WITH_RESULTS[4][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[4][1], "portrait - 768x1344 (9:16)"] if len(FIGURE_3D_EXAMPLES_WITH_RESULTS) > 4 else [FIGURE_3D_EXAMPLES_WITH_RESULTS[0][0], FIGURE_3D_EXAMPLES_WITH_RESULTS[0][1], "portrait - 768x1344 (9:16)"] | |
| ][:len(FIGURE_3D_EXAMPLES_WITH_RESULTS)] | |
| gr.Examples( | |
| examples=figure_3d_examples_input_only, | |
| inputs=[figure_3d_input, figure_3d_style, figure_3d_resolution], | |
| outputs=[figure_3d_input, figure_3d_style, figure_3d_resolution, figure_3d_result], | |
| fn=load_figure_3d_example, | |
| label="๐จ 3D Figure Examples - Click to preview different styles", | |
| examples_per_page=5, | |
| cache_examples=True | |
| ) | |
                # Character figure collaboration feature area (initially hidden)
| with gr.Column(visible=False) as character_figure_collaboration_interface: | |
| gr.Markdown("## Character Figure Collaboration") | |
| gr.Markdown("Generate collaboration photos between characters and figures") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| character_figure_input = gr.Image( | |
| label="Upload Character Full-Body Photo", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| character_figure_btn = gr.Button("Generate Collaboration Photo", variant="primary", scale=2) | |
| character_figure_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| character_figure_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| character_figure_result = gr.Image( | |
| label="Collaboration Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| character_figure_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Upload clear full-body character photos (real person or virtual character) | |
| - Supported formats: PNG, JPEG, JPG, WEBP | |
| - Recommended size: 512x512 to 1536x1536 pixels | |
| - File size: under 10MB recommended | |
| - Processing time: typically 1โ3 minutes | |
| - The AI will generate a collaboration photo with the character and a figure | |
| """, visible=True) | |
                    # Add character figure collaboration examples
| if CHARACTER_FIGURE_COLLABORATION_EXAMPLES_WITH_RESULTS: # ๅชๆๅฝๆexamplesๆถๆๆพ็คบ | |
| gr.Examples( | |
| examples=CHARACTER_FIGURE_COLLABORATION_EXAMPLES_WITH_RESULTS, | |
| inputs=[character_figure_input], | |
| outputs=[character_figure_input, character_figure_result], | |
| fn=load_character_figure_collaboration_example, | |
| label="๐จ Character Figure Collaboration Examples - Click to preview", | |
| examples_per_page=5, | |
| cache_examples=True | |
| ) | |
                # Photo style transfer feature area (initially hidden)
| with gr.Column(visible=False) as photo_style_interface: | |
| gr.Markdown("## Photo Style Transfer") | |
| gr.Markdown("Apply professional photography styles to your photos") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| photo_style_input = gr.Image( | |
| label="Upload Photo", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| photo_style_dropdown = gr.Dropdown( | |
| label="Select Photo Style", | |
| choices=PHOTO_STYLE_CHOICES, | |
| value="camera_movement", | |
| info="Choose the photography style to apply" | |
| ) | |
| with gr.Row(): | |
| photo_style_btn = gr.Button("Apply Style", variant="primary", scale=2) | |
| photo_style_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| photo_style_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| photo_style_result = gr.Image( | |
| label="Style Transfer Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| photo_style_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Photo types: portraits, landscapes, product photos, etc. | |
| - Recommended quality: higher resolution photos yield better results | |
| - Style selection: choose a style that matches the photo type | |
| - Lighting: well-lit photos convert better | |
| - Processing time: typically 1โ2 minutes | |
| """, visible=True) | |
                # Interior design rendering feature area (initially hidden)
| with gr.Column(visible=False) as interior_interface: | |
| gr.Markdown("## Interior Design Rendering") | |
| gr.Markdown("Upload a white model interior image to generate design renders") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| interior_input = gr.Image( | |
| label="Upload Interior White Model", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| interior_style = gr.Dropdown( | |
| label="Select Interior Style", | |
| choices=INTERIOR_DESIGN_STYLE_CHOICES, | |
| value="japanese_wabi_sabi", # keep default key | |
| info="Choose the interior design style to apply" | |
| ) | |
| with gr.Row(): | |
| interior_btn = gr.Button("Render Design", variant="primary", scale=2) | |
| interior_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| interior_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| interior_result = gr.Image( | |
| label="Rendered Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| interior_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Input: upload a white model or line art of the interior | |
| - Style description: describe the desired design style in detail | |
| - Space type: living room, bedroom, kitchen, office, etc. | |
| - Style keywords: modern minimalism, Nordic, classic Chinese, etc. | |
| - Processing time: typically 2โ5 minutes | |
| """, visible=True) | |
                    # Add interior design examples
| interior_design_examples_input_only = [[example[0], example[1]] for example in INTERIOR_DESIGN_EXAMPLES_WITH_RESULTS] | |
| gr.Examples( | |
| examples=interior_design_examples_input_only, | |
| inputs=[interior_input, interior_style], | |
| outputs=[interior_input, interior_result], | |
| fn=load_interior_design_example_result, | |
| label="๐ก Interior Design Examples - Click to preview", | |
| examples_per_page=4, | |
| cache_examples=True | |
| ) | |
                # Watermark removal feature area (initially hidden)
| with gr.Column(visible=False) as watermark_interface: | |
| gr.Markdown("## Watermark Removal") | |
| gr.Markdown("Intelligently detect and remove watermarks from images") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| watermark_input = gr.Image( | |
| label="Upload Image with Watermark", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| watermark_btn = gr.Button("Remove Watermark", variant="primary", scale=2) | |
| watermark_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| watermark_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| watermark_result = gr.Image( | |
| label="Watermark Removal Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| watermark_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Watermark types: supports text and icon watermarks | |
| - Image quality: higher quality images yield better results | |
| - Watermark location: AI will automatically detect and remove | |
| - Background complexity: simpler backgrounds remove better | |
| - Processing time: typically 1โ3 minutes | |
| """, visible=True) | |
                # Line art conversion feature area (initially hidden)
| with gr.Column(visible=False) as line_art_interface: | |
| gr.Markdown("## Line Art Conversion") | |
| gr.Markdown("Convert your photo into clean line art") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| line_art_input = gr.Image( | |
| label="Upload Photo", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| line_art_btn = gr.Button("Convert to Line Art", variant="primary", scale=2) | |
| line_art_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| line_art_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| line_art_result = gr.Image( | |
| label="Line Art Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| line_art_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Photo types: portraits, landscapes, architecture, objects | |
| - Image clarity: higher clarity yields better line art | |
| - Contrast: higher contrast improves line extraction | |
| - Complexity: rich details produce better line art | |
| - Processing time: typically 1โ2 minutes | |
| """, visible=True) | |
                    # Add line art conversion examples
| gr.Examples( | |
| examples=LINE_ART_CONVERSION_EXAMPLES_WITH_RESULTS, | |
| inputs=[line_art_input], | |
| outputs=[line_art_input, line_art_result], | |
| fn=load_line_art_example_result, | |
| label="๐ก Line Art Examples - Click to preview", | |
| examples_per_page=3, | |
| cache_examples=True | |
| ) | |
                # Image outpainting feature area (initially hidden)
| with gr.Column(visible=False) as expand_interface: | |
| gr.Markdown("## Image Outpainting") | |
| gr.Markdown("Intelligently extend image boundaries while keeping content coherent") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| expand_input = gr.Image( | |
| label="Upload Image to Outpaint", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| expand_height = gr.Slider( | |
| label="Outpaint Height (%)", | |
| minimum=0.0, | |
| maximum=1.0, | |
| value=0.2, | |
| step=0.1, | |
| interactive=True, | |
| info="Percentage to extend vertically" | |
| ) | |
| expand_width = gr.Slider( | |
| label="Outpaint Width (%)", | |
| minimum=0.0, | |
| maximum=1.0, | |
| value=0.3, | |
| step=0.1, | |
| interactive=True, | |
| info="Percentage to extend horizontally" | |
| ) | |
| with gr.Row(): | |
| expand_btn = gr.Button("Outpaint Image", variant="primary", scale=2) | |
| expand_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| expand_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| expand_result = gr.Image( | |
| label="Outpainting Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| expand_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Direction: choose which sides to extend (top/bottom/left/right) | |
| - Pixel amount: 64โ512 pixels recommended; too large may impact quality | |
| - Edges: richer edge content yields more natural results | |
| - Coherence: AI will fill in content consistent with the original | |
| - Processing time: typically 2โ4 minutes | |
| """, visible=True) | |
                    # Image outpainting examples - fixes the inputs/outputs matching issue
| gr.Examples( | |
| examples=IMAGE_OUTPAINTING_EXAMPLES_WITH_RESULTS, | |
| inputs=[expand_input, expand_height, expand_width, expand_result], | |
| outputs=[expand_input, expand_height, expand_width, expand_result], | |
| fn=load_outpainting_example_for_gradio, | |
| label="๐ก Outpainting Examples - Click to preview", | |
| examples_per_page=3, | |
| cache_examples=False # Disable caching to avoid serialization issues | |
| ) | |
                # Anime-to-real feature area (initially hidden)
| with gr.Column(visible=False) as anime_to_real_interface: | |
| gr.Markdown("## Anime to Real") | |
| gr.Markdown("Convert anime characters to realistic humans") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| anime_to_real_input = gr.Image( | |
| label="Upload Anime Character", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| anime_to_real_btn = gr.Button("Convert to Real", variant="primary", scale=2) | |
| anime_to_real_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| anime_to_real_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| anime_to_real_result = gr.Image( | |
| label="Anime to Real Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| anime_to_real_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Anime character: supports various anime/game characters | |
| - Facial features: clearer facial features yield better conversion | |
| - Image quality: higher-quality anime images perform better | |
| - Character type: humanoid characters convert best | |
| - Processing time: typically 1โ3 minutes | |
| """, visible=True) | |
                    # Add anime-to-real examples
| gr.Examples( | |
| examples=ANIME_TO_REAL_EXAMPLES_WITH_RESULTS, | |
| inputs=[anime_to_real_input], | |
| outputs=[anime_to_real_input, anime_to_real_result], | |
| fn=load_anime_to_real_example_result, | |
| label="๐ก Anime to Real Examples - Click to preview", | |
| examples_per_page=1, | |
| cache_examples=True | |
| ) | |
                # Real-to-anime feature area (initially hidden)
| with gr.Column(visible=False) as real_to_anime_interface: | |
| gr.Markdown("## Real to Anime") | |
| gr.Markdown("Convert real photos into anime style") | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| real_to_anime_input = gr.Image( | |
| label="Upload Real Photo", | |
| type="pil", | |
| sources=["upload", "clipboard"], | |
| height=250 | |
| ) | |
| with gr.Row(): | |
| real_to_anime_btn = gr.Button("Convert to Anime", variant="primary", scale=2) | |
| real_to_anime_cancel_btn = gr.Button("Cancel", variant="secondary", scale=1, visible=False) | |
| real_to_anime_status = gr.Markdown("") | |
| with gr.Column(scale=3): | |
| real_to_anime_result = gr.Image( | |
| label="Real to Anime Result", | |
| show_label=True, | |
| show_download_button=True, | |
| show_share_button=True | |
| ) | |
| real_to_anime_info = gr.Markdown(""" | |
| ### ๐ก Tips | |
| - Real photo: upload a clear portrait | |
| - Lighting: evenly lit photos work better | |
| - Pose: front or side portraits convert best | |
| - Background: simple backgrounds help the subject stand out | |
| - Processing time: typically 1โ3 minutes | |
| """, visible=True) | |
                    # Add real-to-anime examples
| gr.Examples( | |
| examples=REAL_TO_ANIME_EXAMPLES_WITH_RESULTS, | |
| inputs=[real_to_anime_input], | |
| outputs=[real_to_anime_input, real_to_anime_result], | |
| fn=load_real_to_anime_example_result, | |
| label="๐ก Real to Anime Examples - Click to preview", | |
| examples_per_page=3, | |
| cache_examples=True | |
| ) | |
| # Bind events to sidebar buttons within the Blocks context | |
| # pylint: disable=no-member | |
| # Define all interfaces in consistent order for interface switching | |
| all_interface_outputs = [ | |
| welcome_content, # 0 | |
| dynamic_content, # 1 | |
| text_to_image_interface, # 2 | |
| image_convert_interface, # 3 | |
| five_view_interface, # 4 | |
| figure_3d_interface, # 5 | |
| character_figure_collaboration_interface, # 6 | |
| photo_style_interface, # 7 | |
| interior_interface, # 8 | |
| watermark_interface, # 9 | |
| line_art_interface, # 10 | |
| expand_interface, # 11 | |
| anime_to_real_interface, # 12 | |
| real_to_anime_interface # 13 | |
| ] | |
| # Helper function to show specific interface | |
| def show_interface(interface_index): | |
| """Show specific interface by index, hide all others""" | |
| updates = [] | |
| for i in range(len(all_interface_outputs)): | |
| if i == interface_index: | |
| updates.append(gr.update(visible=True)) | |
| else: | |
| updates.append(gr.update(visible=False)) | |
| return tuple(updates) | |
| # Text-to-image events | |
| text_to_image_btn.click( | |
| fn=lambda: show_interface(2), # text_to_image_interface is at index 2 | |
| outputs=all_interface_outputs | |
| ) | |
| generate_btn.click( | |
| fn=generate_text_to_image, | |
| inputs=[prompt_input, resolution_input], | |
| outputs=[result_image, status_info, image_info, generate_btn, cancel_btn], | |
| show_progress=True | |
| ) | |
| cancel_btn.click( | |
| fn=cancel_current_task, | |
| outputs=[generate_btn, cancel_btn, status_info], | |
| queue=False | |
| ) | |
| # Image convert events | |
| image_convert_btn.click( | |
| fn=lambda: show_interface(3), # image_convert_interface is at index 3 | |
| outputs=all_interface_outputs | |
| ) | |
| convert_btn.click( | |
| fn=generate_image_to_image, | |
| inputs=[convert_input], | |
| outputs=[convert_result, convert_status, convert_info, convert_btn, convert_cancel_btn], | |
| show_progress=True | |
| ) | |
| convert_cancel_btn.click( | |
| fn=cancel_current_task, | |
| outputs=[convert_btn, convert_cancel_btn, convert_status], | |
| queue=False | |
| ) | |
| # Five view events | |
| five_view_btn_sidebar.click( | |
| fn=lambda: show_interface(4), # five_view_interface is at index 4 | |
| outputs=all_interface_outputs | |
| ) | |
| five_view_btn.click( | |
| fn=generate_five_view, | |
| inputs=[five_view_input], | |
| outputs=[five_view_result, five_view_status, five_view_info, five_view_btn, five_view_cancel_btn], | |
| show_progress=True | |
| ) | |
| five_view_cancel_btn.click( | |
| fn=cancel_current_task, | |
| outputs=[five_view_btn, five_view_cancel_btn, five_view_status], | |
| queue=False | |
| ) | |
| # Figure 3D events | |
| figure_3d_btn_sidebar.click( | |
| fn=lambda: show_interface(5), # figure_3d_interface is at index 5 | |
| outputs=all_interface_outputs | |
| ) | |
| figure_3d_btn.click( | |
| fn=generate_figure_3d, | |
| inputs=[figure_3d_input, figure_3d_style, figure_3d_resolution], | |
| outputs=[figure_3d_result, figure_3d_status, figure_3d_info, figure_3d_btn, figure_3d_cancel_btn], | |
| show_progress=True | |
| ) | |
| figure_3d_cancel_btn.click( | |
| fn=cancel_current_task, | |
| outputs=[figure_3d_btn, figure_3d_cancel_btn, figure_3d_status], | |
| queue=False | |
| ) | |
# ---------------------------------------------------------------------
# Event wiring for the remaining tool panels. Each tool follows the same
# three-handler pattern:
#   * <tool>_btn_sidebar.click  -> show_interface(i): reveals panel i and
#     hides the others via the shared `all_interface_outputs` list.
#   * <tool>_btn.click          -> runs the generator fn; outputs update the
#     result image, status text, info text, and toggle the generate/cancel
#     button pair.
#   * <tool>_cancel_btn.click   -> cancel_current_task with queue=False so
#     cancellation bypasses the request queue and takes effect immediately.
# NOTE(review): newer Gradio versions expect show_progress="full"/"hidden"
# rather than a bool — confirm against the pinned gradio version.
# ---------------------------------------------------------------------
# Character Figure Collaboration events
character_figure_btn_sidebar.click(
fn=lambda: show_interface(6), # character_figure_collaboration_interface is at index 6
outputs=all_interface_outputs
)
character_figure_btn.click(
fn=generate_character_figure_collaboration,
inputs=[character_figure_input],
outputs=[character_figure_result, character_figure_status, character_figure_info, character_figure_btn, character_figure_cancel_btn],
show_progress=True
)
character_figure_cancel_btn.click(
fn=cancel_current_task,
outputs=[character_figure_btn, character_figure_cancel_btn, character_figure_status],
queue=False
)
# Photo style events
photo_style_btn_sidebar.click(
fn=lambda: show_interface(7), # photo_style_interface is at index 7
outputs=all_interface_outputs
)
# Photo style is the only tool here that takes a second input: the preset
# dropdown (see PHOTO_STYLE_DISPLAY_MAPPING at the top of the file).
photo_style_btn.click(
fn=generate_photo_style,
inputs=[photo_style_input, photo_style_dropdown],
outputs=[photo_style_result, photo_style_status, photo_style_info, photo_style_btn, photo_style_cancel_btn],
show_progress=True
)
photo_style_cancel_btn.click(
fn=cancel_current_task,
outputs=[photo_style_btn, photo_style_cancel_btn, photo_style_status],
queue=False
)
# Interior design events
interior_btn_sidebar.click(
fn=lambda: show_interface(8), # interior_interface is at index 8
outputs=all_interface_outputs
)
# Interior design also takes a style selector in addition to the image.
interior_btn.click(
fn=generate_interior_design,
inputs=[interior_input, interior_style],
outputs=[interior_result, interior_status, interior_info, interior_btn, interior_cancel_btn],
show_progress=True
)
interior_cancel_btn.click(
fn=cancel_current_task,
outputs=[interior_btn, interior_cancel_btn, interior_status],
queue=False
)
# Watermark removal events
watermark_btn_sidebar.click(
fn=lambda: show_interface(9), # watermark_interface is at index 9
outputs=all_interface_outputs
)
watermark_btn.click(
fn=generate_watermark_removal,
inputs=[watermark_input],
outputs=[watermark_result, watermark_status, watermark_info, watermark_btn, watermark_cancel_btn],
show_progress=True
)
watermark_cancel_btn.click(
fn=cancel_current_task,
outputs=[watermark_btn, watermark_cancel_btn, watermark_status],
queue=False
)
# Line art events
line_art_btn_sidebar.click(
fn=lambda: show_interface(10), # line_art_interface is at index 10
outputs=all_interface_outputs
)
line_art_btn.click(
fn=generate_line_art,
inputs=[line_art_input],
outputs=[line_art_result, line_art_status, line_art_info, line_art_btn, line_art_cancel_btn],
show_progress=True
)
line_art_cancel_btn.click(
fn=cancel_current_task,
outputs=[line_art_btn, line_art_cancel_btn, line_art_status],
queue=False
)
# Expand (image outpainting) events
expand_btn_sidebar.click(
fn=lambda: show_interface(11), # expand_interface is at index 11
outputs=all_interface_outputs
)
# Outpainting takes target height/width in addition to the source image.
expand_btn.click(
fn=generate_image_outpainting,
inputs=[expand_input, expand_height, expand_width],
outputs=[expand_result, expand_status, expand_info, expand_btn, expand_cancel_btn],
show_progress=True
)
expand_cancel_btn.click(
fn=cancel_current_task,
outputs=[expand_btn, expand_cancel_btn, expand_status],
queue=False
)
# Anime to real events
anime_to_real_btn_sidebar.click(
fn=lambda: show_interface(12), # anime_to_real_interface is at index 12
outputs=all_interface_outputs
)
anime_to_real_btn.click(
fn=generate_anime_to_real,
inputs=[anime_to_real_input],
outputs=[
anime_to_real_result, anime_to_real_status, anime_to_real_info,
anime_to_real_btn, anime_to_real_cancel_btn
],
show_progress=True
)
anime_to_real_cancel_btn.click(
fn=cancel_current_task,
outputs=[anime_to_real_btn, anime_to_real_cancel_btn, anime_to_real_status],
queue=False
)
# Real to anime events
real_to_anime_btn_sidebar.click(
fn=lambda: show_interface(13), # real_to_anime_interface is at index 13
outputs=all_interface_outputs
)
real_to_anime_btn.click(
fn=generate_real_to_anime,
inputs=[real_to_anime_input],
outputs=[
real_to_anime_result, real_to_anime_status, real_to_anime_info,
real_to_anime_btn, real_to_anime_cancel_btn
],
show_progress=True
)
real_to_anime_cancel_btn.click(
fn=cancel_current_task,
outputs=[real_to_anime_btn, real_to_anime_cancel_btn, real_to_anime_status],
queue=False
)
# pylint: enable=no-member
# Return the root Blocks object plus a flat name->component registry so
# callers (and tests) can reach individual widgets without re-querying the
# layout tree. Keys are grouped by feature; every generate/cancel button,
# result image, status box and info box created above is included.
return interface, {
# Sidebar buttons
'text_to_image_btn': text_to_image_btn,
'image_convert_btn': image_convert_btn,
'five_view_btn_sidebar': five_view_btn_sidebar,
'figure_3d_btn_sidebar': figure_3d_btn_sidebar,
'character_figure_btn_sidebar': character_figure_btn_sidebar,
'photo_style_btn_sidebar': photo_style_btn_sidebar,
'interior_btn_sidebar': interior_btn_sidebar,
'watermark_btn_sidebar': watermark_btn_sidebar,
'line_art_btn_sidebar': line_art_btn_sidebar,
'expand_btn_sidebar': expand_btn_sidebar,
'anime_to_real_btn_sidebar': anime_to_real_btn_sidebar,
'real_to_anime_btn_sidebar': real_to_anime_btn_sidebar,
# Main content areas
'welcome_content': welcome_content,
'dynamic_content': dynamic_content,
# All interface components (panel containers toggled by show_interface)
'text_to_image_interface': text_to_image_interface,
'image_convert_interface': image_convert_interface,
'five_view_interface': five_view_interface,
'figure_3d_interface': figure_3d_interface,
'character_figure_collaboration_interface': character_figure_collaboration_interface,
'photo_style_interface': photo_style_interface,
'interior_interface': interior_interface,
'watermark_interface': watermark_interface,
'line_art_interface': line_art_interface,
'expand_interface': expand_interface,
'anime_to_real_interface': anime_to_real_interface,
'real_to_anime_interface': real_to_anime_interface,
# Text-to-image components
'prompt_input': prompt_input,
'resolution_input': resolution_input,
'generate_btn': generate_btn,
'cancel_btn': cancel_btn,
'result_image': result_image,
'status_info': status_info,
'image_info': image_info,
# Image convert components
'convert_input': convert_input,
'convert_btn': convert_btn,
'convert_cancel_btn': convert_cancel_btn,
'convert_result': convert_result,
'convert_status': convert_status,
'convert_info': convert_info,
# Five view components
'five_view_input': five_view_input,
'five_view_btn': five_view_btn,
'five_view_cancel_btn': five_view_cancel_btn,
'five_view_result': five_view_result,
'five_view_status': five_view_status,
'five_view_info': five_view_info,
# Photo style components
'photo_style_input': photo_style_input,
'photo_style_dropdown': photo_style_dropdown,
'photo_style_btn': photo_style_btn,
'photo_style_cancel_btn': photo_style_cancel_btn,
'photo_style_result': photo_style_result,
'photo_style_status': photo_style_status,
'photo_style_info': photo_style_info,
# Interior design components
'interior_input': interior_input,
'interior_style': interior_style,
'interior_btn': interior_btn,
'interior_cancel_btn': interior_cancel_btn,
'interior_result': interior_result,
'interior_status': interior_status,
'interior_info': interior_info,
# Watermark removal components
'watermark_input': watermark_input,
'watermark_btn': watermark_btn,
'watermark_cancel_btn': watermark_cancel_btn,
'watermark_result': watermark_result,
'watermark_status': watermark_status,
'watermark_info': watermark_info,
# Line art conversion components
'line_art_input': line_art_input,
'line_art_btn': line_art_btn,
'line_art_cancel_btn': line_art_cancel_btn,
'line_art_result': line_art_result,
'line_art_status': line_art_status,
'line_art_info': line_art_info,
# Image expansion components
'expand_input': expand_input,
'expand_height': expand_height,
'expand_width': expand_width,
'expand_btn': expand_btn,
'expand_cancel_btn': expand_cancel_btn,
'expand_result': expand_result,
'expand_status': expand_status,
'expand_info': expand_info,
# Anime to real components
'anime_to_real_input': anime_to_real_input,
'anime_to_real_btn': anime_to_real_btn,
'anime_to_real_cancel_btn': anime_to_real_cancel_btn,
'anime_to_real_result': anime_to_real_result,
'anime_to_real_status': anime_to_real_status,
'anime_to_real_info': anime_to_real_info,
# Real to anime components
'real_to_anime_input': real_to_anime_input,
'real_to_anime_btn': real_to_anime_btn,
'real_to_anime_cancel_btn': real_to_anime_cancel_btn,
'real_to_anime_result': real_to_anime_result,
'real_to_anime_status': real_to_anime_status,
'real_to_anime_info': real_to_anime_info
}
# Create the main interface at import time; `demo` is the Blocks app used by
# the launch code below and `components` is the name->widget registry.
demo, components = create_main_interface()
| # ========================================================================= | |
| # Security Configuration | |
| # ========================================================================= | |
| try: | |
| from fastapi import Request | |
| from starlette.responses import PlainTextResponse | |
| app = demo.app # Gradio's underlying FastAPI app | |
| async def block_gradio_settings(request: Request, call_next): | |
| path = request.url.path.lower() | |
| # Block known and future settings-related paths aggressively | |
| blocked_keywords = ( | |
| "/settings", | |
| "/studio", | |
| "/screen", | |
| "/record", | |
| ) | |
| if any(k in path for k in blocked_keywords): | |
| return PlainTextResponse("404 Not Found", status_code=404) | |
| return await call_next(request) | |
| print("๐ Server-level interceptor enabled: /settings and related internal pages will return 404") | |
| except Exception as _e: | |
| # If anything goes wrong, do not block app startup; log only | |
| print(f"โ ๏ธ Failed to install Settings route interceptor: {_e}") | |
| # ============================================================================ | |
| # Launch Configuration | |
| # ============================================================================ | |
| if __name__ == "__main__": | |
| # Setup server configuration | |
| SERVER_NAME_CONFIG = CONFIG.SERVER_HOST | |
| SERVER_PORT_CONFIG = CONFIG.SERVER_PORT | |
| SHARE_CONFIG = CONFIG.ENABLE_SHARE | |
| # Print startup information | |
| print(f"๐ Starting {CONFIG.APP_TITLE}") | |
| print(f"๐ Access URL: http://{SERVER_NAME_CONFIG}:{SERVER_PORT_CONFIG}") | |
| if SHARE_CONFIG: | |
| print("๐ Public share: enabled") | |
| else: | |
| print("๐ก Tip: Local access only (more secure)") | |
| # Launch the application | |
| print("๐ Launching AI Image Generator...") | |
| print(f"๐ Server: {SERVER_NAME_CONFIG}:{SERVER_PORT_CONFIG}") | |
| print("๐จ Forced light theme: enabled") | |
| print("๐ Debug mode: disabled") | |
| try: | |
| demo.launch( | |
| server_name=SERVER_NAME_CONFIG, | |
| server_port=SERVER_PORT_CONFIG, | |
| share=SHARE_CONFIG, | |
| debug=False, # Force disable debug mode | |
| show_error=False, # Hide error details for production | |
| quiet=False # Show basic launch info | |
| ) | |
| except Exception as e: | |
| print(f"โ Launch failed: {e}") | |
| import traceback | |
| traceback.print_exc() | |