import gradio as gr
import numpy as np
import random
import os
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import torch
from PIL import Image
import time
import psutil

# Environment settings for better performance on CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.set_grad_enabled(False)  # Disable gradients for inference

# CPU-specific optimizations
if device == "cpu":
    os.environ["OMP_NUM_THREADS"] = str(os.cpu_count())
    torch.set_num_threads(os.cpu_count())
    print(f"Using {os.cpu_count()} CPU threads")

model_repo_id = "dhead/wai-nsfw-illustrious-sdxl-v140-sdxl"

# Data-type optimizations
try:
    if torch.cuda.is_available():
        torch_dtype = torch.float16
        pipe = DiffusionPipeline.from_pretrained(
            model_repo_id,
            torch_dtype=torch_dtype,
            use_safetensors=True,
            # Request the fp16 weight variant only when the repo id indicates one exists
            variant="fp16" if "fp16" in model_repo_id else None,
        )
    else:
        torch_dtype = torch.float32
        pipe = DiffusionPipeline.from_pretrained(
            model_repo_id,
            torch_dtype=torch_dtype,
            use_safetensors=True,
        )
except Exception as e:
    print(f"Error loading model: {e}")
    # Fall back to basic loading
    pipe = DiffusionPipeline.from_pretrained(model_repo_id)
    torch_dtype = torch.float32

# Pipeline optimizations
try:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
except Exception:
    print("Using default scheduler")

pipe = pipe.to(device)

# CPU-only optimizations
if device == "cpu":
    try:
        pipe.enable_attention_slicing()
        print("Attention slicing enabled")
    except Exception as e:
        print(f"Could not enable attention slicing: {e}")

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
DEFAULT_IMAGE_SIZE = 512  # Reduced default size for CPU


def get_memory_info():
    """Return system memory usage information in GB."""
    memory = psutil.virtual_memory()
    return {
        "total": memory.total / (1024**3),
        "available": memory.available / (1024**3),
        "used": memory.used / (1024**3),
        "percent": memory.percent,
    }


def optimize_for_prompt_and_memory(prompt, width, height):
    """Automatically tune parameters based on the prompt and available memory."""
    prompt_lower = prompt.lower()
    memory_info = get_memory_info()

    # Base step count based on prompt complexity
    complex_keywords = ["detailed", "intricate", "complex", "8k", "ultra detailed", "high detail"]
    simple_keywords = ["simple", "minimal", "basic", "sketch"]
    base_steps = 20
    if any(keyword in prompt_lower for keyword in complex_keywords):
        base_steps = min(25, base_steps + 5)
    elif any(keyword in prompt_lower for keyword in simple_keywords):
        base_steps = max(15, base_steps - 5)

    # Adjust based on available memory
    if memory_info["available"] < 4:  # Less than 4 GB available
        base_steps = max(15, base_steps - 5)
        width = min(width, 512)
        height = min(height, 512)
    elif memory_info["available"] < 8:  # Less than 8 GB available
        base_steps = max(18, base_steps - 2)
        width = min(width, 768)
        height = min(height, 768)

    # Cap the total pixel count
    total_pixels = width * height
    if total_pixels > 1024 * 1024:
        scale_factor = (1024 * 1024) / total_pixels
        width = int(width * scale_factor ** 0.5)
        height = int(height * scale_factor ** 0.5)

    # Round down to multiples of 32 so the dimensions stay valid for the model
    width = (width // 32) * 32
    height = (height // 32) * 32

    return base_steps, width, height


def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    enable_optimizations=True,
    progress=gr.Progress(track_tqdm=True),
):
    if not prompt.strip():
        return None, 0, "Please enter a prompt"

    start_time = time.time()
    memory_before = get_memory_info()
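    # memory_before is captured only as a diagnostic snapshot here; it is not shown
    # in the UI yet, but keeping it makes a before/after memory comparison easy to add.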
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # Automatic optimizations
    original_steps = num_inference_steps
    original_width = width
    original_height = height
    if enable_optimizations:
        num_inference_steps, width, height = optimize_for_prompt_and_memory(prompt, width, height)

    try:
        # Check available memory before generating
        memory_info = get_memory_info()
        if memory_info["available"] < 2:  # Less than 2 GB available
            return None, seed, "Error: Not enough memory available. Please try with lower resolution or fewer steps."

        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]

        generation_time = time.time() - start_time
        memory_after = get_memory_info()

        info_text = f"✅ Generation time: {generation_time:.1f}s | "
        info_text += f"Steps: {num_inference_steps} | "
        info_text += f"Size: {width}x{height} | "
        info_text += f"Memory: {memory_after['used']:.1f}GB used"
        if enable_optimizations and (
            original_steps != num_inference_steps
            or original_width != width
            or original_height != height
        ):
            info_text += " | ⚡ Auto-optimized"

        return image, seed, info_text

    except torch.cuda.OutOfMemoryError:
        return None, seed, "❌ CUDA Out of Memory Error. Please reduce image size or steps."
    except RuntimeError as e:
        if "out of memory" in str(e).lower():
            return None, seed, "❌ System Out of Memory Error. Please reduce image size or steps."
        return None, seed, f"❌ Runtime Error: {str(e)}"
    except Exception as e:
        return None, seed, f"❌ Error: {str(e)}"


def save_image(image, prompt, seed):
    """Save the generated image along with its metadata."""
    if image is None:
        return "No image to save"
    try:
        timestamp = int(time.time())
        filename = f"generated_{timestamp}_{seed}.png"

        # Create the output folder if it does not exist
        os.makedirs("generated_images", exist_ok=True)
        filepath = os.path.join("generated_images", filename)
        image.save(filepath)

        # Save metadata alongside the image
        metadata_file = os.path.join("generated_images", f"metadata_{timestamp}.txt")
        with open(metadata_file, "w") as f:
            f.write(f"Prompt: {prompt}\n")
            f.write(f"Seed: {seed}\n")
            f.write(f"Timestamp: {timestamp}\n")
            f.write(f"Model: {model_repo_id}\n")

        return f"✅ Image saved as {filename}"
    except Exception as e:
        return f"❌ Error saving image: {str(e)}"


def clear_all():
    """Clear all results."""
    return None, 0, "Ready for new generation"


# Example prompts
examples = [
    "A beautiful sunset over mountains, digital art",
    "A cute cat wearing a wizard hat, fantasy art",
    "Futuristic city with flying cars, cyberpunk style",
    "Peaceful forest with glowing mushrooms, magical",
    "A bowl of fruit on a table, still life painting",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 800px;
}
.gallery-container {
    display: grid;
    grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
    gap: 10px;
    margin-top: 20px;
}
.performance-info {
    background: #f0f0f0;
    padding: 10px;
    border-radius: 5px;
    margin: 10px 0;
    font-family: monospace;
}
.memory-warning {
    background: #fff3cd;
    border: 1px solid #ffeaa7;
    padding: 10px;
    border-radius: 5px;
    margin: 10px 0;
}
"""

with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # 🎨 Advanced Text-to-Image Generator
        *Optimized for CPU performance - 18GB RAM*
        """)

        # Display system information
        memory_info = get_memory_info()
        gr.Markdown(f"""