# NOTE: This file was recovered from a Hugging Face Spaces page that was
# displaying a "Runtime error" status banner; page/table markup has been
# stripped from the source below.
# Standard library
import json
import logging
import os
import socket
import sys
import time
from datetime import datetime
from io import BytesIO
from pathlib import Path

# Third-party
import gradio as gr
import requests
from PIL import Image
# ========== Logging configuration ==========
class CustomFormatter(logging.Formatter):
    """Formatter that colorizes console output according to log level.

    Each standard level maps to an ANSI-colored variant of the same base
    format string. Unknown levels fall through to ``FORMATS.get`` → None,
    i.e. logging's default message-only format.
    """

    grey = "\x1b[38;20m"
    yellow = "\x1b[33;20m"
    red = "\x1b[31;20m"
    bold_red = "\x1b[31;1m"
    green = "\x1b[32;20m"
    blue = "\x1b[34;20m"
    cyan = "\x1b[36;20m"
    reset = "\x1b[0m"

    # FIX: the original named this class attribute `format`, shadowing the
    # builtin and being silently clobbered by the format() method defined
    # below. It only worked because FORMATS was built before the method def.
    _base_fmt = "%(asctime)s - %(levelname)s - %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"

    FORMATS = {
        logging.DEBUG: blue + _base_fmt + reset,
        logging.INFO: green + _base_fmt + reset,
        logging.WARNING: yellow + _base_fmt + reset,
        logging.ERROR: red + _base_fmt + reset,
        logging.CRITICAL: bold_red + _base_fmt + reset,
    }

    def format(self, record):
        """Render *record* using the color format for its level."""
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt, datefmt=self.datefmt)
        return formatter.format(record)
# Create the application logger; DEBUG so every record reaches the handlers.
logger = logging.getLogger('FluxGenerator')
logger.setLevel(logging.DEBUG)

# FIX: guard against duplicate handlers. Without this, re-importing or
# re-executing the module (hot-reload, notebook re-run) attaches another
# pair of handlers and every log line is emitted multiple times.
if not logger.handlers:
    # Colorized console output
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(CustomFormatter())

    # Plain-text file output
    file_handler = logging.FileHandler('flux_generator.log', encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(file_formatter)

    # Attach both handlers
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
# ========== Network helper functions ==========
def get_local_ip():
    """Return this machine's LAN IP address, or '127.0.0.1' on failure.

    Opens a UDP socket "towards" a public address (no packet is actually
    sent for a UDP connect) and reads back the local address the OS chose.
    """
    try:
        # FIX: use a context manager so the socket is closed even when
        # connect() raises — the original leaked the socket on exception.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            local_ip = s.getsockname()[0]
        logger.info(f"🌐 Локальный IP адрес определен: {local_ip}")
        return local_ip
    except Exception as e:
        logger.error(f"❌ Не удалось определить локальный IP: {e}")
        return "127.0.0.1"
def get_public_ip():
    """Return the machine's public IP via api.ipify.org.

    On any failure (network error, timeout, malformed reply) a
    human-readable fallback string is returned instead of raising.
    """
    try:
        reply = requests.get('https://api.ipify.org?format=json', timeout=5)
        ip_address = reply.json()['ip']
        logger.info(f"🌍 Публичный IP адрес: {ip_address}")
        return ip_address
    except Exception as exc:
        logger.warning(f"⚠️ Не удалось определить публичный IP: {exc}")
        return "Не удалось определить"
def check_api_connectivity(api_url, api_token):
    """Probe the inference API with a GET and report reachability.

    Returns True only for an HTTP 200 response. Returns False when the
    token is missing, on any non-200 status, or on timeout / connection /
    unexpected errors (all logged, never raised).
    """
    logger.info(f"🔍 Проверка подключения к API: {api_url}")

    # A missing token can never authenticate — fail fast.
    if not api_token:
        logger.error("❌ API токен не найден!")
        return False

    headers = {"Authorization": f"Bearer {api_token}"}
    try:
        reply = requests.get(api_url, headers=headers, timeout=10)
    except requests.exceptions.Timeout:
        logger.error("❌ Таймаут при подключении к API")
        return False
    except requests.exceptions.ConnectionError:
        logger.error("❌ Ошибка подключения к API")
        return False
    except Exception as exc:
        logger.error(f"❌ Неизвестная ошибка при проверке API: {exc}")
        return False

    if reply.status_code == 200:
        logger.info("✅ API доступен и отвечает")
        return True
    logger.warning(f"⚠️ API ответил с кодом: {reply.status_code}")
    return False
# ========== Main generator class ==========
class ImageGenerator:
    """Client for a Hugging Face text-to-image inference endpoint.

    Tracks per-session request/error counters and keeps an in-memory
    history of successful generations for display in the UI.
    """

    # Maximum number of retries while the remote model answers 503
    # ("model is loading"). FIX: the original retried via *unbounded*
    # recursion, so a model that never finished loading would sleep and
    # recurse forever.
    MAX_LOADING_RETRIES = 5

    def __init__(self, api_token, api_url):
        """Store credentials/endpoint and probe API connectivity once.

        api_token -- Hugging Face API token (may be falsy; generation then
                     fails fast with an explanatory message).
        api_url   -- full model inference endpoint URL.
        """
        self.api_token = api_token
        self.api_url = api_url
        self.headers = {"Authorization": f"Bearer {api_token}"}
        self.generation_history = []  # dicts describing successful generations
        self.request_count = 0        # total requests logged this session
        self.error_count = 0          # requests that ended in an error
        logger.info("🖼️ Инициализация ImageGenerator")
        logger.debug(f"API URL: {api_url}")
        # Only the final 4 token characters are ever written to the log.
        logger.debug(f"API Token: {'*' * 8}{api_token[-4:] if api_token else 'None'}")
        # One-time connectivity probe; purely informational.
        if check_api_connectivity(api_url, api_token):
            logger.info("✅ Готов к работе")
        else:
            logger.warning("⚠️ Возможны проблемы с подключением к API")

    def log_request(self, endpoint, method="POST", status=None, error=None):
        """Record one request: bump counters, log it, return a request id.

        Called once when a request starts (status/error None) and again on
        failure, so a failed generation intentionally counts twice in
        request_count.
        """
        self.request_count += 1
        request_id = f"REQ_{self.request_count:06d}"
        log_data = {
            "request_id": request_id,
            "timestamp": datetime.now().isoformat(),
            "method": method,
            "endpoint": endpoint,
            "status": status,
            "error": error,
        }
        if error:
            self.error_count += 1
            logger.error(f"[{request_id}] {method} {endpoint} - Status: {status} - Error: {error}")
        else:
            logger.info(f"[{request_id}] {method} {endpoint} - Status: {status}")
        logger.debug(f"Детали запроса: {json.dumps(log_data, ensure_ascii=False)}")
        return request_id

    def generate_image(self, prompt, negative_prompt="", num_inference_steps=50,
                       guidance_scale=7.5, seed=None, progress=gr.Progress(),
                       _retries_left=MAX_LOADING_RETRIES):
        """
        Generate an image with advanced parameters.

        Returns (PIL.Image, success message) on success, or
        (None, error message) on any failure.

        _retries_left is internal/backward-compatible: it bounds the 503
        "model loading" retry recursion (see MAX_LOADING_RETRIES).
        """
        request_id = self.log_request("/generate", "POST")
        logger.info(f"[{request_id}] Начало генерации изображения")
        logger.info(f"[{request_id}] Prompt: {prompt[:100]}{'...' if len(prompt) > 100 else ''}")

        # Fail fast on missing credentials or an empty prompt.
        if not self.api_token:
            error_msg = "HF_API_TOKEN environment variable not set"
            logger.critical(f"[{request_id}] {error_msg}")
            return None, f"Error: {error_msg}"
        if not prompt or prompt.strip() == "":
            error_msg = "Please enter a prompt"
            logger.warning(f"[{request_id}] {error_msg}")
            return None, f"Error: {error_msg}"

        # Build the payload; None-valued parameters are stripped below.
        payload = {
            "inputs": prompt,
            "parameters": {
                "negative_prompt": negative_prompt if negative_prompt else None,
                "num_inference_steps": num_inference_steps,
                "guidance_scale": guidance_scale,
                # FIX: the original used `seed if seed else None`, which
                # silently discarded the perfectly valid seed value 0.
                "seed": seed,
            },
        }
        payload["parameters"] = {k: v for k, v in payload["parameters"].items() if v is not None}
        logger.debug(f"[{request_id}] Payload: {json.dumps(payload, ensure_ascii=False, default=str)}")

        try:
            progress(0.1, desc="Initializing generation...")
            logger.debug(f"[{request_id}] Отправка запроса к API")
            start_time = time.time()
            response = requests.post(self.api_url, headers=self.headers, json=payload, timeout=60)
            elapsed_time = time.time() - start_time
            logger.debug(f"[{request_id}] Время ответа API: {elapsed_time:.2f} секунд")
            progress(0.5, desc="Processing response...")

            if response.status_code == 200:
                logger.info(f"[{request_id}] ✅ Успешный ответ от API (200)")
                logger.debug(f"[{request_id}] Размер ответа: {len(response.content)} байт")
                # The API returns raw image bytes on success.
                image = Image.open(BytesIO(response.content))
                logger.info(f"[{request_id}] Изображение успешно загружено")
                logger.debug(f"[{request_id}] Размер изображения: {image.size}, Формат: {image.format}")
                # Record the successful generation for the History tab.
                timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                self.generation_history.append({
                    "request_id": request_id,
                    "timestamp": timestamp,
                    "prompt": prompt,
                    "negative_prompt": negative_prompt,
                    "seed": seed,
                    "steps": num_inference_steps,
                    "guidance": guidance_scale,
                    "response_time": f"{elapsed_time:.2f}s",
                })
                logger.info(f"[{request_id}] Генерация завершена за {elapsed_time:.2f}с")
                progress(1.0, desc="Complete!")
                return image, f"Success! Image generated at {timestamp} (Request: {request_id})"

            elif response.status_code == 503:
                # 503 means the model is still loading server-side.
                logger.warning(f"[{request_id}] Модель загружается (503)")
                if _retries_left <= 0:
                    # FIX: bounded retry — the original recursed forever here.
                    error_msg = f"Error {response.status_code}: model is still loading, retries exhausted"
                    self.log_request("/generate", "POST", response.status_code, error_msg)
                    return None, error_msg
                progress(0.3, desc="Model is loading, please wait...")
                time.sleep(5)
                return self.generate_image(prompt, negative_prompt, num_inference_steps,
                                           guidance_scale, seed, progress,
                                           _retries_left=_retries_left - 1)

            else:
                error_msg = f"Error {response.status_code}: {response.text[:200]}"
                logger.error(f"[{request_id}] {error_msg}")
                self.log_request("/generate", "POST", response.status_code, error_msg)
                return None, error_msg

        except requests.exceptions.Timeout:
            error_msg = "Request timed out. Please try again."
            logger.error(f"[{request_id}] ⏰ {error_msg}")
            self.log_request("/generate", "POST", "TIMEOUT", error_msg)
            return None, f"Error: {error_msg}"
        except requests.exceptions.ConnectionError:
            error_msg = "Connection error. Please check your internet connection."
            logger.error(f"[{request_id}] 🔌 {error_msg}")
            self.log_request("/generate", "POST", "CONNECTION_ERROR", error_msg)
            return None, f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            logger.error(f"[{request_id}] ❌ {error_msg}", exc_info=True)
            self.log_request("/generate", "POST", "EXCEPTION", error_msg)
            return None, f"Error: {error_msg}"

    def get_history(self):
        """Return generation history as formatted text (last 10, newest first)."""
        if not self.generation_history:
            return "No generations yet"
        stats = f"📊 **Статистика:** Всего запросов: {self.request_count}, Ошибок: {self.error_count}\n\n"
        history_text = stats + "### Generation History\n\n"
        for i, item in enumerate(reversed(self.generation_history[-10:]), 1):
            history_text += f"{i}. **[{item['request_id']}] {item['timestamp']}**\n"
            history_text += f"   Prompt: {item['prompt'][:50]}...\n"
            if item['negative_prompt']:
                history_text += f"   Negative: {item['negative_prompt'][:30]}...\n"
            if item['seed']:
                history_text += f"   Seed: {item['seed']}\n"
            history_text += f"   Response time: {item['response_time']}\n\n"
        return history_text

    def clear_history(self):
        """Clear generation history and reset all session counters."""
        self.generation_history = []
        self.request_count = 0
        self.error_count = 0
        logger.info("История очищена")
        return "History cleared"
# ========== Interface construction ==========
def create_enhanced_ui():
    """Create an enhanced Gradio interface with more features"""
    # Resolve the host's network addresses (displayed in the UI header).
    local_ip = get_local_ip()
    public_ip = get_public_ip()
    # Initialize generator.
    # NOTE(review): API_TOKEN and API_URL are module globals assigned in the
    # __main__ block, so this function only works when called after that
    # assignment — confirm call order before reusing elsewhere.
    generator = ImageGenerator(API_TOKEN, API_URL)
    # Custom CSS for better styling
    custom_css = """
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }
    .generate-btn {
        background: linear-gradient(90deg, #6366f1 0%, #8b5cf6 100%) !important;
        color: white !important;
        border: none !important;
    }
    .generate-btn:hover {
        background: linear-gradient(90deg, #4f46e5 0%, #7c3aed 100%) !important;
    }
    .history-panel {
        background: #f3f4f6;
        border-radius: 8px;
        padding: 10px;
    }
    .ip-info {
        background: #e8f0fe;
        padding: 10px;
        border-radius: 5px;
        margin-bottom: 10px;
        font-family: monospace;
    }
    """
    with gr.Blocks(theme="hev832/Applio", css=custom_css, title="Flux Uncensored Enhanced") as ui:
        # Header with the resolved IP/network information.
        gr.Markdown(f"""
        # 🎨 Flux Uncensored Image Generator
        <div class="ip-info">
        🌐 **Сетевые адреса:**
        • Локальный IP: `{local_ip}`
        • Публичный IP: `{public_ip}`
        • Порт: `7860`
        • API Endpoint: `{API_URL}`
        </div>
        ### Доступ к интерфейсу:
        • Локально: http://localhost:7860
        • В локальной сети: http://{local_ip}:7860
        """)
        with gr.Tabs():
            # Main Generation Tab
            with gr.TabItem("Generate"):
                with gr.Row():
                    with gr.Column(scale=2):
                        # Main prompt input
                        prompt = gr.Textbox(
                            label="📝 Prompt",
                            placeholder="Describe the image you want to generate in detail...",
                            lines=4
                        )
                        # Advanced options (collapsed by default)
                        with gr.Accordion("⚙️ Advanced Options", open=False):
                            negative_prompt = gr.Textbox(
                                label="Negative Prompt",
                                placeholder="What to avoid in the image...",
                                lines=2
                            )
                            with gr.Row():
                                steps = gr.Slider(
                                    label="Inference Steps",
                                    minimum=20,
                                    maximum=100,
                                    value=50,
                                    step=1
                                )
                                guidance = gr.Slider(
                                    label="Guidance Scale",
                                    minimum=1.0,
                                    maximum=20.0,
                                    value=7.5,
                                    step=0.5
                                )
                            seed = gr.Number(
                                label="Seed (optional)",
                                value=None,
                                precision=0
                            )
                        # Generate button
                        generate_btn = gr.Button(
                            "🎨 Generate Image",
                            variant="primary",
                            elem_classes="generate-btn"
                        )
                    with gr.Column(scale=1):
                        # Status / info readout (read-only)
                        status = gr.Textbox(
                            label="Status",
                            value="Ready to generate",
                            interactive=False
                        )
                # Generated-image output
                with gr.Row():
                    output_image = gr.Image(
                        label="Generated Image",
                        type="pil",
                        height=400
                    )
            # History Tab
            with gr.TabItem("📜 History"):
                with gr.Row():
                    history_text = gr.Markdown("No generations yet")
                with gr.Row():
                    refresh_history_btn = gr.Button("🔄 Refresh History")
                    clear_history_btn = gr.Button("🗑️ Clear History", variant="stop")
            # Info Tab (static text)
            with gr.TabItem("ℹ️ Info"):
                gr.Markdown(f"""
                ## About Flux Uncensored
                This is an unofficial Gradio interface for the Flux Uncensored model on Hugging Face.
                ### System Information:
                - **Local IP:** `{local_ip}`
                - **Public IP:** `{public_ip}`
                - **API Status:** {"✅ Connected" if API_TOKEN else "❌ No Token"}
                - **Log File:** `flux_generator.log`
                ### Tips for better results:
                - Be specific and detailed in your prompts
                - Use negative prompts to avoid unwanted elements
                - Experiment with different guidance scales (7.5 is a good starting point)
                - More inference steps generally produce better quality but take longer
                ### Network Access:
                Access this interface from other devices on your network using:
                ```
                http://{local_ip}:7860
                ```
                ### Note:
                Make sure to set your `HF_API_TOKEN` environment variable before running.
                """)
        # Event handlers
        def on_generate(prompt, negative_prompt, steps, guidance, seed):
            # A seed of 0 from the Number widget is treated as "no seed".
            logger.info(f"🖼️ Новая генерация с параметрами: steps={steps}, guidance={guidance}, seed={seed}")
            img, msg = generator.generate_image(
                prompt,
                negative_prompt,
                steps,
                guidance,
                seed if seed != 0 else None
            )
            return img, msg
        generate_btn.click(
            fn=on_generate,
            inputs=[prompt, negative_prompt, steps, guidance, seed],
            outputs=[output_image, status]
        )
        # History handlers
        def update_history():
            return generator.get_history()
        refresh_history_btn.click(
            fn=update_history,
            outputs=[history_text]
        )
        # NOTE(review): clear_history already returns "History cleared", so the
        # chained .then() writing the same string appears redundant.
        clear_history_btn.click(
            fn=generator.clear_history,
            outputs=[history_text]
        ).then(
            fn=lambda: "History cleared",
            outputs=[history_text]
        )
        # Auto-refresh history when generating (second listener on the same
        # button; runs in addition to on_generate).
        generate_btn.click(
            fn=update_history,
            outputs=[history_text]
        )
        # Clear inputs
        def clear_inputs():
            # Reset every input widget to its initial value.
            logger.debug("Очистка полей ввода")
            return "", "", 50, 7.5, None
        with gr.Row():
            clear_btn = gr.Button("🗑️ Clear Inputs")
        clear_btn.click(
            fn=clear_inputs,
            outputs=[prompt, negative_prompt, steps, guidance, seed]
        )
    return ui
| # ========== Запуск приложения ========== | |
| if __name__ == "__main__": | |
| # Показываем баннер при запуске | |
| print("\n" + "="*60) | |
| print("🚀 Flux Uncensored Image Generator") | |
| print("="*60) | |
| print(f"📅 Запуск приложения: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") | |
| print("="*60 + "\n") | |
| # Load API Token from environment variable | |
| API_TOKEN = os.getenv("HF_API_TOKEN") | |
| API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored" | |
| # Check for API token | |
| if not API_TOKEN: | |
| logger.critical("⚠️ HF_API_TOKEN environment variable is not set!") | |
| logger.critical("Please set it u |