dsfsadfsdfdsf committed on
Commit
51fc11d
·
verified ·
1 Parent(s): 5a53caa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +475 -252
app.py CHANGED
@@ -1,103 +1,220 @@
1
- import os
2
- import time
3
  import gradio as gr
4
  import torch
5
  from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
 
6
  from PIL import Image
 
7
  import json
8
- from datetime import datetime
9
 
10
- # 📦 ДВЕ МОДЕЛИ ДЛЯ ВЫБОРА
11
- MODELS = {
12
- "dreamlike": {
13
- "name": "dreamlike-art/dreamlike-photoreal-2.0",
14
- "display": "✨ Dreamlike Photoreal 2.0",
15
- "description": "Художественный фотореализм, яркие цвета",
16
- "default_steps": 25,
17
- "max_size": 768
18
- },
19
- "sd15": {
20
- "name": "runwayml/stable-diffusion-v1-5",
21
- "display": "🎨 Stable Diffusion 1.5",
22
- "description": "Классическая модель, стабильная детализация",
23
- "default_steps": 25,
24
- "max_size": 512
25
- }
26
- }
27
 
28
- class SimpleGenerator:
 
 
 
29
  def __init__(self):
30
- self.device = "cpu"
31
- self.current_model = None
32
- self.pipe = None
33
- self.history = []
34
 
35
- print("🤖 Инициализация генератора...")
36
- print(f"💻 Устройство: {self.device}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- def load_model(self, model_key):
39
- """Загружает выбранную модель"""
40
- if model_key not in MODELS:
41
- return False, f"❌ Модель {model_key} не найдена"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- model_info = MODELS[model_key]
 
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  try:
46
- print(f"🚀 Загружаем {model_info['display']}...")
47
- start_time = time.time()
 
 
48
 
49
- # Выгружаем предыдущую модель для экономии памяти
50
  if self.pipe is not None:
51
  del self.pipe
52
  torch.cuda.empty_cache() if torch.cuda.is_available() else None
53
 
 
 
54
  # Загружаем новую модель
55
  self.pipe = StableDiffusionPipeline.from_pretrained(
56
  model_info["name"],
57
- torch_dtype=torch.float32,
58
  safety_checker=None,
59
  requires_safety_checker=False
60
  )
61
 
62
- self.pipe = self.pipe.to(self.device)
63
  self.pipe.scheduler = EulerDiscreteScheduler.from_config(
64
  self.pipe.scheduler.config
65
  )
66
- self.pipe.enable_attention_slicing()
67
 
68
- load_time = time.time() - start_time
69
- self.current_model = model_key
 
70
 
71
- print(f"✅ {model_info['display']} загружена за {load_time:.1f} сек")
72
- return True, f"✅ {model_info['display']} загружена!\n📝 {model_info['description']}"
 
73
 
74
  except Exception as e:
75
- print(f" Ошибка загрузки: {e}")
76
- return False, f" Ошибка загрузки: {str(e)[:100]}"
77
 
78
- def generate(self, prompt, negative_prompt="", steps=25, seed=-1, size=512):
79
- """Генерация изображения"""
80
  if self.pipe is None:
81
- return None, " Модель не загружена. Выберите модель сначала."
82
-
83
- model_info = MODELS[self.current_model]
84
-
85
- # Ограничиваем размер по модели
86
- if size > model_info["max_size"]:
87
- size = model_info["max_size"]
88
-
89
- # Seed
90
- if seed == -1:
91
- seed = int(time.time()) % 1000000
92
- generator = torch.Generator(device=self.device).manual_seed(seed)
93
-
94
- print(f"\n🎨 ГЕНЕРАЦИЯ [{model_info['display']}]")
95
- print(f" 📝 Промпт: {prompt[:60]}...")
96
- print(f" 🔢 Шаги: {steps}, Размер: {size}x{size}, Seed: {seed}")
97
-
98
- start_time = time.time()
99
 
100
  try:
 
 
 
 
 
 
 
 
101
  image = self.pipe(
102
  prompt=prompt,
103
  negative_prompt=negative_prompt,
@@ -108,225 +225,331 @@ class SimpleGenerator:
108
  width=size
109
  ).images[0]
110
 
111
- gen_time = time.time() - start_time
112
-
113
- # Сохраняем в историю
114
- history_entry = {
115
- "timestamp": datetime.now().isoformat(),
116
- "model": model_info["display"],
117
- "prompt": prompt,
118
- "time": gen_time,
119
- "steps": steps,
120
- "size": size,
121
- "seed": seed
122
- }
123
- self.history.append(history_entry)
124
-
125
- # Информация
126
  info = f"""
127
- **Готово!**
128
- 🎨 **Модель:** {model_info['display']}
129
- 🕐 **Время:** {gen_time:.1f} секунд
130
- 🔢 **Шаги:** {steps}
131
- 📏 **Размер:** {size}x{size}
132
  🎲 **Seed:** {seed}
133
 
134
- 💡 **Совет:** Используйте этот seed ({seed}) для повторения
135
  """
136
 
137
  return image, info
138
 
139
  except Exception as e:
140
- print(f"💥 Ошибка генерации: {e}")
141
- return None, f" Ошибка генерации: {str(e)[:200]}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
- def get_history_html(self):
144
- """Получает историю в HTML формате"""
145
- if not self.history:
146
- return "<p style='color: #666; text-align: center;'>История пуста</p>"
147
-
148
- html = "<div style='max-height: 300px; overflow-y: auto; padding: 10px;'>"
149
-
150
- # Группируем по мо��елям для статистики
151
- model_stats = {}
152
- for entry in self.history:
153
- model = entry["model"]
154
- if model not in model_stats:
155
- model_stats[model] = {"count": 0, "total_time": 0}
156
- model_stats[model]["count"] += 1
157
- model_stats[model]["total_time"] += entry["time"]
158
-
159
- # Статистика
160
- html += "<div style='background: #f8f9fa; padding: 10px; border-radius: 8px; margin-bottom: 15px;'>"
161
- html += "<strong>📊 Статистика:</strong><br>"
162
- for model, stats in model_stats.items():
163
- avg_time = stats["total_time"] / stats["count"]
164
- html += f"{model}: {stats['count']} раз, среднее время: {avg_time:.1f} сек<br>"
165
- html += "</div>"
166
-
167
- # Последние генерации
168
- for entry in reversed(self.history[-5:]):
169
- dt = datetime.fromisoformat(entry["timestamp"]).strftime("%H:%M")
170
-
171
- html += f"""
172
- <div style='
173
- border: 1px solid #ddd;
174
- border-radius: 8px;
175
- padding: 12px;
176
- margin-bottom: 10px;
177
- background: white;
178
- '>
179
- <div style='display: flex; justify-content: space-between;'>
180
- <span style='font-weight: 500;'>{entry['model']}</span>
181
- <span style='color: #888; font-size: 12px;'>{dt}</span>
182
- </div>
183
- <div style='margin: 5px 0; color: #333;'>{entry['prompt'][:50]}...</div>
184
- <div style='font-size: 12px; color: #666;'>
185
- ⏱️ {entry['time']:.1f} сек • 🔢 {entry['steps']} шагов • 📏 {entry['size']}px
186
- </div>
187
- </div>
188
- """
189
-
190
- html += "</div>"
191
- return html
192
 
193
- # Инициализация
194
- print("=" * 60)
195
- print("🎯 ГЕНЕРАТОР С ВЫБОРОМ МОДЕЛИ")
196
- print("=" * 60)
197
- generator = SimpleGenerator()
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- # Создаем интерфейс
200
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  gr.Markdown("""
202
- # 🎨 Генератор с выбором модели
203
- ### Сравниваем Dreamlike Photoreal 2.0 и Stable Diffusion 1.5
 
 
 
204
 
205
- <div style='background: #f0f7ff; padding: 20px; border-radius: 10px; border-left: 5px solid #4a6fa5;'>
206
- <strong>✨ Как использовать:</strong>
207
- <ol>
208
- <li>Выберите модель из списка</li>
209
- <li>Нажмите "Загрузить модель"</li>
210
- <li>Введите промпт и настройте параметры</li>
211
- <li>Нажмите "Сгенерировать"</li>
212
- </ol>
213
- </div>
214
  """)
215
 
216
- with gr.Row():
217
- with gr.Column(scale=1):
218
- # Выбор модели
219
- gr.Markdown("### 1. 🎯 Выберите модель")
220
- model_selector = gr.Radio(
221
- choices=[
222
- (" Dreamlike Photoreal 2.0 (художественный)", "dreamlike"),
223
- ("🎨 Stable Diffusion 1.5 (классический)", "sd15")
224
- ],
225
- value="dreamlike",
226
- label="Модель",
227
- interactive=True
228
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
 
230
- load_model_btn = gr.Button("📥 Загрузить модель", variant="secondary")
231
- model_status = gr.Markdown("Выберите модель и загрузите её")
 
 
 
 
232
 
233
- gr.Markdown("---")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234
 
235
- # Промпты
236
- gr.Markdown("### 2. 📝 Описание")
237
- prompt = gr.Textbox(
238
- label="Что создать?",
239
- value="close-up portrait of a woman with beautiful detailed eyes, photorealistic",
240
- lines=3
241
  )
242
 
243
- negative = gr.Textbox(
244
- label="Чего избегать",
245
- value="blurry, deformed, ugly, cartoon",
246
- lines=2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
  )
248
 
249
- # Настройки
250
- with gr.Accordion("⚙️ Настройки генерации", open=False):
251
- steps = gr.Slider(15, 35, 25, step=1, label="Количество шагов")
252
- seed = gr.Number(-1, label="Seed (-1 = случайный)")
253
- size = gr.Slider(384, 768, 512, step=64, label="Размер изображения")
 
 
 
 
 
254
 
255
- generate_btn = gr.Button("✨ Сгенерировать изображение", variant="primary", size="lg")
 
 
 
 
 
 
 
 
256
 
257
- # Быстрые промпты
258
- gr.Markdown("---")
259
- gr.Markdown("### 🚀 Быстрые тесты:")
 
 
260
 
261
- with gr.Row():
262
- test_eyes = gr.Button("👁️ Детальные глаза", size="sm")
263
- test_portrait = gr.Button("👩 Художественный портрет", size="sm")
264
-
265
- with gr.Column(scale=1):
266
- # Результат
267
- gr.Markdown("### 3. 🎨 Результат")
268
- output_image = gr.Image(
269
- label="Сгенерированное изображение",
270
- height=500
271
- )
272
 
273
- output_info = gr.Markdown(
274
- label="Информация о генерации",
275
- value="Здесь появится информация..."
276
- )
277
 
278
- # История
279
- with gr.Accordion("📖 История генераций", open=True):
280
- history_display = gr.HTML(
281
- value="<p style='color: #666; text-align: center;'>История пуста</p>"
282
- )
283
 
284
- # Обработчики событий
285
- def load_model_handler(model_key):
286
- success, message = generator.load_model(model_key)
287
- return message
288
-
289
- def generate_handler(prompt, negative, steps, seed, size):
290
- return generator.generate(prompt, negative, steps, seed, size)
291
-
292
- def update_history():
293
- return generator.get_history_html()
294
-
295
- # Привязка кнопок
296
- load_model_btn.click(
297
- fn=load_model_handler,
298
- inputs=[model_selector],
299
- outputs=[model_status]
300
- )
301
-
302
- generate_btn.click(
303
- fn=generate_handler,
304
- inputs=[prompt, negative, steps, seed, size],
305
- outputs=[output_image, output_info]
306
- ).then(
307
- fn=update_history,
308
- inputs=None,
309
- outputs=[history_display]
310
- )
311
-
312
- # Быстрые промпты
313
- test_eyes.click(
314
- fn=lambda: ("macro photography of stunning blue eyes with water droplets, detailed iris, eyelashes, extreme close-up, photorealistic, 8k resolution", "blurry, makeup, photoshop"),
315
- inputs=None,
316
- outputs=[prompt, negative]
317
- )
318
-
319
- test_portrait.click(
320
- fn=lambda: ("beautiful woman in traditional dress, intricate jewelry, soft natural lighting, detailed fabric texture, portrait photography, professional", "cartoon, 3d, cgi, blurry"),
321
- inputs=None,
322
- outputs=[prompt, negative]
323
- )
324
 
325
- # Запуск
326
  if __name__ == "__main__":
327
  demo.launch(
328
  server_name="0.0.0.0",
329
  server_port=7860,
330
- share=False,
331
- show_error=True
332
  )
 
 
 
1
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import json
import logging

# ==================== SETTINGS ====================
# BUG FIX: the whole file logs through a module-level ``logger`` that was
# used without ever being defined (NameError on first import). Define it here.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Prefer GPU when present; everything below keys its dtype/optimizations off this.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")
 
13
# ==================== LOADING THE TWO AI MODELS ====================

# 1. AI FOR PROMPT PROCESSING (small text model)
class PromptAI:
    """Enhances Russian prompts: translates keywords to English, detects a
    style, and recommends generation parameters.

    NOTE(review): the DialoGPT model loaded by ``load()`` is never actually
    consulted during enhancement — ``enhance_prompt`` only uses the keyword
    dictionary in ``_simple_enhance``. Loading it merely flips ``self.loaded``.
    """

    def __init__(self):
        self.model = None       # AutoModelForCausalLM once load() succeeds
        self.tokenizer = None   # matching AutoTokenizer
        self.loaded = False     # gates enhance_prompt vs. the fallback path

    def load(self):
        """Load the text model; return True on success, False on failure."""
        try:
            # A small model keeps startup cheap on CPU-only hardware.
            model_name = "microsoft/DialoGPT-small"
            logger.info(f"Loading prompt AI: {model_name}")

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForCausalLM.from_pretrained(model_name)

            if DEVICE == "cuda":
                # Half precision halves VRAM on GPU; keep fp32 on CPU.
                self.model = self.model.half().to(DEVICE)
            else:
                self.model = self.model.to(DEVICE)

            self.loaded = True
            logger.info("✅ Prompt AI loaded")
            return True
        except Exception as e:
            logger.error(f"Error loading prompt AI: {e}")
            return False

    def enhance_prompt(self, russian_prompt: str) -> dict:
        """Enhance a Russian prompt.

        Returns a dict with keys english_prompt / negative_prompt / style /
        steps / size / enhancements; falls back to a basic template when the
        model is not loaded or enhancement raises.
        """
        if not self.loaded:
            return self._enhance_fallback(russian_prompt)

        try:
            enhanced = self._simple_enhance(russian_prompt)
            return {
                "english_prompt": enhanced["english"],
                "negative_prompt": enhanced["negative"],
                "style": enhanced["style"],
                "steps": enhanced["steps"],
                "size": enhanced["size"],
                "enhancements": enhanced["enhancements"]
            }
        except Exception as e:
            logger.error(f"Prompt enhancement error: {e}")
            return self._enhance_fallback(russian_prompt)

    def _simple_enhance(self, prompt: str) -> dict:
        """Dictionary-based translation plus keyword style detection."""
        import re  # local import keeps this fix self-contained

        # Russian -> English keyword translations
        translations = {
            "кот": "cat", "кошка": "cat",
            "собака": "dog",
            "девушка": "woman", "женщина": "woman",
            "парень": "man", "мужчина": "man",
            "красивый": "beautiful", "милый": "cute",
            "детальный": "detailed", "яркий": "bright",
            "лес": "forest", "море": "sea", "горы": "mountains",
            "город": "city", "космос": "space",
            "портрет": "portrait", "пейзаж": "landscape",
            "аниме": "anime", "фэнтези": "fantasy",
            "фотореалистично": "photorealistic"
        }

        # BUG FIX: the old code tested ``rus in prompt.lower()`` but replaced
        # against the original-case string, so capitalized words ("Кот")
        # were detected yet never translated. Replace case-insensitively.
        english = prompt
        for rus, eng in translations.items():
            english = re.sub(re.escape(rus), eng, english, flags=re.IGNORECASE)

        # Detect a style from the (lower-cased) original prompt.
        prompt_lower = prompt.lower()
        if any(word in prompt_lower for word in ["аниме", "anime"]):
            style = "anime"
            enhancements = "anime style, manga, Japanese animation, vibrant colors, detailed"
            negative = "realistic, photorealistic, 3d, blurry, deformed, ugly"
            steps, size = 22, 384
        elif any(word in prompt_lower for word in ["фэнтези", "fantasy", "магия", "дракон"]):
            style = "fantasy"
            enhancements = "fantasy art, magical, epic, concept art, digital painting"
            negative = "realistic, mundane, boring, modern, simple"
            steps, size = 23, 384
        elif any(word in prompt_lower for word in ["портрет", "portrait", "лицо", "человек"]):
            style = "portrait"
            enhancements = "portrait, beautiful face, detailed eyes, perfect skin, studio lighting"
            negative = "ugly, deformed, bad anatomy, extra limbs, cartoon"
            steps, size = 25, 384
        elif any(word in prompt_lower for word in ["пейзаж", "landscape", "природа"]):
            style = "landscape"
            enhancements = "landscape, scenery, nature, detailed environment, beautiful view"
            negative = "closeup, portrait, person, face, indoor"
            steps, size = 20, 384
        else:
            style = "general"
            enhancements = "masterpiece, best quality, detailed, beautiful, high resolution"
            negative = "blurry, deformed, ugly, bad anatomy, cartoon, watermark"
            steps, size = 22, 384

        # Assemble the final prompt
        final_english = f"{english}, {enhancements}"

        # Clamp very long prompts (CLIP truncates around 77 tokens anyway).
        if len(final_english.split()) > 40:
            words = final_english.split()[:35]
            final_english = " ".join(words) + ", detailed, high quality"

        return {
            "english": final_english,
            "negative": negative,
            "style": style,
            "steps": steps,
            "size": size,
            "enhancements": enhancements
        }

    def _enhance_fallback(self, prompt: str) -> dict:
        """Fallback when the AI model is unavailable: minimal boilerplate."""
        return {
            "english_prompt": f"{prompt}, masterpiece, best quality, detailed",
            "negative_prompt": "blurry, deformed, ugly, bad anatomy",
            "style": "general",
            "steps": 25,
            "size": 512,
            "enhancements": "basic enhancements"
        }
144
+ # 2. AI ДЛЯ ГЕНЕРАЦИИ ИЗОБРАЖЕНИЙ (Stable Diffusion)
145
+ class ImageAI:
146
+ def __init__(self):
147
+ self.pipe = None
148
+ self.current_model = None
149
+ self.models = {
150
+ "dreamlike": {
151
+ "name": "dreamlike-art/dreamlike-photoreal-2.0",
152
+ "display": "✨ Dreamlike Photoreal 2.0",
153
+ "default_steps": 25,
154
+ "max_size": 768
155
+ },
156
+ "sd15": {
157
+ "name": "runwayml/stable-diffusion-v1-5",
158
+ "display": "🎨 Stable Diffusion 1.5",
159
+ "default_steps": 25,
160
+ "max_size": 512
161
+ }
162
+ }
163
+
164
+ def load_model(self, model_key="dreamlike"):
165
+ """Загружает модель Stable Diffusion"""
166
  try:
167
+ if model_key not in self.models:
168
+ return False, "Model not found"
169
+
170
+ model_info = self.models[model_key]
171
 
172
+ # Выгружаем предыдущую модель
173
  if self.pipe is not None:
174
  del self.pipe
175
  torch.cuda.empty_cache() if torch.cuda.is_available() else None
176
 
177
+ logger.info(f"Loading image model: {model_info['display']}")
178
+
179
  # Загружаем новую модель
180
  self.pipe = StableDiffusionPipeline.from_pretrained(
181
  model_info["name"],
182
+ torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
183
  safety_checker=None,
184
  requires_safety_checker=False
185
  )
186
 
187
+ self.pipe = self.pipe.to(DEVICE)
188
  self.pipe.scheduler = EulerDiscreteScheduler.from_config(
189
  self.pipe.scheduler.config
190
  )
 
191
 
192
+ if DEVICE == "cuda":
193
+ self.pipe.enable_attention_slicing()
194
+ self.pipe.enable_xformers_memory_efficient_attention()
195
 
196
+ self.current_model = model_key
197
+ logger.info(f"✅ Image model loaded: {model_info['display']}")
198
+ return True, f"✅ {model_info['display']} loaded"
199
 
200
  except Exception as e:
201
+ logger.error(f"Error loading image model: {e}")
202
+ return False, f"Error: {str(e)}"
203
 
204
+ def generate_image(self, prompt, negative_prompt="", steps=25, seed=-1, size=512):
205
+ """Генерирует изображение"""
206
  if self.pipe is None:
207
+ return None, "Image model not loaded"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
 
209
  try:
210
+ # Seed
211
+ if seed == -1:
212
+ seed = int(time.time()) % 1000000
213
+ generator = torch.Generator(device=DEVICE).manual_seed(seed)
214
+
215
+ logger.info(f"Generating image: {prompt[:50]}...")
216
+
217
+ # Генерация
218
  image = self.pipe(
219
  prompt=prompt,
220
  negative_prompt=negative_prompt,
 
225
  width=size
226
  ).images[0]
227
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
  info = f"""
229
+ **Generation Complete!**
230
+ 🎨 **Model:** {self.models[self.current_model]['display']}
231
+ 🔢 **Steps:** {steps}
232
+ 📏 **Size:** {size}x{size}
 
233
  🎲 **Seed:** {seed}
234
 
235
+ 💡 **Tip:** Use seed {seed} to reproduce
236
  """
237
 
238
  return image, info
239
 
240
  except Exception as e:
241
+ logger.error(f"Generation error: {e}")
242
+ return None, f"Error: {str(e)}"
243
+
244
# ==================== AI INITIALIZATION ====================
# NOTE: this runs at import time — both models start downloading/loading as
# soon as the module is imported, before the UI is built.
logger.info("=" * 60)
logger.info("🚀 INITIALIZING DUAL AI SYSTEM")
logger.info("=" * 60)

# Create both AIs
prompt_ai = PromptAI()
image_ai = ImageAI()

# Load the models
logger.info("Loading AI models...")

# Load the prompt AI (failure is tolerated: enhance_prompt falls back)
prompt_loaded = prompt_ai.load()

# Load the default image model
image_loaded, image_msg = image_ai.load_model("dreamlike")

logger.info(f"Prompt AI loaded: {prompt_loaded}")
logger.info(f"Image AI loaded: {image_loaded}")
264
+
265
# ==================== GRADIO INTERFACE ====================
def enhance_prompt_interface(russian_prompt):
    """Gradio handler: enhance a Russian prompt.

    Returns five values — (markdown summary, english prompt, negative
    prompt, steps, size) — matching the five output components it is
    wired to; the error path returns the same arity.
    """
    if not russian_prompt or len(russian_prompt.strip()) < 2:
        return "Please enter a prompt", "", "", 25, 512

    result = prompt_ai.enhance_prompt(russian_prompt.strip())

    # Format the human-readable summary
    output = f"""
🤖 **AI ENHANCED PROMPT**

**Original (Russian):** {russian_prompt}

**Enhanced (English):** {result['english_prompt']}

**Negative Prompt:** {result['negative_prompt']}

**Style:** {result['style']}
**Recommended Steps:** {result['steps']}
**Recommended Size:** {result['size']}px

**Enhancements:** {result['enhancements']}
"""

    return output, result['english_prompt'], result['negative_prompt'], result['steps'], result['size']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
 
292
def generate_image_interface(prompt, negative_prompt, steps, seed, size, model_choice):
    """Gradio handler: make sure the requested model is active, then generate.

    Returns ``(image, info)`` from the image AI, or ``(None, error)`` when
    switching to the requested model fails.
    """
    # Swap checkpoints lazily — only when the selection differs.
    if image_ai.current_model != model_choice:
        switched, switch_msg = image_ai.load_model(model_choice)
        if not switched:
            return None, f"❌ {switch_msg}"

    # Delegate the actual generation.
    return image_ai.generate_image(
        prompt=prompt,
        negative_prompt=negative_prompt,
        steps=steps,
        seed=seed,
        size=size
    )
310
 
311
def dual_ai_workflow(russian_prompt, model_choice="dreamlike"):
    """Full workflow: prompt enhancement followed by image generation.

    Returns six values matching its Gradio outputs:
    (image, combined info, english prompt, negative prompt, steps, size).
    """
    # 1. Enhance the prompt (empty input short-circuits with the same arity)
    if not russian_prompt:
        return None, "Please enter a prompt", "", "", 25, 512

    logger.info(f"🤖 Step 1: Enhancing prompt: {russian_prompt[:50]}...")
    enhanced_info, english_prompt, negative_prompt, steps, size = enhance_prompt_interface(russian_prompt)

    # 2. Generate the image with the enhanced parameters (always random seed)
    logger.info(f"🎨 Step 2: Generating image with enhanced prompt...")
    image, gen_info = generate_image_interface(
        prompt=english_prompt,
        negative_prompt=negative_prompt,
        steps=steps,
        seed=-1,
        size=size,
        model_choice=model_choice
    )

    # 3. Combine both reports into one markdown block
    full_info = f"""
{enhanced_info}

---

{gen_info}
"""

    return image, full_info, english_prompt, negative_prompt, steps, size
341
+
342
# ==================== BUILDING THE INTERFACE ====================
with gr.Blocks(theme=gr.themes.Soft(), title="🤖 Dual AI System") as demo:
    gr.Markdown("""
# 🤖 DUAL AI SYSTEM
## Two AI Models Working Together

1. **🤖 Prompt AI** - Enhances and translates Russian prompts
2. **🎨 Image AI** - Generates high-quality images

**Simply enter a Russian description and get a perfect image!**
""")

    with gr.Tabs():
        with gr.TabItem("🚀 Full AI Workflow"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### 1. Enter Russian Description")
                    russian_input = gr.Textbox(
                        label="Describe your image in Russian",
                        value="красивый кот с зелеными глазами",
                        lines=3
                    )

                    model_selector = gr.Radio(
                        choices=[
                            ("✨ Dreamlike Photoreal 2.0", "dreamlike"),
                            ("🎨 Stable Diffusion 1.5", "sd15")
                        ],
                        value="dreamlike",
                        label="Select Image Model"
                    )

                    generate_btn = gr.Button(
                        "🤖 Process with Dual AI",
                        variant="primary",
                        size="lg"
                    )

                    gr.Markdown("### Quick Examples:")
                    with gr.Row():
                        ex1 = gr.Button("🐱 Cute Cat", size="sm")
                        ex2 = gr.Button("👩 Anime Girl", size="sm")
                        ex3 = gr.Button("🏞️ Fantasy Landscape", size="sm")

                with gr.Column(scale=1):
                    gr.Markdown("### 2. AI Enhanced Result")
                    output_image = gr.Image(label="Generated Image", height=400)

                    output_info = gr.Markdown(
                        label="AI Processing Info",
                        value="*AI will show processing information here*"
                    )

                    with gr.Accordion("📋 Enhanced Parameters", open=False):
                        english_display = gr.Textbox(label="English Prompt", interactive=False)
                        negative_display = gr.Textbox(label="Negative Prompt", interactive=False)
                        steps_display = gr.Number(label="Steps", interactive=False)
                        size_display = gr.Number(label="Size", interactive=False)

            # Wire the main button: six outputs match dual_ai_workflow's arity.
            generate_btn.click(
                fn=dual_ai_workflow,
                inputs=[russian_input, model_selector],
                outputs=[output_image, output_info, english_display, negative_display, steps_display, size_display]
            )

            # Example buttons — each fills the Russian prompt box. A plain
            # string (not a 1-tuple) is the canonical return for one output.
            ex1.click(
                fn=lambda: "милый котенок с большими глазами, пушистая шерсть",
                inputs=[],
                outputs=[russian_input]
            )
            ex2.click(
                fn=lambda: "аниме девушка с розовыми волосами, детальные глаза, фэнтези лес",
                inputs=[],
                outputs=[russian_input]
            )
            ex3.click(
                fn=lambda: "фэнтезийный пейзаж с драконом, магия, эпичное небо, закат",
                inputs=[],
                outputs=[russian_input]
            )

        with gr.TabItem("🔧 Separate Tools"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 🤖 Prompt Enhancer AI")
                    prompt_input = gr.Textbox(
                        label="Russian Prompt",
                        value="красивая девушка с голубыми глазами",
                        lines=3
                    )
                    enhance_btn = gr.Button("Enhance Prompt", variant="secondary")

                    enhanced_output = gr.Markdown(label="Enhanced Prompt")

                    with gr.Row():
                        enhanced_english = gr.Textbox(label="English Prompt", interactive=True)
                        enhanced_negative = gr.Textbox(label="Negative Prompt", interactive=True)

                    with gr.Row():
                        enhanced_steps = gr.Number(label="Recommended Steps", value=25)
                        enhanced_size = gr.Number(label="Recommended Size", value=512)

                with gr.Column():
                    gr.Markdown("### 🎨 Image Generator AI")

                    with gr.Row():
                        gen_prompt = gr.Textbox(label="English Prompt", value="beautiful woman with blue eyes")
                        gen_negative = gr.Textbox(label="Negative Prompt", value="blurry, deformed, ugly")

                    with gr.Row():
                        gen_steps = gr.Slider(15, 50, 25, label="Steps")
                        gen_size = gr.Slider(256, 768, 512, step=64, label="Size")
                        gen_seed = gr.Number(-1, label="Seed (-1 for random)")

                    gen_model = gr.Radio(
                        choices=["dreamlike", "sd15"],
                        value="dreamlike",
                        label="Model"
                    )

                    gen_btn = gr.Button("Generate Image", variant="primary")

                    gen_image = gr.Image(label="Generated Image", height=400)
                    gen_info = gr.Markdown(label="Generation Info")

            # Wire the enhancer
            enhance_btn.click(
                fn=enhance_prompt_interface,
                inputs=[prompt_input],
                outputs=[enhanced_output, enhanced_english, enhanced_negative, enhanced_steps, enhanced_size]
            )

            # Auto-fill the generator whenever the enhancer produces values
            enhanced_english.change(
                fn=lambda x: x,
                inputs=[enhanced_english],
                outputs=[gen_prompt]
            )
            enhanced_negative.change(
                fn=lambda x: x,
                inputs=[enhanced_negative],
                outputs=[gen_negative]
            )
            enhanced_steps.change(
                fn=lambda x: x,
                inputs=[enhanced_steps],
                outputs=[gen_steps]
            )
            enhanced_size.change(
                fn=lambda x: x,
                inputs=[enhanced_size],
                outputs=[gen_size]
            )

            # Wire the generator
            gen_btn.click(
                fn=generate_image_interface,
                inputs=[gen_prompt, gen_negative, gen_steps, gen_seed, gen_size, gen_model],
                outputs=[gen_image, gen_info]
            )

        with gr.TabItem("📊 System Info"):
            gr.Markdown("""
## 🤖 DUAL AI SYSTEM INFORMATION

### AI Model 1: Prompt Enhancer
- **Purpose:** Translates and enhances Russian prompts
- **Model:** Microsoft DialoGPT-small
- **Features:**
  * Russian → English translation
  * Prompt quality enhancement
  * Negative prompt generation
  * Style detection
  * Parameter recommendation

### AI Model 2: Image Generator
- **Purpose:** Generates high-quality images
- **Models Available:**
  1. **Dreamlike Photoreal 2.0** - Photorealistic, artistic
  2. **Stable Diffusion 1.5** - Classic, detailed

### Workflow:
1. User inputs Russian description
2. Prompt AI enhances and translates
3. Image AI generates the image
4. User gets perfect result

### System Status:
- Prompt AI: **{prompt_status}**
- Image AI: **{image_status}**
- Device: **{device}**

### Requirements:
- GPU recommended for faster generation
- 8GB+ VRAM for best performance
- Hugging Face token for models
""".format(
                device=DEVICE.upper(),
                # BUG FIX: status was hard-coded to "Loaded" for both AIs;
                # report the real load flags from module initialization.
                prompt_status="Loaded" if prompt_loaded else "Not loaded",
                image_status="Loaded" if image_loaded else "Not loaded",
            ))

    # Footer
    gr.Markdown("""
---
*Powered by 🤖 Dual AI System | Made with Gradio & Hugging Face*
""")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
547
 
548
# ==================== LAUNCH ====================
if __name__ == "__main__":
    # NOTE(review): share=True spawns a public tunnel link; inside HF Spaces
    # it is ignored with a warning — confirm the intended deployment target.
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860,       # standard Gradio / Spaces port
        share=True,
        debug=True              # surface tracebacks in the UI/console
    )