Spaces: Build error
Update app.py
app.py CHANGED
@@ -17,10 +17,9 @@ import easyocr
 from web_engine import search_info, get_page_text  # import the helpers from web_engine.py
 
 AVAILABLE_MODELS = {
-    "GPT-4": "gpt-4",
-    "…  (two more entries are truncated in the source)
-    "GPT-4o THINK": "gpt-4o-think"
+    # "GPT-4": "gpt-4",
+    "O3 (NEW)": "openai",
+
 }
 
 SYSTEM_PROMPT = """
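Note: the commit trims the model registry to a single entry whose g4f model id is "openai". A minimal sketch of the lookup the rest of the file performs; the "O3" fallback string is taken from the `AVAILABLE_MODELS.get(model_name, "O3")` calls added later in this diff, and note that it is not itself a registered model id:

```python
AVAILABLE_MODELS = {
    # "GPT-4": "gpt-4",
    "O3 (NEW)": "openai",
}

def resolve_model(display_name: str) -> str:
    # Same lookup chat_response uses after this commit.
    return AVAILABLE_MODELS.get(display_name, "O3")

print(resolve_model("O3 (NEW)"))  # -> "openai"
print(resolve_model("GPT-4"))     # -> "O3" (fallback; no longer a registry key)
```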
@@ -33,6 +32,9 @@ SYSTEM_PROMPT = """
 4. Если пользователь просит найти информацию, например, "найди что-то про Адольфа Гитлера":
 - Ответь с текстом [SEARCH: {prompt}]
 - Затем используй полученную информацию для ответа на вопрос
+4.1 Если ты не знаешь информацию, которую пользователь запрашивает, то напиши:
+[SEARCH: {PROMPT}]
+вот тут укажи информацию, которую ты не знаешь
 5. Если пользователь просит нарисовать или сгенерировать изображение:
 - Начните ответ с [GENERATE_IMAGE]
 - Напишите детальный промпт на английском языке
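Note: a minimal sketch of how a reply following rules 4 and 4.1 is consumed downstream; the helper mirrors the `partial_message.split("[SEARCH:")` parsing that `chat_response` performs later in this diff:

```python
def extract_search_query(reply: str) -> str | None:
    # Everything between "[SEARCH:" and the next "]" is the search query.
    if "[SEARCH:" not in reply:
        return None
    return reply.split("[SEARCH:")[1].split("]")[0].strip()

print(extract_search_query("Сейчас поищу. [SEARCH: Адольф Гитлер]"))  # -> "Адольф Гитлер"
print(extract_search_query("Обычный ответ без поиска"))               # -> None
```

The split breaks if the query itself contains a `]`, so the marker is best kept on its own line, as rule 4.1 asks.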
@@ -146,10 +148,10 @@ def generate_image(prompt: str) -> str:
     try:
         client = Client()
         response = client.images.generate(
-            model="flux-…  (truncated)
+            model="flux-schnell-black-forest-labs",
             prompt=prompt,
             response_format="url",
-            …  (truncated)
+            provider='HuggingSpace'
         )
 
         image_url = response.data[0].url
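Note: the diff shows only the changed lines, so here is a sketch of the rewritten call in context, assuming g4f's OpenAI-style `Client` (the import does not appear in the diff and is an assumption):

```python
from g4f.client import Client  # assumed import; only Client() appears in the diff

def generate_image(prompt: str) -> str:
    try:
        client = Client()
        response = client.images.generate(
            model="flux-schnell-black-forest-labs",  # model id pinned by this commit
            prompt=prompt,
            response_format="url",
            provider='HuggingSpace',                 # provider pinned by this commit
        )
        return response.data[0].url
    except Exception as e:
        # Callers detect failure by checking for this "Ошибка" prefix.
        return f"Ошибка при генерации изображения: {str(e)}"
```

Signalling errors through a string prefix works here only because every caller in this file checks `image_url.startswith("Ошибка")`; raising, or returning a (url, error) pair, would be harder to misuse.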
@@ -176,472 +178,131 @@ def generate_image(prompt: str) -> str:
 
     except Exception as e:
         return f"Ошибка при генерации изображения: {str(e)}"
-
 def process_image(image_input) -> str:
-    """Обработка изображения
+    """Process the image via the Blackbox provider."""
     try:
-        # …  (old lines 183-192 are truncated in the source)
-        elif isinstance(image_input, Image.Image):
+        # Initialise the GPT client with the Blackbox provider
+        client = Client()
+
+        # Work out how the image was passed in (a URL or a PIL.Image object)
+        if isinstance(image_input, str):  # an image URL was passed
+            response = requests.get(image_input, stream=True)
+            if response.status_code == 200:
+                image = Image.open(response.raw)  # turn the stream into a PIL.Image
+            else:
+                return "❌ Ошибка загрузки изображения по URL."
+        elif isinstance(image_input, Image.Image):  # a PIL image was passed
             image = image_input
         else:
-            return "…  (truncated)
+            return "❌ Неподдерживаемый формат изображения."
 
-        # Convert the image to RGB if it is in RGBA
-        if image.mode =…  (truncated)
+        # Convert the image to RGB if it is in RGBA or another mode
+        if image.mode != 'RGB':
             image = image.convert('RGB')
 
         # Save the image to a temporary file
-        with tempfile.NamedTemporaryFile(suffix=…  (truncated)
-            image.save(tmp.name, format=…  (truncated)
-            # …  (old lines 205-208 are truncated in the source)
-            text_results = reader.readtext(tmp.name)
-
-        # Collect the detected objects
-        detected_objects = []
-        for r in results:
-            for box in r.boxes:
-                class_id = int(box.cls)
-                class_name = model.names[class_id]
-                detected_objects.append(class_name)
-
-        # Collect the detected text
-        detected_text = []
-        for detection in text_results:
-            text = detection[1]
-            if text.strip():  # check that the text is not empty
-                detected_text.append(text)
-
-        if not detected_objects and not detected_text:
-            return """
-            <div class="image-analysis-animation">
-                <div class="analysis-text">К сожалению, я не смог распознать объекты на этом изображении</div>
-            </div>
-            """
-
-        # Build the message for GPT
-        prompt = "Я на этом изображении вижу "
-        if detected_objects:
-            objects_str = ", ".join(detected_objects)
-            prompt += f"следующие объекты: {objects_str}. "
-
-        if detected_text:
-            text_str = ", ".join(detected_text)
-            prompt += f"Также на изображении есть текст: {text_str}"
-
-        messages = [
-            {
-                "role": "system",
-                "content": """Ты - система компьютерного зрения.
-                Тебе нужно:
-                1. Перевести названия объектов на русский язык
-                2. Описать что ты видишь простыми словами
-                3. Всегда отвечать на русском языке
-                4. Если есть текст, упомянуть его в описании
-                5. Никогда не говори "Похоже, что у вас есть описание изображения..."
-                """
-            },
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ]
-
-        response = g4f.ChatCompletion.create(
+        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
+            image.save(tmp.name, format="JPEG")  # save the image as JPEG
+            image_file = open(tmp.name, "rb")  # open the file to attach to the request
+
+        # Ask Blackbox to analyse the image
+        response = client.chat.completions.create(
             model="gpt-4o",
-            messages=…  (truncated)
+            messages=[{"role": "user", "content": "Опишите, что изображено на этом изображении."}],
+            image=image_file,
+            provider="Blackbox"
         )
-
-        …  (old line 266 is truncated in the source)
+
+        # Close the temporary file
+        image_file.close()
+
+        # Return the analysis result
+        return response.choices[0].message.content
 
     except Exception as e:
         return f"❌ Ошибка при обработке изображения: {str(e)}"
     finally:
-        …  (truncated)
+        # Delete the temporary file after use
+        if "tmp" in locals():
             Path(tmp.name).unlink(missing_ok=True)
-
 def chat_response(message, history, model_name, direct_mode, thinking_depth=1.0, uploaded_file=None):
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
+    partial_message = ""  # initialise the variable
     history = history or []
     history.append({"role": "user", "content": message})
     history.append({"role": "assistant", "content": ""})
-
+
     try:
-        # …  (old lines 281-294, the start of the search branch, are truncated in the source)
-            info_text = ""
-            for link in results:
-                info_text += f"Ссылка: {link}\n"
-                text = get_page_text(link)
-                info_text += f"Текст: {text}\n\n"  # append the page text
-            print(f"Вот информация по запросу '{prompt}':\n\n{info_text}\nСделай краткий ответ на основе этой информации.")
-            # Build the request for GPT
-            web_search = f"Вот информация по запросу '{prompt}':\n\n{info_text}\nСделай краткий ответ на основе этой информации."
-            print("Отправка запроса к GPT...")  # debug message
-            response = g4f.ChatCompletion.create(
-                model="gpt-4o",
-                messages=[
-                    {"role": "system", "content": "Сделай краткий ответ на основе следующей информации:"},
-                    {"role": "user", "content": web_search}
-                ],
-                stream=True,
-            )
-
-            # Get the reply from GPT
-            if isinstance(response, str):
-                final_response = response
-            else:
-                final_response = "Не удалось получить ответ."
-
-            history.append({"role": "assistant", "content": final_response})  # append the AI reply
-            yield history, *[gr.update(visible=False) for _ in range(6)]
-            return
 
-        if …  (old lines 323-329, the start of the THINK branch, are truncated)
-            thinking_depth = float(thinking_depth)
-            num_steps = max(min_steps, int(thinking_depth))
-
-        …  (old lines 333-334 are truncated; the legible tail of the thinking prompt reads:)
-            Глубина анализа: {thinking_depth:.1f}/10
-            Количество шагов размышления: {num_steps}
-
-            ВАЖНО: Строго соблюдай формат ответа. Каждый тег должен быть на новой строке:
-            […  (truncated)
-            […  (truncated)
-            …  (old lines 344-352 are truncated)
+        # The regular message-processing path
+        for msg in history[:-2]:
+            messages.append({"role": msg["role"], "content": msg["content"]})
+        messages.append({"role": "user", "content": str(message)})
+
+        partial_message = ""
+        response = None  # initialise the response variable
+        response = g4f.ChatCompletion.create(
+            model=AVAILABLE_MODELS.get(model_name, "O3"),
+            messages=messages,
+            stream=False,
+            provider="PollinationsAI"
+        )
+
+        if response and isinstance(response, str):
+            partial_message += response
+            history[-1]["content"] = f"""
+            <div class="text-fade-in">
+                {partial_message}
+            </div>
+            """
+
+        yield history, *[gr.update(visible=False) for _ in range(6)]
+
+        # If the model's reply contains [SEARCH:], run a web search
+        if "[SEARCH:" in partial_message:
+            history[-1]["content"] = """
+            <div class="search-animation">
+                <div class="search-text">🔍 Выполняется поиск...</div>
+            </div>
+            """
+            yield history, *[gr.update(visible=False) for _ in range(6)]
+            prompt = partial_message.split("[SEARCH:")[1].split("]")[0].strip()
+
+            # Run the search
+            results = search_info(prompt)
+            if not results:
+                history[-1]["content"] = "Не удалось найти информацию."
+                yield history, *[gr.update(visible=False) for _ in range(6)]
+                return
+
+            info_text = ""
+            for link in results:
+                info_text += f"Ссылка: {link}\n"
+                page_text = get_page_text(link)
+                info_text += f"Текст: {page_text}...\n\n"
+
+            search_response_prompt = f"""Вот информация по запросу "{prompt}":
+
+            {info_text}
+
+            На основе этой информации сформируй краткий и понятный ответ на русском языке.
             """
-        …  (old lines 354-359, the start of thinking_html, are truncated)
-                <div class="thinking-step">🤔 Думаю...</div>
-            </div>
-            """
-            history[-1]["content"] = thinking_html
-            yield history, *[gr.update(visible=False) for _ in range(6)]
-            print('Обработка запроса')
-            # Create the request with a timeout only for the first chunk
-            response = g4f.ChatCompletion.create(
-                model="gpt-4o",
-                messages=[{"role": "system", "content": thinking_prompt}],
-                stream=True
-            )
-
-            # Flag for tracking receipt of the first chunk
-            received_first_chunk = False
-            partial_message = ""
-            current_block = ""
-            in_thinking_step = False
-            in_final_answer = False
-
-            for chunk in response:
-                if chunk and isinstance(chunk, str):
-                    received_first_chunk = True
-                    current_block += chunk
-
-                    # Repair incorrectly closed tags
-                    # ... existing code ...
-                    current_block = (current_block
-                        .replace("[/THINKING_STEP\n", "[/THINKING_STEP]\n")  # fix the closing THINKING_STEP tag
-                        .replace("[/FINAL_ANSWER\n", "[/FINAL_ANSWER]\n")  # fix the closing FINAL_ANSWER tag
-                        .replace("\n[/THINKING_STEP", "\n[/THINKING_STEP]")  # fix a misordered closing THINKING_STEP tag
-                        .replace("\n[/FINAL_ANSWER", "\n[/FINAL_ANSWER]")  # fix a misordered closing FINAL_ANSWER tag
-                        .replace("]]]]]]", "")  # drop surplus closing brackets
-                        .replace("]]", "")  # drop surplus closing brackets
-                        .replace("]", "")  # drop surplus closing brackets
-                    )
-                    # ... existing code ...
-
-                    if "[THINKING_STEP]" in current_block:
-                        in_thinking_step = True
-                        current_block = current_block.replace("[THINKING_STEP]", "")
-                    if "[THINKING_STEP" in current_block:
-                        in_thinking_step = True
-                        current_block = current_block.replace("[THINKING_STEP", "")
-
-                    if "[FINAL_ANSWER]" in current_block:
-                        in_final_answer = True
-                        current_block = current_block.replace("[FINAL_ANSWER]", "")
-                    if "[FINAL_ANSWER" in current_block:
-                        in_final_answer = True
-                        current_block = current_block.replace("[FINAL_ANSWER", "")
-
-                    # Check for completed blocks
-                    if "[/THINKING_STEP]" in current_block and in_thinking_step:
-                        block_content = current_block[:current_block.find("[/THINKING_STEP]")]
-                        formatted_block = f'<div class="thinking-step-block">{block_content}</div>'
-                        partial_message += formatted_block
-                        current_block = current_block[current_block.find("[/THINKING_STEP]") + len("[/THINKING_STEP]"):]
-                        in_thinking_step = False
-                    if "[/THINKING_STEP" in current_block and in_thinking_step:
-                        block_content = current_block[:current_block.find("[/THINKING_STEP")]
-                        formatted_block = f'<div class="thinking-step-block">{block_content}</div>'
-                        partial_message += formatted_block
-                        current_block = current_block[current_block.find("[/THINKING_STEP") + len("[/THINKING_STEP"):]
-                        in_thinking_step = False
-
-                    elif "[/FINAL_ANSWER]" in current_block and in_final_answer:
-                        block_content = current_block[:current_block.find("[/FINAL_ANSWER]")]
-                        formatted_block = f'<div class="final-answer-block">{block_content}</div>'
-                        partial_message += formatted_block
-                        current_block = current_block[current_block.find("[/FINAL_ANSWER") + len("[/FINAL_ANSWER]"):]
-                        in_final_answer = False
-                    elif "[/FINAL_ANSWER" in current_block and in_final_answer:
-                        block_content = current_block[:current_block.find("[/FINAL_ANSWER")]
-                        formatted_block = f'<div class="final-answer-block">{block_content}</div>'
-                        partial_message += formatted_block
-                        current_block = current_block[current_block.find("[/FINAL_ANSWER") + len("[/FINAL_ANSWER"):]
-                        in_final_answer = False
-
-                    # Trim extra spaces and line breaks
-                    display_message = partial_message + current_block.strip()
-                    history[-1]["content"] = display_message
-                    yield history, *[gr.update(visible=False) for _ in range(6)]
-
-                    # # If the first chunk arrived, create a new stream without a timeout
-                    # if received_first_chunk:
-                    #     response = g4f.ChatCompletion.create(
-                    #         model="gpt-4o",
-                    #         messages=[{"role": "system", "content": thinking_prompt}],
-                    #         stream=True,
-                    #         provider=g4f.Provider.Airforce
-                    #     )
-                    #     break
-
-            # Continue processing without a timeout
-            if received_first_chunk:
-                for chunk in response:
-                    if chunk and isinstance(chunk, str):
-                        current_block += chunk
-
-                        # Repair incorrectly closed tags
-                        # ... existing code ...
-                        current_block = (current_block
-                            .replace("[/THINKING_STEP\n", "[/THINKING_STEP]\n")  # fix the closing THINKING_STEP tag
-                            .replace("[/THINKING_STEP", "[/THINKING_STEP]\n")  # fix the closing THINKING_STEP tag
-                            # .replace("[/THINKING_STEP]", "[/THINKING_STEP]\n")  # fix the closing THINKING_STEP tag, bracket included
-
-                            .replace("[/FINAL_ANSWER\n", "[/FINAL_ANSWER]\n")  # fix the closing FINAL_ANSWER tag
-                            .replace("[/FINAL_ANSWER]", "[/FINAL_ANSWER]\n")  # fix the closing FINAL_ANSWER tag, bracket included
-                            .replace("\n[/THINKING_STEP", "\n[/THINKING_STEP]")  # fix a misordered closing THINKING_STEP tag
-                            .replace("\n[/FINAL_ANSWER", "\n[/FINAL_ANSWER]")  # fix a misordered closing FINAL_ANSWER tag
-                            .replace("]]]]]]", "")  # drop surplus closing brackets
-                            .replace("]]", "")  # drop surplus closing brackets
-                            .replace("]", "")  # drop surplus closing brackets
-
-                        )
-
-                        # Check for block starts
-                        if "[THINKING_STEP]" in current_block:
-                            in_thinking_step = True
-                            current_block = current_block.replace("[THINKING_STEP]", "")
-                        if "[THINKING_STEP" in current_block:
-                            in_thinking_step = True
-                            current_block = current_block.replace("[THINKING_STEP", "")
-
-                        if "[FINAL_ANSWER]" in current_block:
-                            in_final_answer = True
-                            current_block = current_block.replace("[FINAL_ANSWER]", "")
-                        if "[FINAL_ANSWER" in current_block:
-                            in_final_answer = True
-                            current_block = current_block.replace("[FINAL_ANSWER", "")
-
-                        # Check for completed blocks
-                        if "[/THINKING_STEP]" in current_block and in_thinking_step:
-                            block_content = current_block[:current_block.find("[/THINKING_STEP]")]
-                            formatted_block = f'<div class="thinking-step-block">{block_content}</div>'
-                            partial_message += formatted_block
-                            current_block = current_block[current_block.find("[/THINKING_STEP]") + len("[/THINKING_STEP]"):]
-                            in_thinking_step = False
-                        if "[/THINKING_STEP" in current_block and in_thinking_step:
-                            block_content = current_block[:current_block.find("[/THINKING_STEP")]
-                            formatted_block = f'<div class="thinking-step-block">{block_content}</div>'
-                            partial_message += formatted_block
-                            current_block = current_block[current_block.find("[/THINKING_STEP") + len("[/THINKING_STEP"):]
-                            in_thinking_step = False
-
-                        elif "[/FINAL_ANSWER]" in current_block and in_final_answer:
-                            block_content = current_block[:current_block.find("[/FINAL_ANSWER]")]
-                            formatted_block = f'<div class="final-answer-block">{block_content}</div>'
-                            partial_message += formatted_block
-                            current_block = current_block[current_block.find("[/FINAL_ANSWER") + len("[/FINAL_ANSWER]"):]
-                            in_final_answer = False
-                        elif "[/FINAL_ANSWER" in current_block and in_final_answer:
-                            block_content = current_block[:current_block.find("[/FINAL_ANSWER")]
-                            formatted_block = f'<div class="final-answer-block">{block_content}</div>'
-                            partial_message += formatted_block
-                            current_block = current_block[current_block.find("[/FINAL_ANSWER") + len("[/FINAL_ANSWER"):]
-                            in_final_answer = False
-
-                        # Trim extra spaces and line breaks
-                        display_message = partial_message + current_block.strip()
-                        history[-1]["content"] = display_message
-                        yield history, *[gr.update(visible=False) for _ in range(6)]
-                        break
-
-            retry_count += 1
-            if retry_count == max_retries:
-                error_message = f"""
-                <div class="error-animation">
-                    <div class="error-text">❌ Не удалось получить ответ после {max_retries} попыток.
-                    Попробуйте использовать обычный режим или повторите запрос позже.</div>
-                </div>
-                """
-                history[-1]["content"] = error_message
-                yield history, *[gr.update(visible=False) for _ in range(6)]
-                return
-
-        except Exception as e:
-            retry_count += 1
-            if retry_count == max_retries:
-                error_message = f"""
-                <div class="error-animation">
-                    <div class="error-text">❌ Ошибка при анализе: {str(e)}</div>
-                </div>
-                """
-                history[-1]["content"] = error_message
-                yield history, *[gr.update(visible=False) for _ in range(6)]
-                return
+            search_response = g4f.ChatCompletion.create(
+                model="gpt-4",
+                messages=[{"role": "user", "content": search_response_prompt}],
+                stream=False,
+                provider="Mhystical"
+            )
 
-        …  (old lines 551-553 are truncated in the source)
-        # Send the prompt straight to the model for image generation
-        image_url = generate_image(message)
-        if not image_url.startswith("Ошибка"):
-            history[-1]["content"] = f""
-        else:
-            history[-1]["content"] = f"❌ {image_url}"
+            final_response = search_response if isinstance(search_response, str) else "Не удалось получить ответ."
+            final_response = final_response.replace(f"[SEARCH: {prompt}]", "").strip()
+            history.append({"role": "assistant", "content": final_response})
             yield history, *[gr.update(visible=False) for _ in range(6)]
             return
 
-        …  (truncated)
-            history,
-            gr.update(visible=False),
-            gr.update(visible=False),
-            gr.update(visible=False),
-            "",
-            gr.update(visible=False),
-            gr.update(visible=False)
-        )
-
-        if uploaded_file:
-            file_content = process_file(uploaded_file)
-            if file_content:
-                message = f"Файл содержит:\n```\n{file_content}\n```\n\n{message}"
-
-        # Build the message history
-        for msg in history[:-2]:
-            messages.append({"role": msg["role"], "content": msg["content"]})
-        messages.append({"role": "user", "content": str(message)})
-
-        partial_message = ""
-        code_block = None
-
-        response = g4f.ChatCompletion.create(
-            model=AVAILABLE_MODELS.get(model_name, "gpt-4o"),
-            messages=messages,
-            stream=True,
-            provider=g4f.Provider.Airforce
-        )
-
-        for chunk in response:
-            if chunk and isinstance(chunk, str):
-                partial_message += chunk
-
-                # Check for an image-generation request
-                if "[GENERATE_IMAGE]" in partial_message and "[/GENERATE_IMAGE]" in partial_message:
-                    start_idx = partial_message.find("[GENERATE_IMAGE]") + len("[GENERATE_IMAGE]")
-                    end_idx = partial_message.find("[/GENERATE_IMAGE]")
-                    image_prompt = partial_message[start_idx:end_idx].strip()
-
-                    # Show the generation status
-                    history[-1]["content"] = """
-                    <div class="generating-animation">
-                        <div class="generating-text">Генерация изображения...</div>
-                    </div>
-                    """
-                    yield history, *[gr.update(visible=False) for _ in range(6)]
-
-                    # Generate the image
-                    image_url = generate_image(image_prompt)
-
-                    if not image_url.startswith("Ошибка"):
-                        # Analyse the image
-                        # image_analysis = process_image(image_url)
-                        explanation_text = partial_message[end_idx + len("[/GENERATE_IMAGE]"):].strip()
-                        partial_message = f"\n\n{explanation_text}"
-                    else:
-                        partial_message = f"❌ {image_url}"
-
-                history[-1]["content"] = partial_message + "▓"
-                yield history, *[gr.update(visible=False) for _ in range(6)]
-
-                # Check for code
-                if "```" in partial_message:
-                    code_start = partial_message.rfind("```") + 3
-                    code_end = partial_message.find("```", code_start)
-                    if code_end != -1:
-                        code_block = partial_message[code_start:code_end].strip()
-
-        # At the end, remove the cursor and reset the file
-        history[-1]["content"] = partial_message
-        yield (
-            history,
-            *[gr.update(visible=True if code_block else False) for _ in range(5)],
-            gr.update(value=None)  # reset the file after the reply
-        )
+        history[-1]["content"] = partial_message.strip()
+        yield history, *[gr.update(visible=True) for _ in range(6)]
 
     except Exception as e:
-        print(f"Error: {e}")
-        if not history:
-            history = [{"role": "user", "content": message}, {"role": "assistant", "content": ""}]
         history[-1]["content"] = f"❌ Произошла ошибка: {str(e)}"
         yield (
             history,
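Note: the rewritten `process_image` accepts either an image URL or an already-loaded `PIL.Image`; a hypothetical smoke test (the URL and image are illustrative only):

```python
from PIL import Image

# URL input: downloaded with requests, decoded from the response stream.
print(process_image("https://example.com/cat.jpg"))

# PIL input: used directly, converted to RGB and saved to a temp JPEG first.
print(process_image(Image.new("RGB", (64, 64), "white")))
```

The `if "tmp" in locals()` guard in the `finally` block exists because `tmp` is only bound once the temporary-file branch has run; an early `return` (bad URL, unsupported input) would otherwise make the cleanup line raise `NameError`.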
@@ -650,9 +311,86 @@ def chat_response(message, history, model_name, direct_mode, thinking_depth=1.0,
             gr.update(visible=True),
             "",
             gr.update(visible=False),
-            gr.update(value=None)
+            gr.update(value=None)
         )
 
+        if direct_mode:
+            image_url = generate_image(message)
+            if not image_url.startswith("Ошибка"):
+                history[-1]["content"] = f""
+            else:
+                history[-1]["content"] = f"❌ {image_url}"
+            yield history, *[gr.update(visible=False) for _ in range(6)]
+            return
+
+        yield (
+            history,
+            gr.update(visible=False),
+            gr.update(visible=False),
+            gr.update(visible=False),
+            "",
+            gr.update(visible=False),
+            gr.update(visible=False)
+        )
+
+        if uploaded_file:
+            file_content = process_file(uploaded_file)
+            if file_content:
+                message = f"Файл содержит:\n```\n{file_content}\n```\n\n{message}"
+
+        for msg in history[:-2]:
+            messages.append({"role": msg["role"], "content": msg["content"]})
+        messages.append({"role": "user", "content": str(message)})
+
+        partial_message = ""
+        code_block = None
+        response = g4f.ChatCompletion.create(
+            model=AVAILABLE_MODELS.get(model_name, "O3"),
+            messages=messages,
+            stream=False,
+            provider="PollinationsAI"
+        )
+
+        if response and isinstance(response, str):
+            partial_message += response
+            if "[GENERATE_IMAGE]" in partial_message:
+                history[-1]["content"] = """
+                <div class="generating-animation">
+                    <div class="generating-text">Генерация изображения...</div>
+                </div>
+                """
+
+            if "[GENERATE_IMAGE]" in partial_message and "[/GENERATE_IMAGE]" in partial_message:
+                start_idx = partial_message.find("[GENERATE_IMAGE]") + len("[GENERATE_IMAGE]")
+                end_idx = partial_message.find("[/GENERATE_IMAGE]")
+                image_prompt = partial_message[start_idx:end_idx].strip()
+                yield history, *[gr.update(visible=False) for _ in range(6)]
+
+                image_url = generate_image(image_prompt)
+
+                if not image_url.startswith("Ошибка"):
+                    explanation_text = partial_message[end_idx + len("[/GENERATE_IMAGE]"):].strip()
+                    partial_message = f"\n\n{explanation_text}"
+                else:
+                    partial_message = f"❌ {image_url}"
+
+            history[-1]["content"] = partial_message + "|"
+            yield history, *[gr.update(visible=False) for _ in range(6)]
+
+            if "```" in partial_message:
+                code_start = partial_message.rfind("```") + 3
+                code_end = partial_message.find("```", code_start)
+                if code_end != -1:
+                    code_block = partial_message[code_start:code_end].strip()
+
+            history[-1]["content"] = partial_message
+            yield (
+                history,
+                *[gr.update(visible=True if code_block else False) for _ in range(5)],
+                gr.update(value=None)
+            )
+
+
 def analyze_code(code):
     """Analyze the code and get an explanation"""
     if not code:
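Note: the code-block detection added above can be exercised in isolation; this sketch reproduces the extraction exactly as written:

```python
def extract_last_code_block(text: str) -> str | None:
    # Mirrors chat_response: rfind() locates the LAST "```" in the text,
    # then a closing fence is searched for after it.
    if "```" not in text:
        return None
    start = text.rfind("```") + 3
    end = text.find("```", start)
    return text[start:end].strip() if end != -1 else None

# Quirk inherited from the original: when the reply ends with a closing
# fence, rfind() lands on that closing fence, nothing follows it, and the
# helper returns None, so the code buttons stay hidden.
print(extract_last_code_block("пример:\n```\nprint('hi')\n```"))  # -> None
```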
@@ -705,11 +443,75 @@ def create_interface():
     border-radius: 10px;
     box-shadow: 0 2px 6px rgba(0,0,0,0.15);
 }
+.text-fade-in {
+    opacity: 0; /* initial opacity */
+    transform: translateY(10px); /* initial offset */
+    animation: fadeInText 1s ease-in-out forwards; /* animation on reveal */
+}
+
+@keyframes fadeInText {
+    0% {
+        opacity: 0;
+        transform: translateY(10px); /* slightly lower */
+    }
+    100% {
+        opacity: 1;
+        transform: translateY(0); /* back in place */
+    }
+}
+
 .message.user {
     background-color: #3d3d3d !important;
     color: white !important;
     border-radius: 15px;
 }
+.search-animation {
+    position: relative;
+    width: 100%;
+    min-height: 80px;
+    background: linear-gradient(45deg, #2d2d2d, #3d3d3d);
+    border-radius: 15px;
+    overflow: hidden;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+}
+
+.search-text {
+    color: white;
+    font-size: 18px;
+    z-index: 2;
+    animation: search-pulse 1.5s infinite;
+}
+
+.search-animation::before {
+    content: '';
+    position: absolute;
+    width: 200%;
+    height: 200%;
+    top: -50%;
+    left: -50%;
+    background:
+        radial-gradient(2px 2px at 20% 30%, rgba(255,255,255,0.3), rgba(0,0,0,0)),
+        radial-gradient(2px 2px at 40% 70%, rgba(255,255,255,0.3), rgba(0,0,0,0));
+    background-repeat: repeat;
+    animation: search-rotate 6s linear infinite;
+}
+
+@keyframes search-pulse {
+    0% { opacity: 0.6; }
+    50% { opacity: 1; }
+    100% { opacity: 0.6; }
+}
+
+@keyframes search-rotate {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
 .message.bot {
     background-color: #2d2d2d !important;
     color: white !important;
@@ -1035,21 +837,7 @@ def create_interface():
                 render_markdown=True
             )
 
-
-            output_text = gr.Textbox(
-                label="Вывод",
-                interactive=False,
-                visible=False
-            )
-            error_text = gr.Textbox(
-                label="⚠️ Ошибки",
-                interactive=False,
-                visible=False
-            )
-            ask_help_btn = gr.Button(
-                "❓ Спросить почему возникла ошибка",
-                visible=False
-            )
+
 
             with gr.Row(elem_classes="input-row"):
                 msg = gr.Textbox(
@@ -1071,7 +859,7 @@ def create_interface():
         with gr.Column(scale=1, visible=True) as sidebar:
             model = gr.Dropdown(
                 choices=list(AVAILABLE_MODELS.keys()),
-                value="…  (truncated)
+                value="O3 (NEW)",
                 label="Модель"
             )
 
@@ -1107,64 +895,25 @@ def create_interface():
                 elem_classes="run-btn"
             )
 
-            def run_code(code):
-                if not code:
-                    return {
-                        output_text: gr.update(value=" Нет кода для выполнения", visible=True),
-                        error_text: gr.update(visible=False),
-                        ask_help_btn: gr.update(visible=False)
-                    }
-
-                result = test_code(code)
-
-                return {
-                    output_text: gr.update(value=result["output"] if result["success"] else "", visible=True),
-                    error_text: gr.update(value=result["error"], visible=bool(result["error"])),
-                    # ask_help_btn: gr.update(visible=bool(result["error"]))
-                }
-
-            def handle_error_question(error_message, code_block):
-                """Handler for the error-help button"""
-                error_prompt = f"""Объясните ошибку и как её исправить:
-
-                Код:
-                ```
-                {code_block}
-                ```
-
-                {error_message}"""
-
-                try:
-                    response = g4f.ChatCompletion.create(
-                        model="gpt-3.5-turbo",
-                        messages=[{"role": "user", "content": error_prompt}],
-                        stream=False
-                    )
-                    return [{"role": "assistant", "content": response if isinstance(response, str) else "Не удалось получить ответ"}]
-                except Exception as e:
-                    return [{"role": "assistant", "content": f"Ошибка при получении объяснения: {str(e)}"}]
+
 
         # Event handlers
         msg.submit(
             fn=chat_response,
             inputs=[msg, chatbot, model, direct_mode, thinking_depth, file_output],
-            outputs=[chatbot, …  (truncated)
+            outputs=[chatbot, current_code],
             api_name=None
         )
 
         submit.click(
             fn=chat_response,
             inputs=[msg, chatbot, model, direct_mode, thinking_depth, file_output],
-            outputs=[chatbot, …  (truncated)
+            outputs=[chatbot, current_code],
             api_name=None
         )
 
-        …  (truncated)
-            fn=handle_error_question,
-            inputs=[error_text, current_code],
-            outputs=chatbot
-        )
+
 
         analyze_btn.click(
             fn=analyze_code,
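Note: one wiring detail worth keeping in mind is that Gradio matches yielded values to `outputs` positionally, so a handler bound with `outputs=[chatbot, current_code]` must yield exactly two values per step. A minimal sketch with hypothetical components:

```python
import gradio as gr

def two_output_handler(message, history):
    # One value per output component: (chatbot history, current_code text).
    history = (history or []) + [{"role": "user", "content": message}]
    yield history, ""
```

The `chat_response` above yields seven values per step (history plus six `gr.update` objects), so the `outputs` list and the yield shape need to agree.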
@@ -1172,15 +921,11 @@ def create_interface():
             outputs=[chatbot]
         )
 
-        …  (truncated)
-            fn=run_code,
-            inputs=[current_code],
-            outputs=[output_text, error_text, ask_help_btn]
-        )
+
 
         clear.click(
             fn=lambda: (None, "", "", False, ""),
-            outputs=[chatbot, …  (truncated)
+            outputs=[chatbot, current_code]
         )
 
         # Add a handler for model changes