Spaces:
Sleeping
Sleeping
| import os | |
| import vk_api | |
| from vk_api.longpoll import VkLongPoll, VkEventType | |
| from vk_api.upload import VkUpload | |
| from huggingface_hub import InferenceClient | |
| import io | |
| import re | |
| import time | |
| from collections import deque | |
| from threading import Thread | |
# ─── Configuration ────────────────────────────────────────────────────────────
try:
    VK_TOKEN = os.environ["VK_TOKEN"]
    HF_TOKEN = os.environ["HF_TOKEN"]  # must be a registered HuggingFace token
    BOT_ID = int(os.environ["BOT_ID"])
except KeyError as e:
    print(f"ОШИБКА: Нет ключа {e} в переменных окружения / secrets")
    # SystemExit is safer than exit(): the latter only exists when the
    # `site` module is loaded (e.g. not under `python -S` / some embeds).
    raise SystemExit(1)

# Trigger words that make the bot answer.  "кензи" was listed twice in the
# original — deduplicated (membership tests make duplicates pointless).
BOT_NAMES = ["бот", "эй", "кензи", "ии", "kenzie", "@kenzie"]
DRAW_COMMAND = "!картинка"

# Stable models on the free Inference API (as of Feb 2026)
LLM_MODEL = "Qwen/Qwen2.5-7B-Instruct"  # or "mistralai/Mistral-7B-Instruct-v0.3"
IMAGE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"  # most reliable free SDXL on HF Inference

# Client / session initialisation — note: network side effects at import time.
client = InferenceClient(token=HF_TOKEN.strip())
vk_session = vk_api.VkApi(token=VK_TOKEN)
vk = vk_session.get_api()
longpoll = VkLongPoll(vk_session)
upload = VkUpload(vk_session)

chat_memory = {}  # {peer_id: deque of {"role", "content"} dicts}
def send_msg(peer_id, text, reply_to=None, attach=None):
    """Best-effort wrapper over VK ``messages.send``.

    Optional ``reply_to`` / ``attach`` are included only when truthy.
    Any API failure is logged to stdout and swallowed — never raises.
    """
    try:
        payload = {"peer_id": peer_id, "message": text, "random_id": 0}
        if reply_to:
            payload["reply_to"] = reply_to
        if attach:
            payload["attachment"] = attach
        vk_session.method("messages.send", payload)
    except Exception as e:
        print(f"VK send error → {e}")
def ask_ai(messages: list):
    """Send *messages* to the chat LLM and return the stripped reply text.

    On any error (network, rate limit, bad response) logs the exception and
    returns a friendly fallback string instead of raising.
    """
    fallback = (
        "🤖 Что-то пошло не так… Возможно, лимит бесплатных запросов.\n"
        "Попробуй позже или спроси попроще."
    )
    try:
        response = client.chat.completions.create(
            model=LLM_MODEL,
            messages=messages,
            max_tokens=600,
            temperature=0.75,
            stream=False,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"LLM error: {e}")
        return fallback
def generate_image(prompt: str):
    """Render *prompt* with the HF text-to-image model.

    Returns a pair ``(png_bytes, None)`` on success or ``(None, reason)``
    on failure, where ``reason`` is a short human-readable string.
    """
    try:
        print(f"Генерация: {prompt} (модель: {IMAGE_MODEL})")
        # SDXL quality/speed compromise for the free API rate limits.
        pil_image = client.text_to_image(
            prompt=prompt,
            model=IMAGE_MODEL,
            num_inference_steps=28,   # 20–40 is the sweet spot
            guidance_scale=7.5,       # classic SDXL value
            negative_prompt="blurry, low quality, deformed, ugly, bad anatomy",
        )
        buffer = io.BytesIO()
        pil_image.save(buffer, format="PNG")
        buffer.seek(0)
        return buffer.read(), None
    except Exception as e:
        reason = str(e)
        print(f"Image error: {reason}")
        if "Pillow" in reason:
            return None, "Установи Pillow → pip install pillow"
        if "429" in reason or "rate limit" in reason.lower():
            return None, "Лимит бесплатных запросов HF. Подожди 1–5 минут."
        return None, reason[:180]
def handle_draw(peer_id, msg_id, text):
    """Generate an image from the text following DRAW_COMMAND and post it.

    Sends usage help when no prompt is given; otherwise announces progress,
    generates, uploads to VK, and reports success or the failure reason.
    """
    prompt = text.replace(DRAW_COMMAND, "", 1).strip()
    if not prompt:
        send_msg(peer_id, "Что нарисовать?\nПример: !картинка cyberpunk city at night neon lights", reply_to=msg_id)
        return

    send_msg(peer_id, f"🎨 Генерирую (SDXL base, ~20–35 сек)...\n«{prompt}»", reply_to=msg_id)

    img_data, err = generate_image(prompt)
    if not img_data:
        send_msg(peer_id, f"❌ Не получилось\n{err}", reply_to=msg_id)
        return

    try:
        buffer = io.BytesIO(img_data)
        # vk_api's uploader wants a file-like object with a .name attribute.
        buffer.name = "generated.png"
        uploaded = upload.photo_messages(photos=buffer)[0]
        attachment = f"photo{uploaded['owner_id']}_{uploaded['id']}"
        send_msg(peer_id, "✨ Готово!", reply_to=msg_id, attach=attachment)
    except Exception as e:
        print(f"VK upload fail: {e}")
        send_msg(peer_id, "❌ Ошибка загрузки в ВК", reply_to=msg_id)
def process_message(event):
    """Route one incoming VK longpoll event.

    Dispatch order: the draw command starts a worker thread; otherwise the
    bot answers only when it is mentioned, keeping a short per-chat memory.
    Interface unchanged: takes a MESSAGE_NEW event, returns None.
    """
    if not event.text:
        return
    text = event.text.strip()
    low = text.lower()

    # Image generation runs in a thread so the longpoll loop keeps draining.
    if low.startswith(DRAW_COMMAND):
        Thread(target=handle_draw, args=(event.peer_id, event.message_id, text)).start()
        return

    # Mention detection with whole-word matching.  The previous substring
    # test (`word in low`) false-fired constantly: "ии" occurs inside
    # ordinary Russian words ("линии", "акции"), "бот" inside "работа".
    # \w-lookarounds work for Cyrillic too; re.escape keeps names such as
    # "@kenzie" literal.
    def _name_pattern(name: str) -> str:
        return rf"(?<!\w){re.escape(name)}(?!\w)"

    mentioned = (
        any(re.search(_name_pattern(name), low) for name in BOT_NAMES)
        or f"[club{BOT_ID}|" in text
    )
    if not mentioned:
        return

    # Strip trigger words and the [club...|...] mention from the prompt.
    clean = text
    for name in BOT_NAMES:
        clean = re.sub(_name_pattern(name), "", clean, flags=re.IGNORECASE)
    clean = re.sub(r"\[club\d+\|[^\]]+\]", "", clean)
    clean = clean.strip(" ,.!?*").strip()

    if not clean:
        send_msg(event.peer_id, "Привет! Чем помочь? 😊", reply_to=event.message_id)
        return

    # Short rolling memory: maxlen=6 keeps the last 3 user/assistant turns.
    memory = chat_memory.setdefault(event.peer_id, deque(maxlen=6))
    memory.append({"role": "user", "content": clean})
    answer = ask_ai(list(memory))
    memory.append({"role": "assistant", "content": answer})
    send_msg(event.peer_id, answer, reply_to=event.message_id)
if __name__ == "__main__":
    print("Бот запущен")
    print(f"LLM : {LLM_MODEL}")
    print(f"Image : {IMAGE_MODEL}")
    print("Ctrl+C → выход\n")

    # Outer loop restarts the longpoll listener after transient failures;
    # Ctrl+C is the only clean way out.
    running = True
    while running:
        try:
            for event in longpoll.listen():
                if event.type == VkEventType.MESSAGE_NEW and event.to_me:
                    process_message(event)
        except KeyboardInterrupt:
            print("\nОстановка...")
            running = False
        except Exception as e:
            print(f"Критическая ошибка → {e}\nПерезапуск через 8 сек...")
            time.sleep(8)