#!/bin/bash
#
# install_runpod.sh: advanced RunPod installation for Qwen-Soloband.
# Requires HF_TOKEN to be set in the environment.

set -e

# --- Terminal colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# --- Configuration (every value can be overridden via an environment variable) ---
WORKSPACE_DIR="${WORKSPACE_DIR:-/workspace}"
PROJECT_DIR="${PROJECT_DIR:-$WORKSPACE_DIR/Qwen-ImageForFlo-Advanced}"
LORA_DIR="${LORA_DIR:-$WORKSPACE_DIR/loras}"
MODEL_REPO="${MODEL_REPO:-Gerchegg/Qwen-Soloband-Diffusers}"
SPACE_REPO="${SPACE_REPO:-Gerchegg/Qwen-ImageForFlo-Advanced}"
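
# A minimal example of overriding the defaults above at call time; the paths
# and token value here are illustrative only:
#   WORKSPACE_DIR=/data LORA_DIR=/data/loras HF_TOKEN='hf_xxx' ./install_runpod.sh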

# --- Helper functions ---
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[OK]${NC} $1"
}

log_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

print_header() {
    echo ""
    echo "╔════════════════════════════════════════════════════════════╗"
    echo "║ $1"
    echo "╚════════════════════════════════════════════════════════════╝"
    echo ""
}

check_gpu() {
    if command -v nvidia-smi &> /dev/null; then
        log_info "Checking GPU..."
        nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
        log_success "GPU detected"
    else
        log_warning "nvidia-smi not found, skipping GPU check"
    fi
}

print_header "🛠️ ADVANCED INSTALLATION - Qwen-Soloband"

log_info "Installation parameters:"
echo " WORKSPACE_DIR: $WORKSPACE_DIR"
echo " PROJECT_DIR: $PROJECT_DIR"
echo " LORA_DIR: $LORA_DIR"
echo " MODEL_REPO: $MODEL_REPO"
echo " SPACE_REPO: $SPACE_REPO"
echo ""

# Require a Hugging Face token before doing anything else.
if [ -z "$HF_TOKEN" ]; then
    log_error "HF_TOKEN is not set!"
    echo ""
    echo "Set HF_TOKEN before running:"
    echo " export HF_TOKEN='your_token_here'"
    echo ""
    echo "Or:"
    echo " HF_TOKEN='your_token_here' ./install_runpod.sh"
    echo ""
    echo "Get a token at: https://huggingface.co/settings/tokens"
    exit 1
fi

log_success "HF_TOKEN found"

log_info "Checking system..."
echo " Python: $(python --version 2>&1)"
echo " Pip: $(pip --version 2>&1)"

check_gpu

print_header "📦 Installing the Hugging Face CLI"

# Use the standard downloader (hf_transfer acceleration disabled).
export HF_HUB_ENABLE_HF_TRANSFER=0
pip install -q --upgrade "huggingface_hub[cli]"

log_info "Logging in to Hugging Face..."
hf auth login --token "$HF_TOKEN"
hf auth whoami

print_header "📁 Creating the folder structure"

mkdir -p "$WORKSPACE_DIR"
mkdir -p "$LORA_DIR"
log_success "Folders created"
|
| | print_header "⬇️ Скачивание проекта" |
| |
|
| | cd "$WORKSPACE_DIR" |
| |
|
| | if [ -d "$PROJECT_DIR" ]; then |
| | log_warning "Проект уже существует: $PROJECT_DIR" |
| | read -p "Перезаписать? (y/N) " -n 1 -r |
| | echo |
| | if [[ $REPLY =~ ^[Yy]$ ]]; then |
| | log_info "Обновление проекта..." |
| | cd "$PROJECT_DIR" |
| | hf download "$SPACE_REPO" --repo-type space --local-dir . |
| | else |
| | log_info "Пропускаем скачивание" |
| | cd "$PROJECT_DIR" |
| | fi |
| | else |
| | log_info "Скачивание $SPACE_REPO..." |
| | hf download "$SPACE_REPO" --repo-type space --local-dir "$(basename $PROJECT_DIR)" |
| | cd "$PROJECT_DIR" |
| | fi |
| |
|
| | log_success "Проект готов: $PROJECT_DIR" |
| |
|
| | |
| | |
| | |
| |
|
| | print_header "🐍 Установка Python зависимостей" |
| |
|
| | if [ -f "requirements.txt" ]; then |
| | log_info "Устанавливаем из requirements.txt..." |
| | pip install -r requirements.txt |
| | log_success "Зависимости установлены" |
| | else |
| | log_error "requirements.txt не найден!" |
| | exit 1 |
| | fi |
| |
|

print_header "✅ Verifying the installation"

log_info "Checking imports..."
python -c "import torch; print(f' PyTorch: {torch.__version__}')"
python -c "import gradio; print(f' Gradio: {gradio.__version__}')"
python -c "from diffusers import DiffusionPipeline; print(' Diffusers: OK')"
log_success "All imports work"
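
# Optional extra check (not part of the original flow): confirm that PyTorch can
# actually see the GPU. Uncomment the line below to have the script report this too.
# python -c "import torch; print(f' CUDA available: {torch.cuda.is_available()}')"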
| |
|
| | |
| | |
| | |
| |
|
| | print_header "🎭 LoRA папка" |
| |
|
| | echo " Локальные LoRA будут загружены из: $LORA_DIR" |
| | echo "" |
| | echo " Положите ваши .safetensors файлы туда для автоматической загрузки" |
| | echo " Пример:" |
| | echo " $LORA_DIR/my_style.safetensors" |
| | echo "" |
| |
|

if [ -d "$LORA_DIR" ] && [ "$(ls -A "$LORA_DIR"/*.safetensors 2>/dev/null | wc -l)" -gt 0 ]; then
    log_info "LoRA files found:"
    ls -lh "$LORA_DIR"/*.safetensors
else
    log_info "No LoRA files found (this is fine)"
fi
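
# A sketch of pulling a LoRA from the Hub into the folder; the repo and file
# names below are placeholders, not artifacts of this project:
#   hf download some-user/some-qwen-lora my_style.safetensors --local-dir "$LORA_DIR"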

print_header "🎉 INSTALLATION COMPLETE!"
| | echo "🚀 Для запуска приложения:" |
| | echo "" |
| | echo " cd $PROJECT_DIR" |
| | echo " python app.py" |
| | echo "" |
| | echo "🎨 С кастомной моделью:" |
| | echo "" |
| | echo " MODEL_REPO='username/custom-model' python app.py" |
| | echo "" |
| | echo "📝 Доступные schedulers:" |
| | echo " - DPM++ 2M Karras (по умолчанию)" |
| | echo " - DPM++ SDE Karras" |
| | echo " - DPM++ 3M SDE Karras" |
| | echo " - Euler, Euler Ancestral" |
| | echo " - И другие (см. документацию)" |
| | echo "" |
| | echo "🎭 LoRA папка: $LORA_DIR" |
| | echo "" |
| | echo "📺 После запуска:" |
| | echo " RunPod Dashboard → Connect → HTTP Service [Port 7860]" |
| | echo "" |
| | echo "📚 Документация:" |
| | echo " README.md - Основная документация" |
| | echo " RUNPOD_DEPLOYMENT.md - Инструкции по развертыванию" |
| | echo "" |

log_success "Ready to go! 🎨"