Upload 4 files
Browse files- MyRunpodFinetuneScripts.zip +3 -0
- claude_analysis.py +199 -0
- finetune_analysis.sh +187 -0
- finetune_pipeline.sh +107 -0
MyRunpodFinetuneScripts.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:09397578cae05f483a28b579add66fdf375b1f447a5948d88e208e782ede10b4
|
| 3 |
+
size 5866
|
claude_analysis.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Ask Claude to analyze dataset caption (.txt) files and emit a style JSON.

Reads caption files, builds an art-direction prompt, sends it to the
Anthropic API, and prints the resulting JSON to stdout so the calling
shell script (finetune_analysis.sh) can parse it.
"""

import os
import sys
import json
from pathlib import Path

try:
    import anthropic
except ImportError:
    # The shell wrapper greps stdout for a JSON object, so report the
    # failure as JSON in addition to the human-readable hints.
    print("\nERROR: anthropic library is not installed")
    print("To install, run: pip install anthropic")
    print("or: python -m pip install anthropic\n")
    print(json.dumps({"error": "Missing anthropic library. Please run: pip install anthropic"}))
    sys.exit(1)

# Report which anthropic version is in use (helps debug API incompatibilities).
try:
    anthropic_version = anthropic.__version__
    print(f"\nDEBUG - Using anthropic version: {anthropic_version}")
except AttributeError:
    # Narrowed from a bare `except:` — only a missing __version__ is expected.
    print("\nWARNING - Could not determine anthropic version")

# SECURITY FIX: the previous revision hardcoded a live API key here,
# which both leaked the credential and made this env-var check dead code.
# The key must come exclusively from the environment.
api_key = os.environ.get("ANTHROPIC_API_KEY", "")
if not api_key:
    # Exit 0 on purpose: the shell wrapper inspects stdout for an "error"
    # JSON object rather than the exit status.
    print(json.dumps({"error": "No ANTHROPIC_API_KEY provided"}))
    sys.exit(0)
|
| 30 |
+
|
| 31 |
+
def get_txt_contents(local_path=None):
    """Collect the text of dataset caption (.txt) files.

    Sources are tried in order:
      1. ``--files-list <path>`` on the command line: a file containing one
         caption-file path per line.
      2. ``local_path``: a directory whose ``*.txt`` files are read.
      3. Remaining command-line arguments, treated as file paths.

    Unreadable files are skipped with a DEBUG message instead of aborting,
    so one bad caption does not kill the whole analysis run.

    Args:
        local_path: Optional directory to scan for ``*.txt`` files.

    Returns:
        list[str]: contents of every caption file that could be read.
    """

    def _read_files(paths):
        # Best-effort read shared by branches 1 and 3: skip failures, say why.
        contents = []
        for name in paths:
            try:
                with open(name, 'r', encoding='utf-8') as fh:
                    contents.append(fh.read())
            except Exception as e:
                print(f"DEBUG - Error reading {name}: {e}")
        return contents

    # 1. Explicit file list passed by the shell wrapper.
    if len(sys.argv) > 2 and sys.argv[1] == "--files-list":
        try:
            # FIX: the list file was previously opened without an explicit
            # encoding; use utf-8 like every other read in this module.
            with open(sys.argv[2], 'r', encoding='utf-8') as f:
                txt_file_list = [line.strip() for line in f if line.strip()]
            return _read_files(txt_file_list)
        except Exception as e:
            # Fall through to the other sources if the list is unreadable.
            print(f"DEBUG - Error reading files list: {e}")

    # 2. Local directory scan (only returns if the directory exists).
    if local_path:
        try:
            path = Path(local_path)
            if path.exists():
                print(f"\nDEBUG - Reading files from local path: {path}")
                txt_contents = []
                for txt_file in path.glob("*.txt"):
                    try:
                        with open(txt_file, 'r', encoding='utf-8') as f:
                            txt_contents.append(f.read())
                        print(f"DEBUG - Read file: {txt_file.name}")
                    except Exception as e:
                        print(f"DEBUG - Error reading {txt_file}: {e}")
                return txt_contents
        except Exception as e:
            print(f"\nDEBUG - Error accessing local path: {e}")

    # 3. Fall back to positional command-line arguments (possibly empty).
    return _read_files(sys.argv[1:])
|
| 84 |
+
|
| 85 |
+
# -----------------------------------------------------------------------------
# 1. Gather caption-file contents (CLI file list, local folder, or argv)
# -----------------------------------------------------------------------------
# NOTE(review): hardcoded Windows path — only useful on the author's machine;
# on RunPod the shell wrapper presumably passes --files-list instead. Confirm.
local_path = r"G:\My Drive\Kohya_SS\Flux\SoloBand\IconsGray"
txt_contents = get_txt_contents(local_path)

if not txt_contents:
    print("\nDEBUG - No text files found in local path or arguments")

# Separate the individual caption files with a visible delimiter.
combined_text = "\n---\n".join(txt_contents)

# Optional free-form notes from the user, forwarded via the environment.
user_comments = os.environ.get("USER_COMMENTS", "")

# -----------------------------------------------------------------------------
# 2. Build the prompt for Claude
# -----------------------------------------------------------------------------
prompt_content = f"""You are an AI art director specializing in creating cohesive visual styles. Analyze the input and generate a JSON response that defines a clear artistic direction.

Rules:
1. Always return ONLY valid JSON with no additional text
2. Keep style consistent across all prompts
3. Focus on visual elements, not story
4. Be specific and detailed in descriptions
5. Consider user's additional comments in style selection

Required JSON format:
{{
"token": "SB_AI",
"art_type": "Short descriptive name of art category (2-4 words)",
"style_name": "Clear art style description (3-5 words)",
"model_name": "SB_AI_art_type_V1",
"prompts": [
"6 detailed prompts that match art_type and style",
"Each 1-2 sentences, focusing on visual elements",
"Include colors, shapes, textures, composition",
"Keep consistent style across all prompts",
"Be specific about materials and techniques",
"Maintain same level of detail in each prompt"
]
}}

Note: For casual art style (SB_AI token):
- Use bright, vibrant colors
- Focus on everyday objects with playful twists
- Keep designs simple but appealing
- Add small decorative details
- Use smooth, rounded shapes
- Maintain light, cheerful mood

Input content to analyze:
---
{combined_text}

Additional user comments:
{user_comments}
"""

# Debug: echo the full prompt that will be sent to Claude.
print("\nDEBUG - Full prompt being sent to Claude:")
print("="*80)
print(prompt_content)
print("="*80)
|
| 148 |
+
|
| 149 |
+
# -----------------------------------------------------------------------------
# 3. Call the Anthropic (Claude) API with the configured model
# -----------------------------------------------------------------------------
try:
    client = anthropic.Anthropic(api_key=api_key)
    message = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1024,
        messages=[
            {
                "role": "user",
                "content": prompt_content
            }
        ],
        temperature=0.7,
    )
    # Messages API returns a list of content blocks; take the first text block.
    raw_reply = message.content[0].text
except Exception as e:
    # Broad catch is deliberate at this boundary (network/auth/API errors).
    # Exit 0 on purpose: the shell wrapper inspects stdout for an "error"
    # JSON object rather than relying on the exit status.
    print(json.dumps({"error": f"Request to Claude failed: {str(e)}"}))
    sys.exit(0)

# -----------------------------------------------------------------------------
# 4. Parse the reply as JSON
# -----------------------------------------------------------------------------
try:
    data = json.loads(raw_reply)
except ValueError:
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit. json.JSONDecodeError is a ValueError subclass, so this
    # catches exactly the parse failure we expect.
    print(json.dumps({"error": "Claude response is not valid JSON", "raw_reply": raw_reply}))
    sys.exit(0)
|
| 179 |
+
|
| 180 |
+
# -----------------------------------------------------------------------------
# 5. (optional) Pull the expected fields out of the reply, with safe defaults
# -----------------------------------------------------------------------------
token = data.get("token", "SB_AI")
art_type = data.get("art_type", "UnknownArtType")
style_name = data.get("style_name", "UnknownStyle")
# Fall back to a name derived from the (possibly defaulted) token/art_type.
model_name = data.get("model_name", f"{token}_{art_type}_V1")
prompts = data.get("prompts", [])

# -----------------------------------------------------------------------------
# 6. Emit the final JSON on stdout for the shell wrapper to consume
# -----------------------------------------------------------------------------
out = dict(
    token=token,
    art_type=art_type,
    style_name=style_name,
    model_name=model_name,
    prompts=prompts,
)
# ensure_ascii=False keeps any non-ASCII prompt text readable downstream.
print(json.dumps(out, ensure_ascii=False))
|
finetune_analysis.sh
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# finetune_analysis.sh — step 2 of the pipeline: asks Claude to analyze the
# dataset captions, lets the user review/edit the result, then updates the
# kohya_ss Flux training config accordingly.
set -e

echo ">>> [Script 2] This script will handle Claude analysis and config update."

###############################################################################
# 1. Prompt for optional free-form comments about the art style
###############################################################################

echo ">>> [Script 2] Введите дополнительные комментарии по поводу арта (или нажмите Enter, чтобы пропустить):"
read USER_COMMENTS

###############################################################################
# 2. Unpack every .zip found under /workspace/MyLearningDataset/Images
###############################################################################
echo ">>> [Script 2] Unzipping all .zip in /workspace/MyLearningDataset/Images ..."
# NOTE(review): the while body runs in a pipeline subshell; presumably a
# failing unzip should abort via `set -e` — confirm that is the intent.
find /workspace/MyLearningDataset/Images -type f -name '*.zip' | while read zipf; do
    unzip -o "$zipf" -d /workspace/MyLearningDataset/Images
    rm -f "$zipf"
done

###############################################################################
# 3. Collect up to 35 caption (.txt) files to feed into the analysis prompt
###############################################################################
echo ">>> [Script 2] Collecting up to 35 .txt files ..."
TXT_FILES=$(find /workspace/MyLearningDataset/Images -type f -name '*.txt' | head -n 35)

if [ -z "$TXT_FILES" ]; then
    echo "[Script 2] WARNING: No .txt files found (up to 35)."
fi
|
| 31 |
+
|
| 32 |
+
###############################################################################
# 4. Install the anthropic library (if missing) and validate the API key.
#    SECURITY FIX: an earlier revision hardcoded a live API key here,
#    leaking the credential. The key must come from the environment.
###############################################################################
echo ">>> [Script 2] Installing anthropic library (if needed) ..."
pip install anthropic

# Require the caller to have exported the key before running this script.
if [ -z "${ANTHROPIC_API_KEY:-}" ]; then
    echo "[Script 2] ERROR: ANTHROPIC_API_KEY is not set. Run: export ANTHROPIC_API_KEY=<your key>"
    exit 1
fi
export ANTHROPIC_API_KEY
|
| 42 |
+
|
| 43 |
+
###############################################################################
# 5. Run claude_analysis.py, passing it the list of caption files
###############################################################################
echo ">>> [Script 2] Sending request to Claude via claude_analysis.py ..."
# Persist the file list so the Python script can read it via --files-list.
echo "$TXT_FILES" > /tmp/txt_files_list.txt

# User comments travel to the Python script through the environment.
export USER_COMMENTS="$USER_COMMENTS"

# Keep only the JSON object from the script's mixed debug/JSON output.
# NOTE(review): the grep assumes "token" is the first key of the emitted
# JSON — fragile if the Python side ever reorders keys. Confirm.
PARSED_JSON=$(python /workspace/claude_analysis.py --files-list /tmp/txt_files_list.txt 2>/dev/null | grep -o '{"token".*}' || true)

# Show what was extracted, for debugging.
echo ">>> [Script 2] Raw response from claude_analysis.py:"
echo "$PARSED_JSON"
echo ">>> [Script 2] End of raw response"
echo

# Remove the temporary file list.
rm -f /tmp/txt_files_list.txt

if [ -z "$PARSED_JSON" ]; then
    echo "[Script 2] ERROR: Claude response is empty or not found."
    exit 1
fi

# Defensive check: the Python script reports failures as {"error": ...}.
if [[ "$PARSED_JSON" == *"error"* ]]; then
    echo "[Script 2] ERROR: JSON parse problem. See logs."
    echo "$PARSED_JSON"
    exit 1
fi
|
| 76 |
+
|
| 77 |
+
###############################################################################
# 6. Extract the fields from the JSON and let the user review/edit them
###############################################################################
# Save the JSON to a temp file the user can edit by hand.
TMP_JSON="/tmp/claude_response.json"
echo "$PARSED_JSON" > "$TMP_JSON"

echo ">>> [Script 2] Сохранен файл с результатами анализа: $TMP_JSON"
echo ">>> [Script 2] Текущие значения:"
echo "----------------------------------------"
echo "token = $(echo "$PARSED_JSON" | python -c 'import sys, json; print(json.load(sys.stdin)["token"])')"
echo "art_type = $(echo "$PARSED_JSON" | python -c 'import sys, json; print(json.load(sys.stdin)["art_type"])')"
echo "style_name = $(echo "$PARSED_JSON" | python -c 'import sys, json; print(json.load(sys.stdin)["style_name"])')"
echo "model_name = $(echo "$PARSED_JSON" | python -c 'import sys, json; print(json.load(sys.stdin)["model_name"])')"
echo "prompts:"
echo "$PARSED_JSON" | python -c 'import sys, json; print("\n".join(json.load(sys.stdin)["prompts"]))'
echo "----------------------------------------"

echo ">>> [Script 2] Пожалуйста, проверьте и отредактируйте файл если нужно: $TMP_JSON"
echo ">>> [Script 2] После завершения редактирования нажмите Enter для продолжения..."
read

# Re-read the (possibly hand-edited) JSON.
PARSED_JSON=$(cat "$TMP_JSON")

# Abort if the user's edits broke the JSON syntax.
if ! echo "$PARSED_JSON" | python -c 'import sys,json; json.load(sys.stdin)' >/dev/null 2>&1; then
    echo "[Script 2] ERROR: Файл содержит невалидный JSON. Пожалуйста, исправьте и попробуйте снова."
    exit 1
fi

# Pull the individual values out of the validated JSON.
TOKEN=$(echo "$PARSED_JSON" | python -c 'import sys, json; d=json.load(sys.stdin); print(d["token"])')
ART_TYPE=$(echo "$PARSED_JSON" | python -c 'import sys, json; d=json.load(sys.stdin); print(d["art_type"])')
STYLE_NAME=$(echo "$PARSED_JSON" | python -c 'import sys, json; d=json.load(sys.stdin); print(d["style_name"])')
MODEL_NAME=$(echo "$PARSED_JSON" | python -c 'import sys, json; d=json.load(sys.stdin); print(d["model_name"])')

# Prompts are a JSON array; flatten to one prompt per line.
PROMPTS=$(echo "$PARSED_JSON" | python -c '
import sys, json
d=json.load(sys.stdin)
prompts = d.get("prompts", [])
print("\n".join(prompts))
')

echo ">>> [Script 2] Обновленные значения:"
echo "token = $TOKEN"
echo "art_type = $ART_TYPE"
echo "style_name = $STYLE_NAME"
echo "model_name = $MODEL_NAME"
echo "prompts:"
echo "$PROMPTS"
echo

echo ">>> [Script 2] Нажмите Enter для продолжения или Ctrl+C для отмены..."
read

# Clean up the temp file.
rm -f "$TMP_JSON"
|
| 136 |
+
|
| 137 |
+
###############################################################################
# 7. Move .txt and .png files into /workspace/MyLearningDataset/Images/1_{model_name}_{style_name}
#    (the "1_" prefix is the kohya_ss repeat-count folder convention)
###############################################################################
NEW_FOLDER="/workspace/MyLearningDataset/Images/1_${MODEL_NAME}_${STYLE_NAME}"
mkdir -p "$NEW_FOLDER"

echo ">>> [Script 2] Moving all .txt and .png files into $NEW_FOLDER ..."
find /workspace/MyLearningDataset/Images -type f \( -name '*.txt' -o -name '*.png' \) -exec mv -f {} "$NEW_FOLDER" \; 2>/dev/null || true

###############################################################################
# 8. Locate FluxDatasetConfig.json and rewrite the training fields
###############################################################################
FLUX_CONFIG_PATH=$(find /workspace -name "FluxDatasetConfig.json" | head -n 1)
if [ -z "$FLUX_CONFIG_PATH" ]; then
    echo "[Script 2] ERROR: FluxDatasetConfig.json not found!"
    exit 1
fi

echo ">>> [Script 2] Updating FluxDatasetConfig.json at $FLUX_CONFIG_PATH ..."
# Generate a one-shot Python updater. The heredoc is unquoted so the shell
# interpolates ${FLUX_CONFIG_PATH}, ${MODEL_NAME}, ${TOKEN} and ${PROMPTS}
# directly into the Python source below.
# NOTE(review): triple quotes or backslashes inside PROMPTS would break the
# generated Python — this assumes Claude's prompts contain neither. Confirm.
cat <<EOF > /workspace/update_flux_config.py
import json

path = r"${FLUX_CONFIG_PATH}"
with open(path, "r", encoding="utf-8") as f:
    config = json.load(f)

config["train_data_dir"] = "/workspace/MyLearningDataset/Images"
config["output_dir"] = "/workspace/MyLearningDataset/Models"
config["output_name"] = "${MODEL_NAME}"
config["huggingface_repo_id"] = "Gerchegg/${MODEL_NAME}"
config["logging_dir"] = "/workspace/MyLearningDataset/Logs"

# Форматируем промпты с дополнительными параметрами
formatted_prompts = []
negative_params = "--n low quality, worst quality, bad anatomy, bad composition, poor, low effort --w 1024 --h 1024 --d 1 --l 3 --s 20"

for prompt in """${PROMPTS}""".split('\n'):
    if prompt.strip():
        formatted_prompt = f"${TOKEN}, {prompt.strip()} {negative_params}"
        formatted_prompts.append(formatted_prompt)

# Объединяем промпты в строку с переносами строк
config["sample_prompts"] = "\n".join(formatted_prompts)

with open(path, "w", encoding="utf-8") as f:
    json.dump(config, f, ensure_ascii=False, indent=2)
EOF

python /workspace/update_flux_config.py

echo ">>> [Script 2] Done. Analysis complete!"
|
finetune_pipeline.sh
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# finetune_pipeline.sh — step 1 of the pipeline: bootstraps a RunPod box
# (downloads helper scripts, models and kohya_ss) and launches the GUI.
set -e

###############################################################################
# 0. Environment prep: refresh apt and install unzip
###############################################################################
echo ">>> [Script 1] Step 0: Installing system dependencies ..."
apt-get update && apt-get install -y unzip

###############################################################################
# 1. Download the helper-script bundle from Hugging Face and unpack it
###############################################################################
echo ">>> [Script 1] Step 1: Downloading MyRunpodFinetuneScripts.zip ..."
wget -O /workspace/MyRunpodFinetuneScripts.zip \
    "https://huggingface.co/Gerchegg/FeaturesFluxAI/resolve/main/MyRunpodFinetuneScripts.zip"

echo ">>> [Script 1] Step 2: Unzipping MyRunpodFinetuneScripts.zip ..."
unzip -o /workspace/MyRunpodFinetuneScripts.zip -d /workspace
# The archive is expected to contain a Kohya_Flux*** folder with the needed files.

###############################################################################
# 2. Launch two "parallel" background subshells:
#    - Thread 1: installs HF packages + runs Download_Train_Models.py
#    - Thread 2: installs kohya_ss deps (torch, xformers) and starts the GUI
###############################################################################
echo ">>> [Script 1] Starting parallel threads ..."
|
| 27 |
+
|
| 28 |
+
# ------------------ Thread 1 --------------------
(
    echo ">>> [Thread 1] Installing Python packages for huggingface ..."
    pip install huggingface_hub ipywidgets hf_transfer

    # Enable the Rust-accelerated Hugging Face download backend.
    export HF_HUB_ENABLE_HF_TRANSFER=1

    echo ">>> [Thread 1] Running Download_Train_Models.py ..."
    # The script's location inside the unpacked bundle is not fixed; search for it.
    dwn_path=$(find /workspace -name "Download_Train_Models.py" | head -n 1)
    if [ -z "$dwn_path" ]; then
        echo "[Thread 1] ERROR: Download_Train_Models.py not found!"
        # Exits only this background subshell; the main script keeps running.
        exit 1
    fi

    python "$dwn_path" --dir /workspace

    echo ">>> [Thread 1] Done."
) &
|
| 46 |
+
|
| 47 |
+
# ------------------ Thread 2 --------------------
(
    echo ">>> [Thread 2] Installing Python and OS dependencies ..."
    apt update --yes
    yes | apt-get install python3.10-tk
    apt-get install psmisc --yes

    echo ">>> [Thread 2] Cloning kohya_ss ..."
    cd /workspace
    if [ ! -d "/workspace/kohya_ss" ]; then
        git clone https://github.com/bmaltais/kohya_ss.git
    fi

    # Flux support lives on the sd3-flux.1 branch.
    cd /workspace/kohya_ss
    git checkout sd3-flux.1

    echo ">>> [Thread 2] Creating Python venv ..."
    python3 -m venv venv
    source venv/bin/activate
    yes | apt-get install python3.10-tk

    echo ">>> [Thread 2] Running setup.sh ..."
    ./setup.sh -n -u

    # NOTE(review): these apt/tk installs repeat the ones above — presumably
    # defensive re-runs after setup.sh; confirm whether they are still needed.
    apt update --yes
    yes | apt-get install python3.10-tk
    apt-get install psmisc --yes

    pip install hf_transfer
    # NOTE(review): hf_transfer is installed but the accelerated backend is
    # explicitly disabled here (=0) — confirm this is intentional.
    export HF_HUB_ENABLE_HF_TRANSFER=0

    echo ">>> [Thread 2] Killing processes on port 7860 ..."
    fuser -k 7860/tcp || true

    # Re-assert the branch and venv before launching (defensive repeats).
    git checkout sd3-flux.1
    source venv/bin/activate

    echo ">>> [Thread 2] Updating torch & xformers ..."
    pip uninstall -y xformers
    pip install torch==2.5.1+cu124 torchvision --index-url https://download.pytorch.org/whl/cu124
    pip install xformers==0.0.28.post3 --index-url https://download.pytorch.org/whl/cu124

    echo ">>> [Thread 2] Launching kohya_ss GUI on 0.0.0.0:7860 ..."
    ./gui.sh --listen=0.0.0.0 --share --noverify
) &
|
| 92 |
+
|
| 93 |
+
###############################################################################
# 3. Create the shared dataset folder layout (Models, Images, Logs).
#    Per the task description, this is done in this first script.
###############################################################################
echo ">>> [Script 1] Creating folder structure in /workspace/MyLearningDataset ..."
mkdir -p /workspace/MyLearningDataset/{Models,Images,Logs}

###############################################################################
# 4. Block until both background subshells have finished
###############################################################################
echo ">>> [Script 1] Waiting for threads to finish ..."
wait
echo ">>> [Script 1] ALL DONE! Now run 'finetune_analysis.sh' to proceed."
|