File size: 2,929 Bytes
920b548
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#!/bin/bash
# Purpose: prepare a ComfyUI deployment (e.g., SimplePod) — create the model
# directory layout, download models from Hugging Face, install custom nodes,
# and start an rclone sync loop for outputs.
# Required env: HF_TOKEN (Hugging Face API token with access to gated repos).
set -euo pipefail

# Ensure directories exist (relative to script location, e.g., repo root).
# NOTE: ./models/vae is included because the VAE download below targets it.
mkdir -p \
    ./models/checkpoints \
    ./models/controlnet \
    ./models/embeddings \
    ./models/loras \
    ./models/motion \
    ./models/text_encoders \
    ./models/upscalers \
    ./models/vae \
    ./output \
    ./workflows

# Install the Hugging Face CLI if not present (huggingface_hub ships the
# `huggingface-cli` entry point).
if ! command -v huggingface-cli &> /dev/null; then
    pip install huggingface_hub
fi

# Set Hugging Face API token (via environment variable or default for testing).
# The placeholder default will fail on gated repos, so warn loudly.
export HF_TOKEN=${HF_TOKEN:-"your_huggingface_api_token"}  # Replace or set in SimplePod env vars
if [[ "$HF_TOKEN" == "your_huggingface_api_token" ]]; then
    printf 'WARNING: HF_TOKEN is the placeholder value; gated downloads will fail.\n' >&2
fi

# Download all models from their original Hugging Face sources to your repo.
echo "Downloading models from original Hugging Face repositories..."

# Stable Diffusion 3.5 Base (gated repo — requires agreement, use token).
huggingface-cli download --token "$HF_TOKEN" stabilityai/stable-diffusion-3.5-large sd3.5_large.safetensors --local-dir ./models/checkpoints

# Dance LoRA (replace with your specific LoRA URL on Hugging Face or Civitai).
huggingface-cli download --token "$HF_TOKEN" your-username/dance-lora dance_lora.safetensors --local-dir ./models/loras
# If the LoRA is on Civitai, use curl/wget with the direct URL (adjust for authentication if needed):
# curl -L -o ./models/loras/dance_lora.safetensors "https://civitai.com/api/download/<lora-id>"

# AnimateDiff for Motion
huggingface-cli download --token "$HF_TOKEN" guoyww/animatediff mm_sd_v15_v2.ckpt --local-dir ./models/motion

# VAE (optional, for better quality)
huggingface-cli download --token "$HF_TOKEN" stabilityai/sd-vae-ft-mse-original vae-ft-mse-840000-ema-pruned.safetensors --local-dir ./models/vae

# ControlNet Models (optional, for pose/motion)
huggingface-cli download --token "$HF_TOKEN" lllyasviel/ControlNet-v1-1 control_v11p_sd15_openpose.pth --local-dir ./models/controlnet
huggingface-cli download --token "$HF_TOKEN" lllyasviel/ControlNet-v1-1 control_v11p_sd15_canny.pth --local-dir ./models/controlnet

# Install ComfyUI custom nodes for SD 3.5 and motion (if needed).
echo "Installing ComfyUI custom nodes for SD 3.5 and motion..."
pip install git+https://github.com/civitai/sd3-comfyui.git
# Skip the clone if it already exists so re-running the script is idempotent.
# NOTE(review): custom_nodes under ./models/ looks unusual — ComfyUI normally
# expects <comfyui-root>/custom_nodes; confirm this path matches the deployment.
vhs_dir=./models/ComfyUI/custom_nodes/ComfyUI-VideoHelperSuite
if [[ ! -d "$vhs_dir" ]]; then
    git clone https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite.git "$vhs_dir"
fi

# Ensure Rclone is installed (likely pre-installed in ai-dock/comfyui).
# -fsSL: fail on HTTP errors instead of piping an error page into sudo bash.
if ! command -v rclone &> /dev/null; then
    curl -fsSL https://rclone.org/install.sh | sudo bash
fi

# Sync outputs from R2 on startup (optional, can be run later).
rclone copy r2bucket:output ./output --create-empty-src-dirs

# Start background sync loop for outputs to R2 (hourly). Log instead of
# dumping to nohup.out, and record the PID so the loop can be stopped later.
nohup bash -c "while true; do rclone sync ./output r2bucket:output; sleep 3600; done" > ./output/.rclone-sync.log 2>&1 &
printf '%s\n' "$!" > ./output/.rclone-sync.pid

# Note: ComfyUI launch is not run locally; this is for SimplePod deployment.
echo "Models and structure ready for ComfyUI deployment. Push to Hugging Face and use in SimplePod."