stateofw committed on
Commit
12d573d
·
1 Parent(s): b6102fb

Fix HF Spaces build: add pre-requirements.txt and missing files

Browse files

- Add pre-requirements.txt with PyTorch and build dependencies
- Create missing workflow JSON files for ComfyUI
- Add packages.txt for system dependencies
- Update directory structure with .gitkeep files
- Clean up requirements.txt to avoid duplicates
- Make app.py executable
- All features preserved

.gitignore ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ env/
8
+ venv/
9
+ ENV/
10
+ .venv
11
+
12
+ # ComfyUI
13
+ comfyui/ComfyUI/
14
+ outputs/
15
+ temp/
16
+ *.safetensors
17
+ *.ckpt
18
+ *.pt
19
+ *.pth
20
+ *.bin
21
+
22
+ # Models (large files)
23
+ models/checkpoints/*
24
+ models/loras/*
25
+ models/vae/*
26
+ !models/checkpoints/.gitkeep
27
+ !models/loras/.gitkeep
28
+ !models/vae/.gitkeep
29
+
30
+ # Logs
31
+ *.log
32
+ *.out
33
+ *.err
34
+
35
+ # OS
36
+ .DS_Store
37
+ Thumbs.db
38
+
39
+ # IDE
40
+ .vscode/
41
+ .idea/
42
+ *.swp
43
+ *.swo
44
+
45
+ # Temporary
46
+ *.tmp
47
+ *.temp
48
+ *.cache
49
+
50
+ # Hugging Face
51
+ .huggingface/
README.md CHANGED
@@ -4,9 +4,10 @@ emoji: 📸
4
  colorFrom: purple
5
  colorTo: pink
6
  sdk: gradio
7
- sdk_version: "4.44.0"
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  # 🎨 Unified UGC Platform - Hugging Face Space
 
4
  colorFrom: purple
5
  colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
+ license: apache-2.0
11
  ---
12
 
13
  # 🎨 Unified UGC Platform - Hugging Face Space
app.py CHANGED
File without changes
models/checkpoints/.gitkeep ADDED
File without changes
models/loras/.gitkeep ADDED
File without changes
models/vae/.gitkeep ADDED
File without changes
packages.txt CHANGED
@@ -1,7 +1,8 @@
1
- git
2
- git-lfs
3
- wget
4
- curl
5
  ffmpeg
6
  libsm6
7
- libxext6
 
 
 
 
 
 
 
 
 
 
1
  ffmpeg
2
  libsm6
3
+ libxext6
4
+ libgl1-mesa-glx
5
+ libglib2.0-0
6
+ libgomp1
7
+ git
8
+ git-lfs
pre-requirements.txt CHANGED
@@ -1,3 +1,23 @@
1
- git
2
- wget
3
- curl
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pre-requirements for Hugging Face Spaces
2
+ # These packages need to be installed before other dependencies
3
+
4
+ # PyTorch (pinned; CUDA wheels apply only on GPU-enabled Spaces — default Spaces hardware is CPU)
5
+ torch==2.1.0
6
+ torchvision==0.16.0
7
+ torchaudio==2.1.0
8
+
9
+ # Essential build tools
10
+ wheel>=0.40.0
11
+ setuptools>=68.0.0
12
+ pip>=23.0
13
+
14
+ # Required for some ComfyUI dependencies
15
+ cmake>=3.25.0
16
+ ninja>=1.11.0
17
+
18
+ # Image processing fundamentals
19
+ pillow>=10.0.0
20
+ numpy>=1.24.0
21
+
22
+ # Required for xformers build
23
+ triton>=2.1.0
requirements.txt CHANGED
@@ -1,13 +1,9 @@
1
  # Core dependencies
2
  gradio>=4.44.0
3
- pillow>=10.0.0
4
- numpy>=1.24.0
5
  requests>=2.31.0
6
  huggingface_hub>=0.19.0
7
 
8
  # ComfyUI dependencies
9
- torch>=2.0.0
10
- torchvision>=0.15.0
11
  transformers>=4.30.0
12
  diffusers>=0.21.0
13
  accelerate>=0.20.0
@@ -41,7 +37,6 @@ albumentations>=1.3.0
41
  # Additional ComfyUI dependencies
42
  aiohttp>=3.8.0
43
  colorama>=0.4.6
44
- torchaudio>=2.0.0
45
  torchsde>=0.2.5
46
  soundfile>=0.12.1
47
  librosa>=0.10.0
 
1
  # Core dependencies
2
  gradio>=4.44.0
 
 
3
  requests>=2.31.0
4
  huggingface_hub>=0.19.0
5
 
6
  # ComfyUI dependencies
 
 
7
  transformers>=4.30.0
8
  diffusers>=0.21.0
9
  accelerate>=0.20.0
 
37
  # Additional ComfyUI dependencies
38
  aiohttp>=3.8.0
39
  colorama>=0.4.6
 
40
  torchsde>=0.2.5
41
  soundfile>=0.12.1
42
  librosa>=0.10.0
setup.sh CHANGED
@@ -1,7 +1,10 @@
1
  #!/bin/bash
2
  # Setup script for Hugging Face Spaces
3
 
 
 
4
  echo "🚀 Setting up Unified UGC Platform..."
 
5
 
6
  # Create directory structure
7
  mkdir -p comfyui/ComfyUI
@@ -15,7 +18,7 @@ mkdir -p temp
15
  # Clone ComfyUI if not exists
16
  if [ ! -d "comfyui/ComfyUI/.git" ]; then
17
  echo "📦 Cloning ComfyUI..."
18
- git clone https://github.com/comfyanonymous/ComfyUI.git comfyui/ComfyUI
19
  fi
20
 
21
  # Install custom nodes for face enhancement
@@ -23,10 +26,14 @@ echo "📦 Installing custom nodes..."
23
  cd comfyui/ComfyUI/custom_nodes
24
 
25
  # FaceDetailer (from Impact Pack)
26
- git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack.git
 
 
27
 
28
  # Face Restore CF
29
- git clone https://github.com/mav-rik/facerestore_cf.git
 
 
30
 
31
  # Install dependencies for custom nodes
32
  pip install ultralytics insightface onnxruntime
@@ -44,90 +51,10 @@ echo "📥 Downloading models..."
44
  # wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P models/checkpoints/
45
  # wget -c https://huggingface.co/strangerzonehf/Flux-Super-Realism-LoRA/resolve/main/super_realism.safetensors -P models/loras/
46
 
47
- # Create default workflows
48
- echo "📝 Creating default workflows..."
49
-
50
- # Organic Portrait Workflow
51
- cat > workflows/organic_portrait_workflow.json << 'EOF'
52
- {
53
- "1": {
54
- "class_type": "CheckpointLoaderSimple",
55
- "inputs": {
56
- "ckpt_name": "sd_xl_base_1.0.safetensors"
57
- }
58
- },
59
- "2": {
60
- "class_type": "CLIPTextEncode",
61
- "inputs": {
62
- "text": "portrait photo",
63
- "clip": ["1", 1]
64
- }
65
- },
66
- "3": {
67
- "class_type": "KSampler",
68
- "inputs": {
69
- "seed": 0,
70
- "steps": 28,
71
- "cfg": 6.0,
72
- "sampler_name": "ddim",
73
- "scheduler": "normal",
74
- "denoise": 1.0,
75
- "model": ["1", 0],
76
- "positive": ["2", 0],
77
- "negative": ["2", 0],
78
- "latent_image": ["4", 0]
79
- }
80
- },
81
- "4": {
82
- "class_type": "EmptyLatentImage",
83
- "inputs": {
84
- "width": 768,
85
- "height": 1024,
86
- "batch_size": 1
87
- }
88
- },
89
- "5": {
90
- "class_type": "VAEDecode",
91
- "inputs": {
92
- "samples": ["3", 0],
93
- "vae": ["1", 2]
94
- }
95
- },
96
- "6": {
97
- "class_type": "SaveImage",
98
- "inputs": {
99
- "images": ["5", 0],
100
- "filename_prefix": "ugc_output"
101
- }
102
- }
103
- }
104
- EOF
105
-
106
- # Face Enhancement Workflow
107
- cat > workflows/face_enhancement_workflow.json << 'EOF'
108
- {
109
- "1": {
110
- "class_type": "LoadImage",
111
- "inputs": {
112
- "image": "input.png"
113
- }
114
- },
115
- "2": {
116
- "class_type": "FaceDetailer",
117
- "inputs": {
118
- "image": ["1", 0],
119
- "model": "GFPGAN",
120
- "strength": 0.5
121
- }
122
- },
123
- "3": {
124
- "class_type": "SaveImage",
125
- "inputs": {
126
- "images": ["2", 0],
127
- "filename_prefix": "face_enhanced"
128
- }
129
- }
130
- }
131
- EOF
132
 
133
  echo "✅ Setup complete!"
 
1
  #!/bin/bash
2
  # Setup script for Hugging Face Spaces
3
 
4
+ set -e # Exit on error
5
+
6
  echo "🚀 Setting up Unified UGC Platform..."
7
+ echo "Working directory: $(pwd)"
8
 
9
  # Create directory structure
10
  mkdir -p comfyui/ComfyUI
 
18
  # Clone ComfyUI if not exists
19
  if [ ! -d "comfyui/ComfyUI/.git" ]; then
20
  echo "📦 Cloning ComfyUI..."
21
+ git clone --depth 1 https://github.com/comfyanonymous/ComfyUI.git comfyui/ComfyUI
22
  fi
23
 
24
  # Install custom nodes for face enhancement
 
26
  cd comfyui/ComfyUI/custom_nodes
27
 
28
  # FaceDetailer (from Impact Pack)
29
+ if [ ! -d "ComfyUI-Impact-Pack" ]; then
30
+ git clone --depth 1 https://github.com/ltdrdata/ComfyUI-Impact-Pack.git
31
+ fi
32
 
33
  # Face Restore CF
34
+ if [ ! -d "facerestore_cf" ]; then
35
+ git clone --depth 1 https://github.com/mav-rik/facerestore_cf.git
36
+ fi
37
 
38
  # Install dependencies for custom nodes
39
  pip install ultralytics insightface onnxruntime
 
51
  # wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P models/checkpoints/
52
  # wget -c https://huggingface.co/strangerzonehf/Flux-Super-Realism-LoRA/resolve/main/super_realism.safetensors -P models/loras/
53
 
54
+ # Workflows are already created, just check they exist
55
+ echo "📝 Checking workflows..."
56
+ if [ ! -f "workflows/organic_portrait_workflow.json" ]; then
57
+ echo "⚠️ Missing workflow files!"
58
+ fi
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
  echo "✅ Setup complete!"
workflows/face_enhancement_workflow.json CHANGED
@@ -1,59 +1,34 @@
1
  {
2
- "3": {
3
- "inputs": {
4
- "seed": -1,
5
- "steps": 30,
6
- "cfg": 6.0,
7
- "sampler_name": "euler",
8
- "scheduler": "normal",
9
- "denoise": 1,
10
- "model": ["4", 0],
11
- "positive": ["6", 0],
12
- "negative": ["7", 0],
13
- "latent_image": ["5", 0]
14
- },
15
- "class_type": "KSampler"
16
- },
17
- "4": {
18
- "inputs": {
19
- "ckpt_name": "flux1-dev.safetensors"
20
- },
21
- "class_type": "CheckpointLoaderSimple"
22
- },
23
- "5": {
24
- "inputs": {
25
- "width": 768,
26
- "height": 1024,
27
- "batch_size": 1
28
- },
29
- "class_type": "EmptyLatentImage"
30
- },
31
- "6": {
32
  "inputs": {
33
- "text": "professional photograph",
34
- "clip": ["4", 1]
35
- },
36
- "class_type": "CLIPTextEncode"
37
  },
38
- "7": {
 
39
  "inputs": {
40
- "text": "",
41
- "clip": ["4", 1]
42
- },
43
- "class_type": "CLIPTextEncode"
 
44
  },
45
- "8": {
 
46
  "inputs": {
47
- "samples": ["3", 0],
48
- "vae": ["4", 2]
49
- },
50
- "class_type": "VAEDecode"
 
51
  },
52
- "9": {
 
53
  "inputs": {
54
- "filename_prefix": "ComfyUI",
55
- "images": ["8", 0]
56
- },
57
- "class_type": "SaveImage"
58
  }
59
  }
 
1
  {
2
+ "1": {
3
+ "class_type": "LoadImage",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  "inputs": {
5
+ "image": "input.png",
6
+ "upload": "image"
7
+ }
 
8
  },
9
+ "2": {
10
+ "class_type": "FaceRestoreCFWithModel",
11
  "inputs": {
12
+ "facerestore_model": "GFPGANv1.4.pth",
13
+ "image": ["1", 0],
14
+ "facedetection": "retinaface_resnet50",
15
+ "codeformer_fidelity": 0.5
16
+ }
17
  },
18
+ "3": {
19
+ "class_type": "ImageBlend",
20
  "inputs": {
21
+ "image1": ["1", 0],
22
+ "image2": ["2", 0],
23
+ "blend_factor": 0.8,
24
+ "blend_mode": "normal"
25
+ }
26
  },
27
+ "4": {
28
+ "class_type": "SaveImage",
29
  "inputs": {
30
+ "images": ["3", 0],
31
+ "filename_prefix": "face_enhanced"
32
+ }
 
33
  }
34
  }
workflows/full_body_workflow.json ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "1": {
3
+ "class_type": "CheckpointLoaderSimple",
4
+ "inputs": {
5
+ "ckpt_name": "sd_xl_base_1.0.safetensors"
6
+ }
7
+ },
8
+ "2": {
9
+ "class_type": "CLIPTextEncode",
10
+ "inputs": {
11
+ "text": "full body portrait",
12
+ "clip": ["1", 1]
13
+ }
14
+ },
15
+ "3": {
16
+ "class_type": "CLIPTextEncode",
17
+ "inputs": {
18
+ "text": "blurry, low quality, distorted",
19
+ "clip": ["1", 1]
20
+ }
21
+ },
22
+ "4": {
23
+ "class_type": "EmptyLatentImage",
24
+ "inputs": {
25
+ "width": 576,
26
+ "height": 1024,
27
+ "batch_size": 1
28
+ }
29
+ },
30
+ "5": {
31
+ "class_type": "KSampler",
32
+ "inputs": {
33
+ "seed": 0,
34
+ "steps": 28,
35
+ "cfg": 6.0,
36
+ "sampler_name": "dpmpp_2m",
37
+ "scheduler": "karras",
38
+ "denoise": 1.0,
39
+ "model": ["1", 0],
40
+ "positive": ["2", 0],
41
+ "negative": ["3", 0],
42
+ "latent_image": ["4", 0]
43
+ }
44
+ },
45
+ "6": {
46
+ "class_type": "VAEDecode",
47
+ "inputs": {
48
+ "samples": ["5", 0],
49
+ "vae": ["1", 2]
50
+ }
51
+ },
52
+ "7": {
53
+ "class_type": "SaveImage",
54
+ "inputs": {
55
+ "images": ["6", 0],
56
+ "filename_prefix": "full_body"
57
+ }
58
+ }
59
+ }
workflows/organic_portrait_workflow.json ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "1": {
3
+ "class_type": "CheckpointLoaderSimple",
4
+ "inputs": {
5
+ "ckpt_name": "sd_xl_base_1.0.safetensors"
6
+ }
7
+ },
8
+ "2": {
9
+ "class_type": "CLIPTextEncode",
10
+ "inputs": {
11
+ "text": "portrait photo",
12
+ "clip": ["1", 1]
13
+ }
14
+ },
15
+ "3": {
16
+ "class_type": "CLIPTextEncode",
17
+ "inputs": {
18
+ "text": "blurry, low quality, anime, cartoon",
19
+ "clip": ["1", 1]
20
+ }
21
+ },
22
+ "4": {
23
+ "class_type": "EmptyLatentImage",
24
+ "inputs": {
25
+ "width": 768,
26
+ "height": 1024,
27
+ "batch_size": 1
28
+ }
29
+ },
30
+ "5": {
31
+ "class_type": "KSampler",
32
+ "inputs": {
33
+ "seed": 0,
34
+ "steps": 28,
35
+ "cfg": 6.0,
36
+ "sampler_name": "ddim",
37
+ "scheduler": "normal",
38
+ "denoise": 1.0,
39
+ "model": ["1", 0],
40
+ "positive": ["2", 0],
41
+ "negative": ["3", 0],
42
+ "latent_image": ["4", 0]
43
+ }
44
+ },
45
+ "6": {
46
+ "class_type": "VAEDecode",
47
+ "inputs": {
48
+ "samples": ["5", 0],
49
+ "vae": ["1", 2]
50
+ }
51
+ },
52
+ "7": {
53
+ "class_type": "SaveImage",
54
+ "inputs": {
55
+ "images": ["6", 0],
56
+ "filename_prefix": "ugc_output"
57
+ }
58
+ }
59
+ }
workflows/street_photo_workflow.json ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "1": {
3
+ "class_type": "CheckpointLoaderSimple",
4
+ "inputs": {
5
+ "ckpt_name": "sd_xl_base_1.0.safetensors"
6
+ }
7
+ },
8
+ "2": {
9
+ "class_type": "CLIPTextEncode",
10
+ "inputs": {
11
+ "text": "candid street photography",
12
+ "clip": ["1", 1]
13
+ }
14
+ },
15
+ "3": {
16
+ "class_type": "CLIPTextEncode",
17
+ "inputs": {
18
+ "text": "posed, studio lighting, professional",
19
+ "clip": ["1", 1]
20
+ }
21
+ },
22
+ "4": {
23
+ "class_type": "EmptyLatentImage",
24
+ "inputs": {
25
+ "width": 1024,
26
+ "height": 768,
27
+ "batch_size": 1
28
+ }
29
+ },
30
+ "5": {
31
+ "class_type": "KSampler",
32
+ "inputs": {
33
+ "seed": 0,
34
+ "steps": 28,
35
+ "cfg": 7.5,
36
+ "sampler_name": "euler_a",
37
+ "scheduler": "normal",
38
+ "denoise": 1.0,
39
+ "model": ["1", 0],
40
+ "positive": ["2", 0],
41
+ "negative": ["3", 0],
42
+ "latent_image": ["4", 0]
43
+ }
44
+ },
45
+ "6": {
46
+ "class_type": "VAEDecode",
47
+ "inputs": {
48
+ "samples": ["5", 0],
49
+ "vae": ["1", 2]
50
+ }
51
+ },
52
+ "7": {
53
+ "class_type": "SaveImage",
54
+ "inputs": {
55
+ "images": ["6", 0],
56
+ "filename_prefix": "street_photo"
57
+ }
58
+ }
59
+ }