{
  "templates": {
    "video": {
      "description": "Video generation setup with Hunyuan models",
      "diffusion_models": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors",
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/FramePackI2V_HY_fp8_e4m3fn.safetensors"
      ],
      "vae": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hunyuan_video_vae_bf16.safetensors"
      ],
      "text_encoders": [
        "https://huggingface.co/calcuis/hunyuan-gguf/resolve/main/clip_l.safetensors",
        "https://huggingface.co/calcuis/hunyuan-gguf/resolve/main/llava_llama3_fp8_scaled.safetensors"
      ],
      "loras": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hyvideo_FastVideo_LoRA-fp8.safetensors",
        "https://huggingface.co/Kijai/Leapfusion-image2vid-comfy/resolve/main/leapfusion_img2vid544p_comfy.safetensors",
        "https://huggingface.co/Patarapoom/model/resolve/main/hunyuan_flat_color_v2.safetensors",
        "https://huggingface.co/Patarapoom/model/resolve/main/cinematik_flux50epoch.safetensors",
        "https://huggingface.co/Patarapoom/model/resolve/main/studio_ghibli_hv_v03_19.safetensors"
      ],
      "upscale_models": [
        "https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x_NMKD-Siax_200k.pth"
      ],
      "clip_vision": [
        "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors"
      ]
    },
    "image": {
      "description": "Image generation setup with FLUX models",
      "upscale_models": [
        "https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/8x_NMKD-Superscale_150000_G.pth"
      ],
      "vae": [
        "https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors"
      ],
      "text_encoders": [
        "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors",
        "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors"
      ],
      "checkpoints": [
        "https://huggingface.co/Patarapoom/model/resolve/main/juggernautXL_juggXIByRundiffusion.safetensors"
      ],
      "unet": [
        "https://huggingface.co/Kijai/flux-fp8/resolve/main/flux1-dev-fp8.safetensors",
        "https://huggingface.co/Patarapoom/model/resolve/main/flux1FillDevFp8_v10.safetensors"
      ],
      "controlnet": [
        "https://huggingface.co/xinsir/controlnet-union-sdxl-1.0/resolve/main/diffusion_pytorch_model.safetensors",
        "https://huggingface.co/dim/Shakker-Labs_FLUX.1-dev-ControlNet-Union-Pro-fp8.safetensors/resolve/main/Shakker-Labs_FLUX.1-dev-ControlNet-Union-Pro-fp8.safetensors"
      ],
      "clip_vision": [
        "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors",
        "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors"
      ],
      "ipadapter": [
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.safetensors",
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors",
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.safetensors"
      ],
      "style_models": [
        "https://huggingface.co/Patarapoom/model/resolve/main/flux1-redux-dev.safetensors"
      ],
      "diffusion_models": []
    },
    "empty": {
      "description": "Empty template with no model downloads",
      "diffusion_models": [],
      "unet": [],
      "vae": [],
      "text_encoders": [],
      "upscale_models": [],
      "checkpoints": [],
      "controlnet": [],
      "clip_vision": [],
      "ipadapter": [],
      "style_models": []
    }
  }
}