{
  "templates": {
    "video": {
      "description": "Video generation setup with Hunyuan models",
      "diffusion_models": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors"
      ],
      "unet": [
        "https://huggingface.co/city96/FastHunyuan-gguf/resolve/main/fast-hunyuan-video-t2v-720p-Q4_K_M.gguf"
      ],
      "vae": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hunyuan_video_vae_bf16.safetensors"
      ],
      "text_encoders": [
        "https://huggingface.co/calcuis/hunyuan-gguf/resolve/main/clip_l.safetensors",
        "https://huggingface.co/calcuis/hunyuan-gguf/resolve/main/llava_llama3_fp8_scaled.safetensors"
      ],
      "loras": [
        "https://huggingface.co/Kijai/HunyuanVideo_comfy/resolve/main/hyvideo_FastVideo_LoRA-fp8.safetensors",
        "https://huggingface.co/Kijai/Leapfusion-image2vid-comfy/resolve/main/leapfusion_img2vid544p_comfy.safetensors"
      ]
    },
    "image": {
      "description": "Image generation setup with FLUX models",
      "upscale_models": [
        "https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/8x_NMKD-Superscale_150000_G.pth"
      ],
      "vae": [
        "https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors"
      ],
      "text_encoders": [
        "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors",
        "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors"
      ],
      "checkpoints": [
        "https://huggingface.co/Patarapoom/model/resolve/main/juggernautXL_juggXIByRundiffusion.safetensors"
      ],
      "unet": [
        "https://huggingface.co/Kijai/flux-fp8/resolve/main/flux1-dev-fp8.safetensors",
        "https://huggingface.co/Patarapoom/model/resolve/main/flux1FillDevFp8_v10.safetensors"
      ],
      "controlnet": [
        "https://huggingface.co/xinsir/controlnet-union-sdxl-1.0/resolve/main/diffusion_pytorch_model.safetensors",
        "https://huggingface.co/dim/Shakker-Labs_FLUX.1-dev-ControlNet-Union-Pro-fp8.safetensors/resolve/main/Shakker-Labs_FLUX.1-dev-ControlNet-Union-Pro-fp8.safetensors"
      ],
      "clip_vision": [
        "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors",
        "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors"
      ],
      "ipadapter": [
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.safetensors",
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors",
        "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.safetensors"
      ],
      "style_models": [
        "https://huggingface.co/Patarapoom/model/resolve/main/flux1-redux-dev.safetensors"
      ],
      "diffusion_models": []
    },
    "empty": {
      "description": "Empty template with no model downloads",
      "diffusion_models": [],
      "unet": [],
      "vae": [],
      "text_encoders": [],
      "upscale_models": [],
      "checkpoints": [],
      "controlnet": [],
      "clip_vision": [],
      "ipadapter": [],
      "style_models": []
    }
  }
}