Upload 72 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- wan-gp-defaults.json/ReadMe.txt +13 -0
- wan-gp-defaults.json/alpha.json +19 -0
- wan-gp-defaults.json/alpha_sf.json +17 -0
- wan-gp-defaults.json/animate.json +17 -0
- wan-gp-defaults.json/chatterbox.json +18 -0
- wan-gp-defaults.json/fantasy.json +11 -0
- wan-gp-defaults.json/flf2v_720p.json +16 -0
- wan-gp-defaults.json/flux.json +15 -0
- wan-gp-defaults.json/flux_chroma.json +17 -0
- wan-gp-defaults.json/flux_dev_kontext.json +16 -0
- wan-gp-defaults.json/flux_dev_kontext_dreamomni2.json +19 -0
- wan-gp-defaults.json/flux_dev_umo.json +23 -0
- wan-gp-defaults.json/flux_dev_uso.json +16 -0
- wan-gp-defaults.json/flux_krea.json +15 -0
- wan-gp-defaults.json/flux_schnell.json +16 -0
- wan-gp-defaults.json/flux_srpo.json +14 -0
- wan-gp-defaults.json/flux_srpo_uso.json +16 -0
- wan-gp-defaults.json/fun_inp.json +13 -0
- wan-gp-defaults.json/fun_inp_1.3B.json +11 -0
- wan-gp-defaults.json/hunyuan.json +12 -0
- wan-gp-defaults.json/hunyuan_avatar.json +12 -0
- wan-gp-defaults.json/hunyuan_custom.json +12 -0
- wan-gp-defaults.json/hunyuan_custom_audio.json +12 -0
- wan-gp-defaults.json/hunyuan_custom_edit.json +12 -0
- wan-gp-defaults.json/hunyuan_i2v.json +12 -0
- wan-gp-defaults.json/hunyuan_t2v_accvideo.json +30 -0
- wan-gp-defaults.json/hunyuan_t2v_fast.json +32 -0
- wan-gp-defaults.json/i2v.json +13 -0
- wan-gp-defaults.json/i2v_2_2.json +25 -0
- wan-gp-defaults.json/i2v_2_2_multitalk.json +18 -0
- wan-gp-defaults.json/i2v_720p.json +14 -0
- wan-gp-defaults.json/i2v_fusionix.json +11 -0
- wan-gp-defaults.json/infinitetalk.json +16 -0
- wan-gp-defaults.json/infinitetalk_multi.json +16 -0
- wan-gp-defaults.json/ltxv_13B.json +19 -0
- wan-gp-defaults.json/ltxv_distilled.json +15 -0
- wan-gp-defaults.json/lucy_edit.json +20 -0
- wan-gp-defaults.json/lucy_edit_fastwan.json +17 -0
- wan-gp-defaults.json/lynx.json +18 -0
- wan-gp-defaults.json/moviigen.json +16 -0
- wan-gp-defaults.json/multitalk.json +15 -0
- wan-gp-defaults.json/multitalk_720p.json +13 -0
- wan-gp-defaults.json/phantom_1.3B.json +11 -0
- wan-gp-defaults.json/phantom_14B.json +13 -0
- wan-gp-defaults.json/qwen_image_20B.json +21 -0
- wan-gp-defaults.json/qwen_image_edit_20B.json +18 -0
- wan-gp-defaults.json/qwen_image_edit_plus_20B.json +17 -0
- wan-gp-defaults.json/recam_1.3B.json +11 -0
- wan-gp-defaults.json/sky_df_1.3B.json +11 -0
- wan-gp-defaults.json/sky_df_14B.json +13 -0
wan-gp-defaults.json/ReadMe.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Please do not modify any file in this folder.
|
| 2 |
+
|
| 3 |
+
If you want to change a property of a default model, copy the corresponding model file in the ./finetunes folder and modify the properties you want to change in the new file.
|
| 4 |
+
If a property is not in the new file, it will be inherited automatically from the default file with the same name.
|
| 5 |
+
|
| 6 |
+
For instance to hide a model:
|
| 7 |
+
|
| 8 |
+
{
|
| 9 |
+
"model":
|
| 10 |
+
{
|
| 11 |
+
"visible": false
|
| 12 |
+
}
|
| 13 |
+
}
|
wan-gp-defaults.json/alpha.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.1 Alpha 14B",
|
| 5 |
+
"architecture" : "alpha",
|
| 6 |
+
"description": "This model successfully generates various scenes with accurate and clearly rendered transparency. Notably, it can synthesize diverse semi-transparent objects, glowing effects, and fine-grained details such as hair. For each video generated you will find a Zip file with the same name that will contain the corresponding RGBA images.",
|
| 7 |
+
"URLs": "t2v",
|
| 8 |
+
"preload_URLs": [
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_vae_rgb_channel.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_vae_alpha_channel.safetensors"
|
| 11 |
+
],
|
| 12 |
+
"loras": [
|
| 13 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_dora.safetensors"
|
| 14 |
+
],
|
| 15 |
+
"loras_multipliers": [ 1 ]
|
| 16 |
+
},
|
| 17 |
+
"prompt": "A large orange octopus is seen resting. The background of the video is transparent."
|
| 18 |
+
|
| 19 |
+
}
|
wan-gp-defaults.json/alpha_sf.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.1 Alpha Lightning 14B",
|
| 5 |
+
"architecture" : "alpha",
|
| 6 |
+
"description": "This model is accelerated by the Lightning / SelfForcing process. It successfully generates various scenes with accurate and clearly rendered transparency. Notably, it can synthesize diverse semi-transparent objects, glowing effects, and fine-grained details such as hair. For each video generated you will find a Zip file with the same name that will contain the corresponding RGBA images.",
|
| 7 |
+
"URLs": "t2v_sf",
|
| 8 |
+
"preload_URLs": "alpha",
|
| 9 |
+
"loras": "alpha",
|
| 10 |
+
"loras_multipliers": [ 1 ],
|
| 11 |
+
"profiles_dir" : [""]
|
| 12 |
+
},
|
| 13 |
+
"prompt": "A large orange octopus is seen resting. The background of the video is transparent.",
|
| 14 |
+
"num_inference_steps": 4,
|
| 15 |
+
"guidance_scale": 1,
|
| 16 |
+
"flow_shift": 3
|
| 17 |
+
}
|
wan-gp-defaults.json/animate.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Wan2.2 Animate 14B",
|
| 4 |
+
"architecture": "animate",
|
| 5 |
+
"description": "Wan-Animate takes a video and a character image as input, and generates a video in either 'Animation' or 'Replacement' mode. A Sliding Window of at least 81 frames is recommended to obtain the best Style continuity.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_quanto_fp16_int8.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_quanto_bf16_int8.safetensors"
|
| 10 |
+
],
|
| 11 |
+
"preload_URLs" :
|
| 12 |
+
[
|
| 13 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_relighting_lora.safetensors"
|
| 14 |
+
],
|
| 15 |
+
"group": "wan2_2"
|
| 16 |
+
}
|
| 17 |
+
}
|
wan-gp-defaults.json/chatterbox.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "TTS Chatterbox Multilingual",
|
| 4 |
+
"architecture": "chatterbox",
|
| 5 |
+
"description": "Resemble AI's open multilingual TTS with language selection via model mode.",
|
| 6 |
+
"URLs": []
|
| 7 |
+
},
|
| 8 |
+
"prompt": "Welcome to Chatterbox !",
|
| 9 |
+
"negative_prompt": "",
|
| 10 |
+
"audio_prompt_type": "A",
|
| 11 |
+
"model_mode": "en",
|
| 12 |
+
"repeat_generation": 1,
|
| 13 |
+
"video_length": 0,
|
| 14 |
+
"num_inference_steps": 0,
|
| 15 |
+
"pace": 0.5,
|
| 16 |
+
"exaggeration": 0.5,
|
| 17 |
+
"temperature": 0.8
|
| 18 |
+
}
|
wan-gp-defaults.json/fantasy.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Fantasy Talking 720p 14B",
|
| 5 |
+
"architecture" : "fantasy",
|
| 6 |
+
"modules": [ ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_fantasy_speaking_14B_bf16.safetensors"]],
|
| 7 |
+
"description": "The Fantasy Talking model corresponds to the original Wan image 2 video model combined with the Fantasy Speaking module to process an audio Input.",
|
| 8 |
+
"URLs": "i2v_720p"
|
| 9 |
+
},
|
| 10 |
+
"resolution": "1280x720"
|
| 11 |
+
}
|
wan-gp-defaults.json/flf2v_720p.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "First Last Frame to Video 720p (FLF2V) 14B",
|
| 5 |
+
"architecture" : "flf2v_720p",
|
| 6 |
+
"visible" : true,
|
| 7 |
+
"description": "The First Last Frame 2 Video model is the official model Image 2 Video model that supports Start and End frames.",
|
| 8 |
+
"URLs": [
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_mbf16.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_quanto_mbf16_int8.safetensors",
|
| 11 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_quanto_mfp16_int8.safetensors"
|
| 12 |
+
],
|
| 13 |
+
"auto_quantize": true
|
| 14 |
+
},
|
| 15 |
+
"resolution": "1280x720"
|
| 16 |
+
}
|
wan-gp-defaults.json/flux.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Dev 12B",
|
| 4 |
+
"architecture": "flux",
|
| 5 |
+
"description": "FLUX.1 Dev is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"image_outputs": true
|
| 11 |
+
},
|
| 12 |
+
"prompt": "draw a hat",
|
| 13 |
+
"resolution": "1280x720",
|
| 14 |
+
"batch_size": 1
|
| 15 |
+
}
|
wan-gp-defaults.json/flux_chroma.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Chroma 1 HD 8.9B",
|
| 4 |
+
"architecture": "flux_chroma",
|
| 5 |
+
"description": "FLUX.1 Chroma is an 8.9 billion parameter model. As a base model, Chroma1 is intentionally designed to be an excellent starting point for finetuning. It provides a strong, neutral foundation for developers, researchers, and artists to create specialized models.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-chroma_hd_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-chroma_hd_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"image_outputs": true
|
| 11 |
+
},
|
| 12 |
+
"prompt": "draw a hat",
|
| 13 |
+
"resolution": "1280x720",
|
| 14 |
+
"guidance_scale": 3.0,
|
| 15 |
+
"num_inference_steps": 20,
|
| 16 |
+
"batch_size": 1
|
| 17 |
+
}
|
wan-gp-defaults.json/flux_dev_kontext.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Dev Kontext 12B",
|
| 4 |
+
"architecture": "flux_dev_kontext",
|
| 5 |
+
"description": "FLUX.1 Kontext is a 12 billion parameter rectified flow transformer capable of editing images based on instructions stored in the Prompt. Please be aware that Flux Kontext is picky on the resolution of the input image and the output dimensions may not match the dimensions of the input image.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_quanto_bf16_int8.safetensors"
|
| 9 |
+
]
|
| 10 |
+
},
|
| 11 |
+
"prompt": "add a hat",
|
| 12 |
+
"resolution": "1280x720",
|
| 13 |
+
"batch_size": 1
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
|
wan-gp-defaults.json/flux_dev_kontext_dreamomni2.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 DreamOmni2 12B",
|
| 4 |
+
"architecture": "flux_dev_kontext_dreamomni2",
|
| 5 |
+
"description": "DreamOmni2 is a Multimodal Instruction-based Editing and Generation Model",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"preload_URLs": [ "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux_dreamomni2_edit_lora.safetensors",
|
| 11 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux_dreamomni2_gen_lora.safetensors"
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
"prompt": "In the scene, the character from the first image stands on the left, and the character from the second image stands on the right. They are shaking hands against the backdrop of a spaceship interior.",
|
| 15 |
+
"resolution": "1280x720",
|
| 16 |
+
"batch_size": 1
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
|
wan-gp-defaults.json/flux_dev_umo.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 UMO Dev 12B",
|
| 4 |
+
"architecture": "flux_dev_umo",
|
| 5 |
+
"description": "FLUX.1 UMO Dev is a model that can Edit Images with a specialization in combining multiple image references (resized internally at 512x512 max) to produce an Image output. Best Image preservation at 768x768 Resolution Output.",
|
| 6 |
+
"URLs": "flux",
|
| 7 |
+
"loras": ["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-UMO_dit_lora_bf16.safetensors"],
|
| 8 |
+
"resolutions": [ ["1024x1024 (1:1)", "1024x1024"],
|
| 9 |
+
["768x1024 (3:4)", "768x1024"],
|
| 10 |
+
["1024x768 (4:3)", "1024x768"],
|
| 11 |
+
["512x1024 (1:2)", "512x1024"],
|
| 12 |
+
["1024x512 (2:1)", "1024x512"],
|
| 13 |
+
["768x768 (1:1)", "768x768"],
|
| 14 |
+
["768x512 (3:2)", "768x512"],
|
| 15 |
+
["512x768 (2:3)", "512x768"]]
|
| 16 |
+
},
|
| 17 |
+
"prompt": "the man is wearing a hat",
|
| 18 |
+
"embedded_guidance_scale": 4,
|
| 19 |
+
"resolution": "768x768",
|
| 20 |
+
"batch_size": 1
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
|
wan-gp-defaults.json/flux_dev_uso.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 USO Dev 12B",
|
| 4 |
+
"architecture": "flux_dev_uso",
|
| 5 |
+
"description": "FLUX.1 USO Dev is a model that can Edit Images with a specialization in Style Transfers (up to two).",
|
| 6 |
+
"modules": [ ["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-USO_projector_bf16.safetensors"]],
|
| 7 |
+
"URLs": "flux",
|
| 8 |
+
"loras": ["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-USO_dit_lora_bf16.safetensors"]
|
| 9 |
+
},
|
| 10 |
+
"prompt": "the man is wearing a hat",
|
| 11 |
+
"embedded_guidance_scale": 4,
|
| 12 |
+
"resolution": "1024x1024",
|
| 13 |
+
"batch_size": 1
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
|
wan-gp-defaults.json/flux_krea.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Dev Krea 12B",
|
| 4 |
+
"architecture": "flux",
|
| 5 |
+
"description": "Cutting-edge output quality, with a focus on aesthetic photography.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-krea-dev_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-krea-dev_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"image_outputs": true
|
| 11 |
+
},
|
| 12 |
+
"prompt": "draw a hat",
|
| 13 |
+
"resolution": "1280x720",
|
| 14 |
+
"batch_size": 1
|
| 15 |
+
}
|
wan-gp-defaults.json/flux_schnell.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Schnell 12B",
|
| 4 |
+
"architecture": "flux_schnell",
|
| 5 |
+
"description": "FLUX.1 Schnell is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. As a distilled model it requires fewer denoising steps.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-schnell_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-schnell_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"image_outputs": true
|
| 11 |
+
},
|
| 12 |
+
"prompt": "draw a hat",
|
| 13 |
+
"resolution": "1280x720",
|
| 14 |
+
"num_inference_steps": 10,
|
| 15 |
+
"batch_size": 1
|
| 16 |
+
}
|
wan-gp-defaults.json/flux_srpo.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 Dev SRPO 12B",
|
| 4 |
+
"architecture": "flux",
|
| 5 |
+
"description": "By fine-tuning the FLUX.1.dev model with optimized denoising and online reward adjustment, SRPO improves its human-evaluated realism and aesthetic quality by over 3x.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-srpo-dev_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-srpo-dev_quanto_bf16_int8.safetensors"
|
| 9 |
+
]
|
| 10 |
+
},
|
| 11 |
+
"prompt": "draw a hat",
|
| 12 |
+
"resolution": "1024x1024",
|
| 13 |
+
"batch_size": 1
|
| 14 |
+
}
|
wan-gp-defaults.json/flux_srpo_uso.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Flux 1 USO SRPO 12B",
|
| 4 |
+
"architecture": "flux_dev_uso",
|
| 5 |
+
"description": "FLUX.1 USO SRPO is a model that can Edit Images with a specialization in Style Transfers (up to two). It leverages the improved Image quality brought by the SRPO process",
|
| 6 |
+
"modules": [ "flux_dev_uso"],
|
| 7 |
+
"URLs": "flux_srpo",
|
| 8 |
+
"loras": "flux_dev_uso"
|
| 9 |
+
},
|
| 10 |
+
"prompt": "the man is wearing a hat",
|
| 11 |
+
"embedded_guidance_scale": 4,
|
| 12 |
+
"resolution": "1024x1024",
|
| 13 |
+
"batch_size": 1
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
|
wan-gp-defaults.json/fun_inp.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Fun InP image2video 14B",
|
| 5 |
+
"architecture" : "fun_inp",
|
| 6 |
+
"description": "The Fun model is an alternative image 2 video that supports out of the box End Image fixing (contrary to the original Wan image 2 video model).",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_quanto_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_quanto_fp16_int8.safetensors"
|
| 11 |
+
]
|
| 12 |
+
}
|
| 13 |
+
}
|
wan-gp-defaults.json/fun_inp_1.3B.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Fun InP image2video 1.3B",
|
| 5 |
+
"architecture" : "fun_inp_1.3B",
|
| 6 |
+
"description": "The Fun model is an alternative image 2 video that supports out of the box End Image fixing (contrary to the original Wan image 2 video model). The 1.3B version also adds image 2 video capability.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_1.3B_bf16.safetensors"
|
| 9 |
+
]
|
| 10 |
+
}
|
| 11 |
+
}
|
wan-gp-defaults.json/hunyuan.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Text2video 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan",
|
| 6 |
+
"description": "Probably the best text 2 video model available.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_720_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_720_quanto_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_avatar.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Avatar 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan_avatar",
|
| 6 |
+
"description": "With the Hunyuan Video Avatar model you can animate a person based on the content of an audio input. Please note that the video generator works by processing 128 frames segment at a time (even if you ask less). The good news is that it will concatenate multiple segments for long video generation (max 3 segments recommended as the quality will get worse).",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_avatar_720_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_avatar_720_quanto_bf16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_custom.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Custom 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan_custom",
|
| 6 |
+
"description": "The Hunyuan Video Custom model is probably the best model to transfer people (only people for the moment) as it is quite good at keeping their identity. However it is slow: to get good results, you need to generate 720p videos with 30 steps.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_720_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_720_quanto_bf16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_custom_audio.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Custom Audio 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan_custom_audio",
|
| 6 |
+
"description": "The Hunyuan Video Custom Audio model can be used to generate scenes of a person speaking given a Reference Image and a Recorded Voice or Song. The reference image is not a start image and therefore one can represent the person in a different context. The video length can be anything up to 10s. It is also quite good to generate no sound Video based on a person.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_audio_720_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_audio_720_quanto_bf16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_custom_edit.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Custom Edit 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan_custom_edit",
|
| 6 |
+
"description": "The Hunyuan Video Custom Edit model can be used to do Video inpainting on a person (add accessories or completely replace the person). You will need in any case to define a Video Mask which will indicate which area of the Video should be edited.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_edit_720_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_edit_720_quanto_bf16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_i2v.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Hunyuan Video Image2video 720p 13B",
|
| 5 |
+
"architecture" : "hunyuan_i2v",
|
| 6 |
+
"description": "A good looking image 2 video model, but not so good in prompt adherence.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_i2v_720_bf16v2.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_i2v_720_quanto_int8v2.safetensors"
|
| 10 |
+
]
|
| 11 |
+
}
|
| 12 |
+
}
|
wan-gp-defaults.json/hunyuan_t2v_accvideo.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Hunyuan Video Text2video 720p AccVideo 13B",
|
| 4 |
+
"architecture": "hunyuan",
|
| 5 |
+
"description": "AccVideo is a novel efficient distillation method to accelerate video diffusion models with a synthetic dataset. Our method is 8.5x faster than HunyuanVideo.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/accvideo_hunyuan_video_720_quanto_int8.safetensors"
|
| 8 |
+
],
|
| 9 |
+
"preload_URLs": [
|
| 10 |
+
],
|
| 11 |
+
"auto_quantize": true
|
| 12 |
+
},
|
| 13 |
+
"negative_prompt": "",
|
| 14 |
+
"resolution": "832x480",
|
| 15 |
+
"video_length": 81,
|
| 16 |
+
"seed": 42,
|
| 17 |
+
"num_inference_steps": 5,
|
| 18 |
+
"flow_shift": 7,
|
| 19 |
+
"embedded_guidance_scale": 6,
|
| 20 |
+
"repeat_generation": 1,
|
| 21 |
+
"loras_multipliers": "",
|
| 22 |
+
"temporal_upsampling": "",
|
| 23 |
+
"spatial_upsampling": "",
|
| 24 |
+
"RIFLEx_setting": 0,
|
| 25 |
+
"slg_start_perc": 10,
|
| 26 |
+
"slg_end_perc": 90,
|
| 27 |
+
"prompt_enhancer": "",
|
| 28 |
+
"activated_loras": [
|
| 29 |
+
]
|
| 30 |
+
}
|
wan-gp-defaults.json/hunyuan_t2v_fast.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Hunyuan Video Text2video 720p FastHunyuan 13B",
|
| 4 |
+
"architecture": "hunyuan",
|
| 5 |
+
"description": "Fast Hunyuan is an accelerated HunyuanVideo model. It can sample high quality videos with 6 diffusion steps.",
|
| 6 |
+
"settings_dir": [ "" ],
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/fast_hunyuan_video_720_quanto_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"preload_URLs": [
|
| 11 |
+
"https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/fast_hunyuan_video_720_quanto_int8_map.json"
|
| 12 |
+
],
|
| 13 |
+
"auto_quantize": true
|
| 14 |
+
},
|
| 15 |
+
"negative_prompt": "",
|
| 16 |
+
"resolution": "832x480",
|
| 17 |
+
"video_length": 81,
|
| 18 |
+
"seed": 42,
|
| 19 |
+
"num_inference_steps": 6,
|
| 20 |
+
"flow_shift": 17,
|
| 21 |
+
"embedded_guidance_scale": 6,
|
| 22 |
+
"repeat_generation": 1,
|
| 23 |
+
"loras_multipliers": "",
|
| 24 |
+
"temporal_upsampling": "",
|
| 25 |
+
"spatial_upsampling": "",
|
| 26 |
+
"RIFLEx_setting": 0,
|
| 27 |
+
"slg_start_perc": 10,
|
| 28 |
+
"slg_end_perc": 90,
|
| 29 |
+
"prompt_enhancer": "",
|
| 30 |
+
"activated_loras": [
|
| 31 |
+
]
|
| 32 |
+
}
|
wan-gp-defaults.json/i2v.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.1 Image2video 480p 14B",
|
| 5 |
+
"architecture" : "i2v",
|
| 6 |
+
"description": "The standard Wan Image 2 Video specialized to generate 480p images. It also offers Start and End Image support (End Image is not supported in the original model but seems to work well)",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_mbf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_quanto_mbf16_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_quanto_mfp16_int8.safetensors"
|
| 11 |
+
]
|
| 12 |
+
}
|
| 13 |
+
}
|
wan-gp-defaults.json/i2v_2_2.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.2 Image2video 14B",
|
| 5 |
+
"architecture" : "i2v_2_2",
|
| 6 |
+
"description": "Wan 2.2 Image 2 Video model. Contrary to the Wan Image2video 2.1 this model is structurally close to the t2v model. You will need consequently to store Loras for this model in the t2v Lora Folder.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_mbf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_quanto_mbf16_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_quanto_mfp16_int8.safetensors"
|
| 11 |
+
],
|
| 12 |
+
"URLs2": [
|
| 13 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_mbf16.safetensors",
|
| 14 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_quanto_mbf16_int8.safetensors",
|
| 15 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_quanto_mfp16_int8.safetensors"
|
| 16 |
+
],
|
| 17 |
+
"group": "wan2_2"
|
| 18 |
+
},
|
| 19 |
+
"guidance_phases": 2,
|
| 20 |
+
"switch_threshold" : 900,
|
| 21 |
+
"guidance_scale" : 3.5,
|
| 22 |
+
"guidance2_scale" : 3.5,
|
| 23 |
+
"flow_shift" : 5
|
| 24 |
+
|
| 25 |
+
}
|
wan-gp-defaults.json/i2v_2_2_multitalk.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.2 Multitalk 14B",
|
| 5 |
+
"architecture" : "i2v_2_2_multitalk",
|
| 6 |
+
"description": "The Multitalk module of Wan 2.1 has been combined with the Wan 2.2 image 2 video. It lets up to two people have a conversation.",
|
| 7 |
+
"modules": ["multitalk"],
|
| 8 |
+
"URLs": "i2v_2_2",
|
| 9 |
+
"URLs2": "i2v_2_2",
|
| 10 |
+
"group": "wan2_2",
|
| 11 |
+
"visible": false
|
| 12 |
+
},
|
| 13 |
+
"switch_threshold" : 900,
|
| 14 |
+
"guidance_scale" : 3.5,
|
| 15 |
+
"guidance2_scale" : 3.5,
|
| 16 |
+
"flow_shift" : 5
|
| 17 |
+
|
| 18 |
+
}
|
wan-gp-defaults.json/i2v_720p.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.1 Image2video 720p 14B",
|
| 5 |
+
"architecture" : "i2v",
|
| 6 |
+
"description": "The standard Wan Image 2 Video specialized to generate 720p images. It also offers Start and End Image support (End Image is not supported in the original model but seems to work well).",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_mbf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_quanto_mbf16_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_quanto_mfp16_int8.safetensors"
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
"resolution": "1280x720"
|
| 14 |
+
}
|
wan-gp-defaults.json/i2v_fusionix.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Wan2.1 Image2video 480p FusioniX 14B",
|
| 5 |
+
"architecture" : "i2v",
|
| 6 |
+
"description": "A powerful merged image-to-video model based on the original WAN 2.1 I2V model, enhanced using multiple open-source components and LoRAs to boost motion realism, temporal consistency, and expressive detail.",
|
| 7 |
+
"URLs": "i2v",
|
| 8 |
+
"settings_dir": [ "" ],
|
| 9 |
+
"loras": ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"]
|
| 10 |
+
}
|
| 11 |
+
}
|
wan-gp-defaults.json/infinitetalk.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Infinitetalk Single Speaker 480p 14B",
|
| 4 |
+
"architecture": "infinitetalk",
|
| 5 |
+
"modules": [
|
| 6 |
+
[
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_mbf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_quanto_mbf16_int8.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_quanto_mfp16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
],
|
| 12 |
+
"description": "The Infinitetalk model is an improved version of Multitalk that supports very long videos. This is the single speaker version. Sliding Window size must be 81 frames to get smooth transitions between shots.",
|
| 13 |
+
"one_speaker_only": true,
|
| 14 |
+
"URLs": "i2v"
|
| 15 |
+
}
|
| 16 |
+
}
|
wan-gp-defaults.json/infinitetalk_multi.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Infinitetalk Multi Speakers 480p 14B",
|
| 4 |
+
"architecture": "infinitetalk",
|
| 5 |
+
"modules": [
|
| 6 |
+
[
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_mbf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_quanto_mfp16_int8.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_quanto_mbf16_int8.safetensors"
|
| 10 |
+
]
|
| 11 |
+
],
|
| 12 |
+
"description": "The Infinitetalk model is an improved version of Multitalk that supports very long videos. This is the multi speakers version. Sliding Window size must be 81 frames to get smooth transitions between shots.",
|
| 13 |
+
"multi_speakers_only": true,
|
| 14 |
+
"URLs": "i2v"
|
| 15 |
+
}
|
| 16 |
+
}
|
wan-gp-defaults.json/ltxv_13B.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "LTX Video 0.9.8 13B",
|
| 5 |
+
"architecture" : "ltxv_13B",
|
| 6 |
+
"description": "LTX Video is a fast model that can be used to generate very very long videos (up to 1800 frames!). It is recommended to keep the number of steps to 30 or you will need to update the file 'ltxv_video/configs/ltxv-13b-0.9.8-dev.yaml'. The LTX Video model expects very long prompts, so don't hesitate to use the Prompt Enhancer.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_dev_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_dev_quanto_bf16_int8.safetensors"
|
| 10 |
+
],
|
| 11 |
+
"preload_URLs" : [
|
| 12 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-pose-control-diffusers.safetensors",
|
| 13 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-depth-control-diffusers.safetensors",
|
| 14 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-canny-control-diffusers.safetensors"
|
| 15 |
+
],
|
| 16 |
+
"LTXV_config": "models/ltx_video/configs/ltxv-13b-0.9.8-dev.yaml"
|
| 17 |
+
},
|
| 18 |
+
"num_inference_steps": 30
|
| 19 |
+
}
|
wan-gp-defaults.json/ltxv_distilled.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "LTX Video 0.9.8 Distilled 13B",
|
| 5 |
+
"architecture" : "ltxv_13B",
|
| 6 |
+
"description": "LTX Video is a fast model that can be used to generate very long videos (up to 1800 frames!). This distilled version is a very fast version and retains a high level of quality. The LTX Video model expects very long prompts, so don't hesitate to use the Prompt Enhancer.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_distilled_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_distilled_quanto_bf16_int8.safetensors"
|
| 10 |
+
],
|
| 11 |
+
"preload_URLs" : "ltxv_13B",
|
| 12 |
+
"LTXV_config": "models/ltx_video/configs/ltxv-13b-0.9.8-distilled.yaml"
|
| 13 |
+
},
|
| 14 |
+
"num_inference_steps": 6
|
| 15 |
+
}
|
wan-gp-defaults.json/lucy_edit.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Wan2.2 Lucy Edit 5B",
|
| 4 |
+
"architecture": "lucy_edit",
|
| 5 |
+
"description": "Lucy Edit is a video editing model that performs instruction-guided edits on videos using free-text prompts. It supports a variety of edits, such as clothing & accessory changes, character changes, object insertions, and scene replacements while preserving the motion and composition perfectly.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_mbf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_quanto_mbf16_int8.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_quanto_mfp16_int8.safetensors"
|
| 10 |
+
],
|
| 11 |
+
"settings_dir": "ti2v_2_2",
|
| 12 |
+
"group": "wan2_2"
|
| 13 |
+
},
|
| 14 |
+
"prompt": "change the clothes to red",
|
| 15 |
+
"video_length": 81,
|
| 16 |
+
"guidance_scale": 5,
|
| 17 |
+
"flow_shift": 5,
|
| 18 |
+
"num_inference_steps": 30,
|
| 19 |
+
"resolution": "1280x720"
|
| 20 |
+
}
|
wan-gp-defaults.json/lucy_edit_fastwan.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Wan2.2 Lucy Edit FastWan 5B",
|
| 4 |
+
"architecture": "lucy_edit",
|
| 5 |
+
"description": "Lucy Edit is a video editing model that performs instruction-guided edits on videos using free-text prompts. It supports a variety of edits, such as clothing & accessory changes, character changes, object insertions, and scene replacements while preserving the motion and composition perfectly. This is the FastWan version for faster generation.",
|
| 6 |
+
"URLs": "lucy_edit",
|
| 7 |
+
"group": "wan2_2",
|
| 8 |
+
"settings_dir": [ "" ],
|
| 9 |
+
"loras": "ti2v_2_2_fastwan"
|
| 10 |
+
},
|
| 11 |
+
"prompt": "change the clothes to red",
|
| 12 |
+
"video_length": 81,
|
| 13 |
+
"guidance_scale": 1,
|
| 14 |
+
"flow_shift": 3,
|
| 15 |
+
"num_inference_steps": 5,
|
| 16 |
+
"resolution": "1280x720"
|
| 17 |
+
}
|
wan-gp-defaults.json/lynx.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Wan2.1 Lynx 14B",
|
| 4 |
+
"modules": [
|
| 5 |
+
[
|
| 6 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_bf16.safetensors",
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_quanto_bf16_int8.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_quanto_fp16_int8.safetensors"
|
| 9 |
+
]
|
| 10 |
+
],
|
| 11 |
+
"architecture": "lynx",
|
| 12 |
+
"description": "The Lynx ControlNet offers State of the Art Identity Preservation. You need to provide a Reference Image which is a close up of a person face to transfer this person in the Video.",
|
| 13 |
+
"URLs": "t2v",
|
| 14 |
+
"preload_URLs": [
|
| 15 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_arc_resampler.safetensors"
|
| 16 |
+
]
|
| 17 |
+
}
|
| 18 |
+
}
|
wan-gp-defaults.json/moviigen.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "MoviiGen 1080p 14B",
|
| 5 |
+
"architecture" : "t2v",
|
| 6 |
+
"description": "MoviiGen 1.1, a cutting-edge video generation model that excels in cinematic aesthetics and visual quality. Use it to generate videos in 720p or 1080p in the 21:9 ratio.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_mbf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_quanto_mbf16_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_quanto_mfp16_int8.safetensors"
|
| 11 |
+
],
|
| 12 |
+
"auto_quantize": true
|
| 13 |
+
},
|
| 14 |
+
"resolution": "1280x720",
|
| 15 |
+
"video_length": 81
|
| 16 |
+
}
|
wan-gp-defaults.json/multitalk.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Multitalk 480p 14B",
|
| 5 |
+
"architecture" : "multitalk",
|
| 6 |
+
"modules": [
|
| 7 |
+
["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_mbf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_quanto_mbf16_int8.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_quanto_mfp16_int8.safetensors"]
|
| 10 |
+
],
|
| 11 |
+
"description": "The Multitalk model corresponds to the original Wan image 2 video model combined with the Multitalk module. It lets up to two people have a conversation.",
|
| 12 |
+
"URLs": "i2v",
|
| 13 |
+
"teacache_coefficients" : [-3.02331670e+02, 2.23948934e+02, -5.25463970e+01, 5.87348440e+00, -2.01973289e-01]
|
| 14 |
+
}
|
| 15 |
+
}
|
wan-gp-defaults.json/multitalk_720p.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Multitalk 720p 14B",
|
| 5 |
+
"architecture" : "multitalk",
|
| 6 |
+
"modules": ["multitalk"],
|
| 7 |
+
"description": "The Multitalk model corresponds to the original Wan image 2 video 720p model combined with the Multitalk module. It lets up to two people have a conversation.",
|
| 8 |
+
"URLs": "i2v_720p",
|
| 9 |
+
"teacache_coefficients" : [-114.36346466, 65.26524496, -18.82220707, 4.91518089, -0.23412683],
|
| 10 |
+
"auto_quantize": true
|
| 11 |
+
},
|
| 12 |
+
"resolution": "1280x720"
|
| 13 |
+
}
|
wan-gp-defaults.json/phantom_1.3B.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Phantom 1.3B",
|
| 5 |
+
"architecture" : "phantom_1.3B",
|
| 6 |
+
"description": "The Phantom model is specialized in transferring people or objects of your choice into a generated Video. It produces very nice results when used at 720p.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2_1_phantom_1.3B_mbf16.safetensors"
|
| 9 |
+
]
|
| 10 |
+
}
|
| 11 |
+
}
|
wan-gp-defaults.json/phantom_14B.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "Phantom 14B",
|
| 5 |
+
"architecture" : "phantom_14B",
|
| 6 |
+
"description": "The Phantom model is specialized in transferring people or objects of your choice into a generated Video. It produces very nice results when used at 720p.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_mbf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_quanto_mbf16_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_quanto_mfp16_int8.safetensors"
|
| 11 |
+
]
|
| 12 |
+
}
|
| 13 |
+
}
|
wan-gp-defaults.json/qwen_image_20B.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Qwen Image 20B",
|
| 4 |
+
"architecture": "qwen_image_20B",
|
| 5 |
+
"description": "Qwen Image is a generative model that will generate very high quality images. It is one of the few models capable of generating very long texts in the image.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_20B_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_20B_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"xresolutions": [ ["1328x1328 (1:1)", "1328x1328"],
|
| 11 |
+
["1664x928 (16:9)", "1664x928"],
|
| 12 |
+
["928x1664 (9:16)", "928x1664"],
|
| 13 |
+
["1472x1140 (4:3)", "1472x1140"],
|
| 14 |
+
["1140x1472 (3:4)", "1140x1472"]],
|
| 15 |
+
"attention": {"<89" : "sdpa"},
|
| 16 |
+
"image_outputs": true
|
| 17 |
+
},
|
| 18 |
+
"prompt": "draw a hat",
|
| 19 |
+
"resolution": "1280x720",
|
| 20 |
+
"batch_size": 1
|
| 21 |
+
}
|
wan-gp-defaults.json/qwen_image_edit_20B.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Qwen Image Edit 20B",
|
| 4 |
+
"architecture": "qwen_image_edit_20B",
|
| 5 |
+
"description": "Qwen Image Edit is a generative model that can generate very high quality images with long texts in it. Best results will be at 720p. Use it to edit a Subject or combine multiple Subjects. ",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_20B_bf16.safetensors",
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_20B_quanto_bf16_int8.safetensors"
|
| 9 |
+
],
|
| 10 |
+
"preload_URLs": ["https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_inpainting.safetensors"],
|
| 11 |
+
"attention": {
|
| 12 |
+
"<89": "sdpa"
|
| 13 |
+
}
|
| 14 |
+
},
|
| 15 |
+
"prompt": "add a hat",
|
| 16 |
+
"resolution": "1280x720",
|
| 17 |
+
"batch_size": 1
|
| 18 |
+
}
|
wan-gp-defaults.json/qwen_image_edit_plus_20B.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": {
|
| 3 |
+
"name": "Qwen Image Edit Plus 20B",
|
| 4 |
+
"architecture": "qwen_image_edit_plus_20B",
|
| 5 |
+
"description": "Qwen Image Edit Plus is a generative model that can generate very high quality images with long texts in it. Best results will be at 720p. This model is optimized to combine multiple Subjects & Objects.",
|
| 6 |
+
"URLs": [
|
| 7 |
+
"https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_plus_20B_quanto_bf16_int8.safetensors"
|
| 8 |
+
],
|
| 9 |
+
"preload_URLs": "qwen_image_edit_20B",
|
| 10 |
+
"attention": {
|
| 11 |
+
"<89": "sdpa"
|
| 12 |
+
}
|
| 13 |
+
},
|
| 14 |
+
"prompt": "add a hat",
|
| 15 |
+
"resolution": "1024x1024",
|
| 16 |
+
"batch_size": 1
|
| 17 |
+
}
|
wan-gp-defaults.json/recam_1.3B.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "ReCamMaster 1.3B",
|
| 5 |
+
"architecture" : "recam_1.3B",
|
| 6 |
+
"description": "The Recam Master in theory should allow you to replay a video by applying a different camera movement. The model supports only video that are at least 81 frames long (any frame beyond will be ignored)",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_recammaster_1.3B_bf16.safetensors"
|
| 9 |
+
]
|
| 10 |
+
}
|
| 11 |
+
}
|
wan-gp-defaults.json/sky_df_1.3B.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "SkyReels2 Diffusion Forcing 1.3B",
|
| 5 |
+
"architecture" : "sky_df_1.3B",
|
| 6 |
+
"description": "The SkyReels 2 Diffusion Forcing model has been designed to generate very long videos that exceeds the usual 5s limit. You can also use this model to extend any existing video.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_1.3B_mbf16.safetensors"
|
| 9 |
+
]
|
| 10 |
+
}
|
| 11 |
+
}
|
wan-gp-defaults.json/sky_df_14B.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model":
|
| 3 |
+
{
|
| 4 |
+
"name": "SkyReels2 Diffusion Forcing 540p 14B",
|
| 5 |
+
"architecture" : "sky_df_14B",
|
| 6 |
+
"description": "The SkyReels 2 Diffusion Forcing model has been designed to generate very long videos that exceeds the usual 5s limit. You can also use this model to extend any existing video.",
|
| 7 |
+
"URLs": [
|
| 8 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_bf16.safetensors",
|
| 9 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_quanto_int8.safetensors",
|
| 10 |
+
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_quanto_fp16_int8.safetensors"
|
| 11 |
+
]
|
| 12 |
+
}
|
| 13 |
+
}
|