diff --git a/wan-gp-defaults.json/ReadMe.txt b/wan-gp-defaults.json/ReadMe.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef8cbf69c6e612cd0b50a131300e6a086400e6db --- /dev/null +++ b/wan-gp-defaults.json/ReadMe.txt @@ -0,0 +1,13 @@ +Please do not modify any file in this Folder. + +If you want to change a property of a default model, copy the corresponding model file in the ./finetunes folder and modify the properties you want to change in the new file. +If a property is not in the new file, it will be inherited automatically from the default file that has the same file name. + +For instance to hide a model: + +{ + "model": + { + "visible": false + } +} diff --git a/wan-gp-defaults.json/alpha.json b/wan-gp-defaults.json/alpha.json new file mode 100644 index 0000000000000000000000000000000000000000..269c2596c7d9eea0e65fc84b4b796bfaadefac09 --- /dev/null +++ b/wan-gp-defaults.json/alpha.json @@ -0,0 +1,19 @@ +{ + "model": + { + "name": "Wan2.1 Alpha 14B", + "architecture" : "alpha", + "description": "This model successfully generates various scenes with accurate and clearly rendered transparency. Notably, it can synthesize diverse semi-transparent objects, glowing effects, and fine-grained details such as hair. For each video generated you will find a Zip file with the same name that will contain the corresponding RGBA images.", + "URLs": "t2v", + "preload_URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_vae_rgb_channel.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_vae_alpha_channel.safetensors" + ], + "loras": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan_alpha_2.1_dora.safetensors" + ], + "loras_multipliers": [ 1 ] + }, + "prompt": "A large orange octopus is seen resting. The background of the video is transparent." 
+ +} \ No newline at end of file diff --git a/wan-gp-defaults.json/alpha_sf.json b/wan-gp-defaults.json/alpha_sf.json new file mode 100644 index 0000000000000000000000000000000000000000..21e826c77e960e4dcc8b262c6c28460620d0fde2 --- /dev/null +++ b/wan-gp-defaults.json/alpha_sf.json @@ -0,0 +1,17 @@ +{ + "model": + { + "name": "Wan2.1 Alpha Lightning 14B", + "architecture" : "alpha", + "description": "This model is accelerated by the Lightning / SelfForcing process. It successfully generates various scenes with accurate and clearly rendered transparency. Notably, it can synthesize diverse semi-transparent objects, glowing effects, and fine-grained details such as hair. For each video generated you will find a Zip file with the same name that will contain the corresponding RGBA images.", + "URLs": "t2v_sf", + "preload_URLs": "alpha", + "loras": "alpha", + "loras_multipliers": [ 1 ], + "profiles_dir" : [""] + }, + "prompt": "A large orange octopus is seen resting. The background of the video is transparent.", + "num_inference_steps": 4, + "guidance_scale": 1, + "flow_shift": 3 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/animate.json b/wan-gp-defaults.json/animate.json new file mode 100644 index 0000000000000000000000000000000000000000..f385c3b69a15fb830af75a42d37676b6179bc3d5 --- /dev/null +++ b/wan-gp-defaults.json/animate.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Wan2.2 Animate 14B", + "architecture": "animate", + "description": "Wan-Animate takes a video and a character image as input, and generates a video in either 'Animation' or 'Replacement' mode. 
A Sliding Window of at least 81 frames is recommended to obtain the best Style continuity.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_quanto_fp16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_14B_quanto_bf16_int8.safetensors" + ], + "preload_URLs" : + [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_animate_relighting_lora.safetensors" + ], + "group": "wan2_2" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/chatterbox.json b/wan-gp-defaults.json/chatterbox.json new file mode 100644 index 0000000000000000000000000000000000000000..8c45e1dfce2997bf96fb36a091f70208b212bd4a --- /dev/null +++ b/wan-gp-defaults.json/chatterbox.json @@ -0,0 +1,18 @@ +{ + "model": { + "name": "TTS Chatterbox Multilingual", + "architecture": "chatterbox", + "description": "Resemble AI's open multilingual TTS with language selection via model mode.", + "URLs": [] + }, + "prompt": "Welcome to Chatterbox !", + "negative_prompt": "", + "audio_prompt_type": "A", + "model_mode": "en", + "repeat_generation": 1, + "video_length": 0, + "num_inference_steps": 0, + "pace": 0.5, + "exaggeration": 0.5, + "temperature": 0.8 +} diff --git a/wan-gp-defaults.json/fantasy.json b/wan-gp-defaults.json/fantasy.json new file mode 100644 index 0000000000000000000000000000000000000000..c35db4fee9900efe9c8adb3e6104f4817bcccd44 --- /dev/null +++ b/wan-gp-defaults.json/fantasy.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "Fantasy Talking 720p 14B", + "architecture" : "fantasy", + "modules": [ ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_fantasy_speaking_14B_bf16.safetensors"]], + "description": "The Fantasy Talking model corresponds to the original Wan image 2 video model combined with the Fantasy Speaking module to process an audio Input.", + "URLs": "i2v_720p" + }, 
+ "resolution": "1280x720" +} diff --git a/wan-gp-defaults.json/flf2v_720p.json b/wan-gp-defaults.json/flf2v_720p.json new file mode 100644 index 0000000000000000000000000000000000000000..e55fbf4c67fe4105bf485ad188bad6335651c930 --- /dev/null +++ b/wan-gp-defaults.json/flf2v_720p.json @@ -0,0 +1,16 @@ +{ + "model": + { + "name": "First Last Frame to Video 720p (FLF2V) 14B", + "architecture" : "flf2v_720p", + "visible" : true, + "description": "The First Last Frame 2 Video model is the official model Image 2 Video model that supports Start and End frames.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_FLF2V_720p_14B_quanto_mfp16_int8.safetensors" + ], + "auto_quantize": true + }, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux.json b/wan-gp-defaults.json/flux.json new file mode 100644 index 0000000000000000000000000000000000000000..d9fafb2c0c206ab2c3e8e5ca71def10a889ba3be --- /dev/null +++ b/wan-gp-defaults.json/flux.json @@ -0,0 +1,15 @@ +{ + "model": { + "name": "Flux 1 Dev 12B", + "architecture": "flux", + "description": "FLUX.1 Dev is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev_quanto_bf16_int8.safetensors" + ], + "image_outputs": true + }, + "prompt": "draw a hat", + "resolution": "1280x720", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_chroma.json b/wan-gp-defaults.json/flux_chroma.json new file mode 100644 index 0000000000000000000000000000000000000000..dea6b1a1ed96bcaee1cec104b05e661ee0b6b08b --- 
/dev/null +++ b/wan-gp-defaults.json/flux_chroma.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Flux 1 Chroma 1 HD 8.9B", + "architecture": "flux_chroma", + "description": "FLUX.1 Chroma is an 8.9 billion parameter model. As a base model, Chroma1 is intentionally designed to be an excellent starting point for finetuning. It provides a strong, neutral foundation for developers, researchers, and artists to create specialized models.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-chroma_hd_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-chroma_hd_quanto_bf16_int8.safetensors" + ], + "image_outputs": true + }, + "prompt": "draw a hat", + "resolution": "1280x720", + "guidance_scale": 3.0, + "num_inference_steps": 20, + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_dev_kontext.json b/wan-gp-defaults.json/flux_dev_kontext.json new file mode 100644 index 0000000000000000000000000000000000000000..4b845005b0af019d394b5ef2da6269b493eff403 --- /dev/null +++ b/wan-gp-defaults.json/flux_dev_kontext.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Flux 1 Dev Kontext 12B", + "architecture": "flux_dev_kontext", + "description": "FLUX.1 Kontext is a 12 billion parameter rectified flow transformer capable of editing images based on instructions stored in the Prompt. 
Please be aware that Flux Kontext is picky on the resolution of the input image and the output dimensions may not match the dimensions of the input image.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_quanto_bf16_int8.safetensors" + ] + }, + "prompt": "add a hat", + "resolution": "1280x720", + "batch_size": 1 +} + + \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_dev_kontext_dreamomni2.json b/wan-gp-defaults.json/flux_dev_kontext_dreamomni2.json new file mode 100644 index 0000000000000000000000000000000000000000..6c78d2a008f75aed7c0552ef9224cd48fbbebdd0 --- /dev/null +++ b/wan-gp-defaults.json/flux_dev_kontext_dreamomni2.json @@ -0,0 +1,19 @@ +{ + "model": { + "name": "Flux 1 DreamOmni2 12B", + "architecture": "flux_dev_kontext_dreamomni2", + "description": "DreamOmni2 is a Multimodal Instruction-based Editing and Generation Model", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1_kontext_dev_quanto_bf16_int8.safetensors" + ], + "preload_URLs": [ "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux_dreamomni2_edit_lora.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux_dreamomni2_gen_lora.safetensors" + ] + }, + "prompt": "In the scene, the character from the first image stands on the left, and the character from the second image stands on the right. 
They are shaking hands against the backdrop of a spaceship interior.", + "resolution": "1280x720", + "batch_size": 1 +} + + \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_dev_umo.json b/wan-gp-defaults.json/flux_dev_umo.json new file mode 100644 index 0000000000000000000000000000000000000000..c248e517f260fbf9ae5ab6578595f13e13062d8b --- /dev/null +++ b/wan-gp-defaults.json/flux_dev_umo.json @@ -0,0 +1,23 @@ +{ + "model": { + "name": "Flux 1 UMO Dev 12B", + "architecture": "flux_dev_umo", + "description": "FLUX.1 UMO Dev is a model that can Edit Images with a specialization in combining multiple image references (resized internally at 512x512 max) to produce an Image output. Best Image preservation at 768x768 Resolution Output.", + "URLs": "flux", + "loras": ["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-UMO_dit_lora_bf16.safetensors"], + "resolutions": [ ["1024x1024 (1:1)", "1024x1024"], + ["768x1024 (3:4)", "768x1024"], + ["1024x768 (4:3)", "1024x768"], + ["512x1024 (1:2)", "512x1024"], + ["1024x512 (2:1)", "1024x512"], + ["768x768 (1:1)", "768x768"], + ["768x512 (3:2)", "768x512"], + ["512x768 (2:3)", "512x768"]] + }, + "prompt": "the man is wearing a hat", + "embedded_guidance_scale": 4, + "resolution": "768x768", + "batch_size": 1 +} + + \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_dev_uso.json b/wan-gp-defaults.json/flux_dev_uso.json new file mode 100644 index 0000000000000000000000000000000000000000..93cf2aecefe6eaad736b7f259f419ed44092a5ab --- /dev/null +++ b/wan-gp-defaults.json/flux_dev_uso.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Flux 1 USO Dev 12B", + "architecture": "flux_dev_uso", + "description": "FLUX.1 USO Dev is a model that can Edit Images with a specialization in Style Transfers (up to two).", + "modules": [ ["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-USO_projector_bf16.safetensors"]], + "URLs": "flux", + "loras": 
["https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-dev-USO_dit_lora_bf16.safetensors"] + }, + "prompt": "the man is wearing a hat", + "embedded_guidance_scale": 4, + "resolution": "1024x1024", + "batch_size": 1 +} + + \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_krea.json b/wan-gp-defaults.json/flux_krea.json new file mode 100644 index 0000000000000000000000000000000000000000..642278d294b4cab7766d16f00932c48a30181960 --- /dev/null +++ b/wan-gp-defaults.json/flux_krea.json @@ -0,0 +1,15 @@ +{ + "model": { + "name": "Flux 1 Dev Krea 12B", + "architecture": "flux", + "description": "Cutting-edge output quality, with a focus on aesthetic photography.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-krea-dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-krea-dev_quanto_bf16_int8.safetensors" + ], + "image_outputs": true + }, + "prompt": "draw a hat", + "resolution": "1280x720", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_schnell.json b/wan-gp-defaults.json/flux_schnell.json new file mode 100644 index 0000000000000000000000000000000000000000..ac7b98351cd7b1e9d261755afc9e5e88d1ba8f8f --- /dev/null +++ b/wan-gp-defaults.json/flux_schnell.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Flux 1 Schnell 12B", + "architecture": "flux_schnell", + "description": "FLUX.1 Schnell is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. 
As a distilled model it requires fewer denoising steps.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-schnell_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-schnell_quanto_bf16_int8.safetensors" + ], + "image_outputs": true + }, + "prompt": "draw a hat", + "resolution": "1280x720", + "num_inference_steps": 10, + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_srpo.json b/wan-gp-defaults.json/flux_srpo.json new file mode 100644 index 0000000000000000000000000000000000000000..1e1a2a94f26fd674e309afeb17e4a31bd71295c5 --- /dev/null +++ b/wan-gp-defaults.json/flux_srpo.json @@ -0,0 +1,14 @@ +{ + "model": { + "name": "Flux 1 Dev SRPO 12B", + "architecture": "flux", + "description": "By fine-tuning the FLUX.1.dev model with optimized denoising and online reward adjustment, SRPO improves its human-evaluated realism and aesthetic quality by over 3x.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-srpo-dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Flux/resolve/main/flux1-srpo-dev_quanto_bf16_int8.safetensors" + ] + }, + "prompt": "draw a hat", + "resolution": "1024x1024", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/flux_srpo_uso.json b/wan-gp-defaults.json/flux_srpo_uso.json new file mode 100644 index 0000000000000000000000000000000000000000..acd8831f546c02ebdc45e7052bd0db3cd31637f2 --- /dev/null +++ b/wan-gp-defaults.json/flux_srpo_uso.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Flux 1 USO SRPO 12B", + "architecture": "flux_dev_uso", + "description": "FLUX.1 USO SRPO is a model that can Edit Images with a specialization in Style Transfers (up to two). 
It leverages the improved Image quality brought by the SRPO process", + "modules": [ "flux_dev_uso"], + "URLs": "flux_srpo", + "loras": "flux_dev_uso" + }, + "prompt": "the man is wearing a hat", + "embedded_guidance_scale": 4, + "resolution": "1024x1024", + "batch_size": 1 +} + + \ No newline at end of file diff --git a/wan-gp-defaults.json/fun_inp.json b/wan-gp-defaults.json/fun_inp.json new file mode 100644 index 0000000000000000000000000000000000000000..ac8dfb49ffe9c77d7e4d5e70785b933a9bc4ee7d --- /dev/null +++ b/wan-gp-defaults.json/fun_inp.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "Fun InP image2video 14B", + "architecture" : "fun_inp", + "description": "The Fun model is an alternative image 2 video that supports out the box End Image fixing (contrary to the original Wan image 2 video model).", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_quanto_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_14B_quanto_fp16_int8.safetensors" + ] + } +} diff --git a/wan-gp-defaults.json/fun_inp_1.3B.json b/wan-gp-defaults.json/fun_inp_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..dd4c22c6eb2e1da676026f166f84f2e6c494d75e --- /dev/null +++ b/wan-gp-defaults.json/fun_inp_1.3B.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "Fun InP image2video 1.3B", + "architecture" : "fun_inp_1.3B", + "description": "The Fun model is an alternative image 2 video that supports out the box End Image fixing (contrary to the original Wan image 2 video model). 
This version also adds image 2 video capability to the 1.3B model.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Fun_InP_1.3B_bf16.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan.json b/wan-gp-defaults.json/hunyuan.json new file mode 100644 index 0000000000000000000000000000000000000000..a28676427468ae90e617c9c753573c41e4eb4929 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Text2video 720p 13B", + "architecture" : "hunyuan", + "description": "Probably the best text 2 video model available.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_720_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_720_quanto_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_avatar.json b/wan-gp-defaults.json/hunyuan_avatar.json new file mode 100644 index 0000000000000000000000000000000000000000..54120c00bffbb54bb383b0de842ed8ccaabfa114 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_avatar.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Avatar 720p 13B", + "architecture" : "hunyuan_avatar", + "description": "With the Hunyuan Video Avatar model you can animate a person based on the content of an audio input. Please note that the video generator works by processing 128 frames segment at a time (even if you ask less). 
The good news is that it will concatenate multiple segments for long video generation (max 3 segments recommended as the quality will get worse).", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_avatar_720_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_avatar_720_quanto_bf16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_custom.json b/wan-gp-defaults.json/hunyuan_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..ffaa8196c1fb7302b15491ff220ff4fb00dc9d8e --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_custom.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Custom 720p 13B", + "architecture" : "hunyuan_custom", + "description": "The Hunyuan Video Custom model is probably the best model to transfer people (only people for the moment) as it is quite good to keep their identity. However it is slow as to get good results, you need to generate 720p videos with 30 steps.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_720_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_720_quanto_bf16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_custom_audio.json b/wan-gp-defaults.json/hunyuan_custom_audio.json new file mode 100644 index 0000000000000000000000000000000000000000..7026b81e22566d60ec03d4b845e3c50dc212887b --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_custom_audio.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Custom Audio 720p 13B", + "architecture" : "hunyuan_custom_audio", + "description": "The Hunyuan Video Custom Audio model can be used to generate scenes of a person speaking given a Reference Image and a Recorded Voice or Song. 
The reference image is not a start image and therefore one can represent the person in a different context. The video length can be anything up to 10s. It is also quite good to generate no sound Video based on a person.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_audio_720_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_audio_720_quanto_bf16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_custom_edit.json b/wan-gp-defaults.json/hunyuan_custom_edit.json new file mode 100644 index 0000000000000000000000000000000000000000..477b3012a7d1e20b7ea5584b3a4c55bf3fe89fc4 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_custom_edit.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Custom Edit 720p 13B", + "architecture" : "hunyuan_custom_edit", + "description": "The Hunyuan Video Custom Edit model can be used to do Video inpainting on a person (add accessories or completely replace the person). 
You will need in any case to define a Video Mask which will indicate which area of the Video should be edited.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_edit_720_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_custom_edit_720_quanto_bf16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_i2v.json b/wan-gp-defaults.json/hunyuan_i2v.json new file mode 100644 index 0000000000000000000000000000000000000000..d15cd83667ee4eb9a4874afbb85971c6a8258f23 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_i2v.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Hunyuan Video Image2video 720p 13B", + "architecture" : "hunyuan_i2v", + "description": "A good looking image 2 video model, but not so good in prompt adherence.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_i2v_720_bf16v2.safetensors", + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/hunyuan_video_i2v_720_quanto_int8v2.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_t2v_accvideo.json b/wan-gp-defaults.json/hunyuan_t2v_accvideo.json new file mode 100644 index 0000000000000000000000000000000000000000..51ad674f60a5ba9ea813d5766777e2939823e627 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_t2v_accvideo.json @@ -0,0 +1,30 @@ +{ + "model": { + "name": "Hunyuan Video Text2video 720p AccVideo 13B", + "architecture": "hunyuan", + "description": "AccVideo is a novel efficient distillation method to accelerate video diffusion models with a synthetic dataset. 
Our method is 8.5x faster than HunyuanVideo.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/accvideo_hunyuan_video_720_quanto_int8.safetensors" + ], + "preload_URLs": [ + ], + "auto_quantize": true + }, + "negative_prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": 42, + "num_inference_steps": 5, + "flow_shift": 7, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_start_perc": 10, + "slg_end_perc": 90, + "prompt_enhancer": "", + "activated_loras": [ + ] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/hunyuan_t2v_fast.json b/wan-gp-defaults.json/hunyuan_t2v_fast.json new file mode 100644 index 0000000000000000000000000000000000000000..13c4c956320186953a8edbb9b2857915eb4dc344 --- /dev/null +++ b/wan-gp-defaults.json/hunyuan_t2v_fast.json @@ -0,0 +1,32 @@ +{ + "model": { + "name": "Hunyuan Video Text2video 720p FastHunyuan 13B", + "architecture": "hunyuan", + "description": "Fast Hunyuan is an accelerated HunyuanVideo model. 
It can sample high quality videos with 6 diffusion steps.", + "settings_dir": [ "" ], + "URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/fast_hunyuan_video_720_quanto_int8.safetensors" + ], + "preload_URLs": [ + "https://huggingface.co/DeepBeepMeep/HunyuanVideo/resolve/main/fast_hunyuan_video_720_quanto_int8_map.json" + ], + "auto_quantize": true + }, + "negative_prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": 42, + "num_inference_steps": 6, + "flow_shift": 17, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_start_perc": 10, + "slg_end_perc": 90, + "prompt_enhancer": "", + "activated_loras": [ + ] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/i2v.json b/wan-gp-defaults.json/i2v.json new file mode 100644 index 0000000000000000000000000000000000000000..f9592c0851994c3260c5ad0b22737597387df80f --- /dev/null +++ b/wan-gp-defaults.json/i2v.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "Wan2.1 Image2video 480p 14B", + "architecture" : "i2v", + "description": "The standard Wan Image 2 Video specialized to generate 480p images. 
It also offers Start and End Image support (End Image is not supported in the original model but seems to work well)", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_480p_14B_quanto_mfp16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/i2v_2_2.json b/wan-gp-defaults.json/i2v_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..1e222f477143c03c34d2d89fed590def54f81e0f --- /dev/null +++ b/wan-gp-defaults.json/i2v_2_2.json @@ -0,0 +1,25 @@ +{ + "model": + { + "name": "Wan2.2 Image2video 14B", + "architecture" : "i2v_2_2", + "description": "Wan 2.2 Image 2 Video model. Contrary to the Wan Image2video 2.1 this model is structurally close to the t2v model. You will need consequently to store Loras for this model in the t2v Lora Folder.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_high_quanto_mfp16_int8.safetensors" + ], + "URLs2": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_image2video_14B_low_quanto_mfp16_int8.safetensors" + ], + "group": "wan2_2" + }, + "guidance_phases": 2, + "switch_threshold" : 900, + "guidance_scale" : 3.5, + "guidance2_scale" : 3.5, + "flow_shift" : 5 + +} \ No newline at end of file diff --git 
a/wan-gp-defaults.json/i2v_2_2_multitalk.json b/wan-gp-defaults.json/i2v_2_2_multitalk.json new file mode 100644 index 0000000000000000000000000000000000000000..212562887b8675322226af24fa83c289f68acc67 --- /dev/null +++ b/wan-gp-defaults.json/i2v_2_2_multitalk.json @@ -0,0 +1,18 @@ +{ + "model": + { + "name": "Wan2.2 Multitalk 14B", + "architecture" : "i2v_2_2_multitalk", + "description": "The Multitalk module of Wan 2.1 has been combined with the Wan 2.2 image 2 video. It lets you have up to two people have a conversation.", + "modules": ["multitalk"], + "URLs": "i2v_2_2", + "URLs2": "i2v_2_2", + "group": "wan2_2", + "visible": false + }, + "switch_threshold" : 900, + "guidance_scale" : 3.5, + "guidance2_scale" : 3.5, + "flow_shift" : 5 + +} \ No newline at end of file diff --git a/wan-gp-defaults.json/i2v_720p.json b/wan-gp-defaults.json/i2v_720p.json new file mode 100644 index 0000000000000000000000000000000000000000..9ccee6b0fe1e5cc046388607d688343ad8293cd0 --- /dev/null +++ b/wan-gp-defaults.json/i2v_720p.json @@ -0,0 +1,14 @@ +{ + "model": + { + "name": "Wan2.1 Image2video 720p 14B", + "architecture" : "i2v", + "description": "The standard Wan Image 2 Video specialized to generate 720p images. 
It also offers Start and End Image support (End Image is not supported in the original model but seems to work well).", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_image2video_720p_14B_quanto_mfp16_int8.safetensors" + ] + }, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/i2v_fusionix.json b/wan-gp-defaults.json/i2v_fusionix.json new file mode 100644 index 0000000000000000000000000000000000000000..d4939bfbcab4d1f83713964fd824b52b99976800 --- /dev/null +++ b/wan-gp-defaults.json/i2v_fusionix.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "Wan2.1 Image2video 480p FusioniX 14B", + "architecture" : "i2v", + "description": "A powerful merged image-to-video model based on the original WAN 2.1 I2V model, enhanced using multiple open-source components and LoRAs to boost motion realism, temporal consistency, and expressive detail.", + "URLs": "i2v", + "settings_dir": [ "" ], + "loras": ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/infinitetalk.json b/wan-gp-defaults.json/infinitetalk.json new file mode 100644 index 0000000000000000000000000000000000000000..441be8cfe87e0daf6f173860877522774ad08ac6 --- /dev/null +++ b/wan-gp-defaults.json/infinitetalk.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Infinitetalk Single Speaker 480p 14B", + "architecture": "infinitetalk", + "modules": [ + [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_quanto_mbf16_int8.safetensors", + 
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_single_14B_quanto_mfp16_int8.safetensors" + ] + ], + "description": "The Infinitetalk model is an improved version of Multitalk that supports very long videos. This is the single speaker version. Sliding Window size must be 81 frames to get smooth transitions between shots.", + "one_speaker_only": true, + "URLs": "i2v" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/infinitetalk_multi.json b/wan-gp-defaults.json/infinitetalk_multi.json new file mode 100644 index 0000000000000000000000000000000000000000..260a3048b18c78776fe7be110dbb3f52e6966523 --- /dev/null +++ b/wan-gp-defaults.json/infinitetalk_multi.json @@ -0,0 +1,16 @@ +{ + "model": { + "name": "Infinitetalk Multi Speakers 480p 14B", + "architecture": "infinitetalk", + "modules": [ + [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_quanto_mfp16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_infinitetalk_multi_14B_quanto_mbf16_int8.safetensors" + ] + ], + "description": "The Infinitetalk model is an improved version of Multitalk that supports very long videos. 
This is the multi speakers version. Sliding Window size must be 81 frames to get smooth transitions between shots.", + "multi_speakers_only": true, + "URLs": "i2v" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/ltxv_13B.json b/wan-gp-defaults.json/ltxv_13B.json new file mode 100644 index 0000000000000000000000000000000000000000..09168d59e6462739d4f1744651d73d7d66240cfe --- /dev/null +++ b/wan-gp-defaults.json/ltxv_13B.json @@ -0,0 +1,19 @@ +{ + "model": + { + "name": "LTX Video 0.9.8 13B", + "architecture" : "ltxv_13B", + "description": "LTX Video is a fast model that can be used to generate very long videos (up to 1800 frames!). It is recommended to keep the number of steps to 30 or you will need to update the file 'ltxv_video/configs/ltxv-13b-0.9.8-dev.yaml'. The LTX Video model expects very long prompts, so don't hesitate to use the Prompt Enhancer.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_dev_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_dev_quanto_bf16_int8.safetensors" + ], + "preload_URLs" : [ + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-pose-control-diffusers.safetensors", + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-depth-control-diffusers.safetensors", + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv-097-ic-lora-canny-control-diffusers.safetensors" + ], + "LTXV_config": "models/ltx_video/configs/ltxv-13b-0.9.8-dev.yaml" + }, + "num_inference_steps": 30 +} diff --git a/wan-gp-defaults.json/ltxv_distilled.json b/wan-gp-defaults.json/ltxv_distilled.json new file mode 100644 index 0000000000000000000000000000000000000000..cb54338c71bcb99efee8501b3e6fdf0433972060 --- /dev/null +++ b/wan-gp-defaults.json/ltxv_distilled.json @@ -0,0 +1,15 @@ +{ + "model": + { + "name": "LTX Video 0.9.8 Distilled 13B", + "architecture" : "ltxv_13B", + "description": "LTX 
Video is a fast model that can be used to generate very long videos (up to 1800 frames!). This distilled version is very fast and retains a high level of quality. The LTX Video model expects very long prompts, so don't hesitate to use the Prompt Enhancer.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_distilled_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/LTX_Video/resolve/main/ltxv_0.9.8_13B_distilled_quanto_bf16_int8.safetensors" + ], + "preload_URLs" : "ltxv_13B", + "LTXV_config": "models/ltx_video/configs/ltxv-13b-0.9.8-distilled.yaml" + }, + "num_inference_steps": 6 +} diff --git a/wan-gp-defaults.json/lucy_edit.json b/wan-gp-defaults.json/lucy_edit.json new file mode 100644 index 0000000000000000000000000000000000000000..955175618c0b49398db6036dc0b520748e05e76a --- /dev/null +++ b/wan-gp-defaults.json/lucy_edit.json @@ -0,0 +1,20 @@ +{ + "model": { + "name": "Wan2.2 Lucy Edit 5B", + "architecture": "lucy_edit", + "description": "Lucy Edit is a video editing model that performs instruction-guided edits on videos using free-text prompts. 
It supports a variety of edits, such as clothing & accessory changes, character changes, object insertions, and scene replacements while preserving the motion and composition perfectly.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_lucy_edit_quanto_mfp16_int8.safetensors" + ], + "settings_dir": "ti2v_2_2", + "group": "wan2_2" + }, + "prompt": "change the clothes to red", + "video_length": 81, + "guidance_scale": 5, + "flow_shift": 5, + "num_inference_steps": 30, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/lucy_edit_fastwan.json b/wan-gp-defaults.json/lucy_edit_fastwan.json new file mode 100644 index 0000000000000000000000000000000000000000..48ac958b702f45ef0137dab4de6e3a1557a818da --- /dev/null +++ b/wan-gp-defaults.json/lucy_edit_fastwan.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Wan2.2 Lucy Edit FastWan 5B", + "architecture": "lucy_edit", + "description": "Lucy Edit is a video editing model that performs instruction-guided edits on videos using free-text prompts. It supports a variety of edits, such as clothing & accessory changes, character changes, object insertions, and scene replacements while preserving the motion and composition perfectly. 
This is the FastWan version for faster generation.", + "URLs": "lucy_edit", + "group": "wan2_2", + "settings_dir": [ "" ], + "loras": "ti2v_2_2_fastwan" + }, + "prompt": "change the clothes to red", + "video_length": 81, + "guidance_scale": 1, + "flow_shift": 3, + "num_inference_steps": 5, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/lynx.json b/wan-gp-defaults.json/lynx.json new file mode 100644 index 0000000000000000000000000000000000000000..7de06b0ebdd6c768250831b2fa313133da9fd08b --- /dev/null +++ b/wan-gp-defaults.json/lynx.json @@ -0,0 +1,18 @@ +{ + "model": { + "name": "Wan2.1 Lynx 14B", + "modules": [ + [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_quanto_bf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_module_14B_quanto_fp16_int8.safetensors" + ] + ], + "architecture": "lynx", + "description": "The Lynx ControlNet offers State of the Art Identity Preservation. You need to provide a Reference Image which is a close up of a person face to transfer this person in the Video.", + "URLs": "t2v", + "preload_URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_lynx_full_arc_resampler.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/moviigen.json b/wan-gp-defaults.json/moviigen.json new file mode 100644 index 0000000000000000000000000000000000000000..ea049594e3936ac1b082178c3bcfffc543f9ea37 --- /dev/null +++ b/wan-gp-defaults.json/moviigen.json @@ -0,0 +1,16 @@ +{ + "model": + { + "name": "MoviiGen 1080p 14B", + "architecture" : "t2v", + "description": "MoviiGen 1.1, a cutting-edge video generation model that excels in cinematic aesthetics and visual quality. 
Use it to generate videos in 720p or 1080p in the 21:9 ratio.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_moviigen1.1_14B_quanto_mfp16_int8.safetensors" + ], + "auto_quantize": true + }, + "resolution": "1280x720", + "video_length": 81 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/multitalk.json b/wan-gp-defaults.json/multitalk.json new file mode 100644 index 0000000000000000000000000000000000000000..80197fa11049157a08899c56450bf98f4f43160f --- /dev/null +++ b/wan-gp-defaults.json/multitalk.json @@ -0,0 +1,15 @@ +{ + "model": + { + "name": "Multitalk 480p 14B", + "architecture" : "multitalk", + "modules": [ + ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_multitalk_14B_quanto_mfp16_int8.safetensors"] + ], + "description": "The Multitalk model corresponds to the original Wan image 2 video model combined with the Multitalk module. 
It lets up to two people have a conversation.", + "URLs": "i2v", + "teacache_coefficients" : [-3.02331670e+02, 2.23948934e+02, -5.25463970e+01, 5.87348440e+00, -2.01973289e-01] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/multitalk_720p.json b/wan-gp-defaults.json/multitalk_720p.json new file mode 100644 index 0000000000000000000000000000000000000000..5ab3383dd7bc6b10af7f7b1e4ffb7a4c9e774b13 --- /dev/null +++ b/wan-gp-defaults.json/multitalk_720p.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "Multitalk 720p 14B", + "architecture" : "multitalk", + "modules": ["multitalk"], + "description": "The Multitalk model corresponds to the original Wan image 2 video 720p model combined with the Multitalk module. It lets up to two people have a conversation.", + "URLs": "i2v_720p", + "teacache_coefficients" : [-114.36346466, 65.26524496, -18.82220707, 4.91518089, -0.23412683], + "auto_quantize": true + }, + "resolution": "1280x720" +} diff --git a/wan-gp-defaults.json/phantom_1.3B.json b/wan-gp-defaults.json/phantom_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..076a1b584c03bec1cc4cfe45359ab93b0c46e56a --- /dev/null +++ b/wan-gp-defaults.json/phantom_1.3B.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "Phantom 1.3B", + "architecture" : "phantom_1.3B", + "description": "The Phantom model is specialized in transferring people or objects of your choice into a generated Video. 
It produces very nice results when used at 720p.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2_1_phantom_1.3B_mbf16.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/phantom_14B.json b/wan-gp-defaults.json/phantom_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..002b66d1056313787a32295be2aec98dbd37c3b7 --- /dev/null +++ b/wan-gp-defaults.json/phantom_14B.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "Phantom 14B", + "architecture" : "phantom_14B", + "description": "The Phantom model is specialized in transferring people or objects of your choice into a generated Video. It produces very nice results when used at 720p.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_phantom_14B_quanto_mfp16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/qwen_image_20B.json b/wan-gp-defaults.json/qwen_image_20B.json new file mode 100644 index 0000000000000000000000000000000000000000..8fe802bb2c8ec525b96bee4b83dc3d66ecc46440 --- /dev/null +++ b/wan-gp-defaults.json/qwen_image_20B.json @@ -0,0 +1,21 @@ +{ + "model": { + "name": "Qwen Image 20B", + "architecture": "qwen_image_20B", + "description": "Qwen Image is a generative model that will generate very high quality images. 
It is one of the few models capable of generating very long texts within the image.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_20B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_20B_quanto_bf16_int8.safetensors" + ], + "xresolutions": [ ["1328x1328 (1:1)", "1328x1328"], + ["1664x928 (16:9)", "1664x928"], + ["928x1664 (9:16)", "928x1664"], + ["1472x1140 (4:3)", "1472x1140"], + ["1140x1472 (3:4)", "1140x1472"]], + "attention": {"<89" : "sdpa"}, + "image_outputs": true + }, + "prompt": "draw a hat", + "resolution": "1280x720", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/qwen_image_edit_20B.json b/wan-gp-defaults.json/qwen_image_edit_20B.json new file mode 100644 index 0000000000000000000000000000000000000000..0ad3d02c358bf1f6f0e64ffae9ee10f0e75b739f --- /dev/null +++ b/wan-gp-defaults.json/qwen_image_edit_20B.json @@ -0,0 +1,18 @@ +{ + "model": { + "name": "Qwen Image Edit 20B", + "architecture": "qwen_image_edit_20B", + "description": "Qwen Image Edit is a generative model that can generate very high quality images with long texts in it. Best results will be at 720p. Use it to edit a Subject or combine multiple Subjects. 
", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_20B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_20B_quanto_bf16_int8.safetensors" + ], + "preload_URLs": ["https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_inpainting.safetensors"], + "attention": { + "<89": "sdpa" + } + }, + "prompt": "add a hat", + "resolution": "1280x720", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/qwen_image_edit_plus_20B.json b/wan-gp-defaults.json/qwen_image_edit_plus_20B.json new file mode 100644 index 0000000000000000000000000000000000000000..a8fb4ba943a9cb4a1888edd64328b9fef01c4adc --- /dev/null +++ b/wan-gp-defaults.json/qwen_image_edit_plus_20B.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Qwen Image Edit Plus 20B", + "architecture": "qwen_image_edit_plus_20B", + "description": "Qwen Image Edit Plus is a generative model that can generate very high quality images with long texts in it. Best results will be at 720p. This model is optimized to combine multiple Subjects & Objects.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Qwen_image/resolve/main/qwen_image_edit_plus_20B_quanto_bf16_int8.safetensors" + ], + "preload_URLs": "qwen_image_edit_20B", + "attention": { + "<89": "sdpa" + } + }, + "prompt": "add a hat", + "resolution": "1024x1024", + "batch_size": 1 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/recam_1.3B.json b/wan-gp-defaults.json/recam_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..5ae142e69c47601cf60090a73620e160a1ea63a0 --- /dev/null +++ b/wan-gp-defaults.json/recam_1.3B.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "ReCamMaster 1.3B", + "architecture" : "recam_1.3B", + "description": "The Recam Master in theory should allow you to replay a video by applying a different camera movement. 
The model supports only videos that are at least 81 frames long (any frames beyond will be ignored).", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_recammaster_1.3B_bf16.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/sky_df_1.3B.json b/wan-gp-defaults.json/sky_df_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..d7bc719c06a4ae4fda36e6d68f4dff69c398cea6 --- /dev/null +++ b/wan-gp-defaults.json/sky_df_1.3B.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "SkyReels2 Diffusion Forcing 1.3B", + "architecture" : "sky_df_1.3B", + "description": "The SkyReels 2 Diffusion Forcing model has been designed to generate very long videos that exceed the usual 5s limit. You can also use this model to extend any existing video.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_1.3B_mbf16.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/sky_df_14B.json b/wan-gp-defaults.json/sky_df_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..8d5958286ef68ca316b438a6b4decb019ce30009 --- /dev/null +++ b/wan-gp-defaults.json/sky_df_14B.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "SkyReels2 Diffusion Forcing 540p 14B", + "architecture" : "sky_df_14B", + "description": "The SkyReels 2 Diffusion Forcing model has been designed to generate very long videos that exceed the usual 5s limit. 
You can also use this model to extend any existing video.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_quanto_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_14B_quanto_fp16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/sky_df_720p_14B.json b/wan-gp-defaults.json/sky_df_720p_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..f7a5251018608d11de6a3bbff03520b85700a37c --- /dev/null +++ b/wan-gp-defaults.json/sky_df_720p_14B.json @@ -0,0 +1,14 @@ +{ + "model": + { + "name": "SkyReels2 Diffusion Forcing 720p 14B", + "architecture" : "sky_df_14B", + "description": "The SkyReels 2 Diffusion Forcing model has been designed to generate very long videos that exceed the usual 5s limit. You can also use this model to extend any existing video.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_720p_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_720p_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/sky_reels2_diffusion_forcing_720p_14B_quanto_mfp16_int8.safetensors" + ] + }, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/standin.json b/wan-gp-defaults.json/standin.json new file mode 100644 index 0000000000000000000000000000000000000000..1a367a320b0b73c0477e7ab2343ef29dadfb38f7 --- /dev/null +++ b/wan-gp-defaults.json/standin.json @@ -0,0 +1,10 @@ +{ + "model": + { + "name": "Wan2.1 Standin 14B", + "modules": [ ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Stand-In_wan2.1_T2V_14B_ver1.0_bf16.safetensors"]], + "architecture" : "standin", + "description": "The 
original Wan Text 2 Video model combined with the StandIn module to improve Identity Preservation. You need to provide a Reference Image with white background which is a close up of a person face to transfer this person in the Video.", + "URLs": "t2v" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/t2v.json b/wan-gp-defaults.json/t2v.json new file mode 100644 index 0000000000000000000000000000000000000000..6b1e3e15e803b6fa55e95d15ef90d52b20928d85 --- /dev/null +++ b/wan-gp-defaults.json/t2v.json @@ -0,0 +1,13 @@ +{ + "model": + { + "name": "Wan2.1 Text2video 14B", + "architecture" : "t2v", + "description": "The original Wan Text 2 Video model. Most other models have been built on top of it", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_text2video_14B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_text2video_14B_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_text2video_14B_quanto_mfp16_int8.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/t2v_1.3B.json b/wan-gp-defaults.json/t2v_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..a947947235dd52e393d549340c852b040cd717fb --- /dev/null +++ b/wan-gp-defaults.json/t2v_1.3B.json @@ -0,0 +1,11 @@ +{ + "model": + { + "name": "Wan2.1 Text2video 1.3B", + "architecture" : "t2v_1.3B", + "description": "The light version of the original Wan Text 2 Video model. 
Most other models have been built on top of it", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_text2video_1.3B_mbf16.safetensors" + ] + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/t2v_2_2.json b/wan-gp-defaults.json/t2v_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..0a3e79ea5995771208534942c5a5818eaad7cec1 --- /dev/null +++ b/wan-gp-defaults.json/t2v_2_2.json @@ -0,0 +1,25 @@ +{ + "model": + { + "name": "Wan2.2 Text2video 14B", + "architecture" : "t2v_2_2", + "description": "Wan 2.2 Text 2 Video model", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_high_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_high_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_high_quanto_mfp16_int8.safetensors" + ], + "URLs2": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_low_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_low_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_14B_low_quanto_mfp16_int8.safetensors" + ], + "group": "wan2_2" + }, + "guidance_phases": 2, + "switch_threshold" : 875, + "guidance_scale" : 4, + "guidance2_scale" : 3, + "flow_shift" : 12 + +} \ No newline at end of file diff --git a/wan-gp-defaults.json/t2v_fusionix.json b/wan-gp-defaults.json/t2v_fusionix.json new file mode 100644 index 0000000000000000000000000000000000000000..203c603998a7ced85b5bf1974992e3f9c6ffdf6e --- /dev/null +++ b/wan-gp-defaults.json/t2v_fusionix.json @@ -0,0 +1,38 @@ +{ + "model": + { + "name": "Wan2.1 Text2video FusioniX 14B", + "architecture" : "t2v", + "description": "A powerful merged text-to-video model based on the original WAN 2.1 T2V model, enhanced using multiple open-source components 
and LoRAs to boost motion realism, temporal consistency, and expressive detail.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_quanto_fp16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_quanto_bf16_int8.safetensors" + ], + "auto_quantize": true + }, + "negative_prompt": "", + "prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": -1, + "num_inference_steps": 8, + "guidance_scale": 1, + "flow_shift": 5, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "multi_images_gen_type": 0, + "tea_cache_setting": 0, + "tea_cache_start_step_perc": 0, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_switch": 0, + "slg_start_perc": 10, + "slg_end_perc": 90, + "cfg_star_switch": 0, + "cfg_zero_step": -1, + "prompt_enhancer": "", + "activated_loras": [] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/t2v_sf.json b/wan-gp-defaults.json/t2v_sf.json new file mode 100644 index 0000000000000000000000000000000000000000..a67c4fcd25f6fb9d514ee49a85fa7f55d0f730ca --- /dev/null +++ b/wan-gp-defaults.json/t2v_sf.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Wan2.1 Text2video Lightning 14B", + "architecture": "t2v", + "description": "This model is an advanced text-to-video generation model. 
The Lightning / SelfForcing process allows the model to generate videos with significantly fewer inference steps (4 or 8 steps) and without classifier-free guidance, substantially reducing video generation time while maintaining high quality outputs.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_quanto_bf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_quanto_fp16_int8.safetensors" + ], + "author": "https://huggingface.co/lightx2v/Wan2.1-T2V-14B-StepDistill-CfgDistill", + "auto_quantize": true + }, + "num_inference_steps": 4, + "guidance_scale": 1, + "flow_shift": 3 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/ti2v_2_2.json b/wan-gp-defaults.json/ti2v_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..a24fd8b99d55dc5ce48e3a6852645c97b01a76ba --- /dev/null +++ b/wan-gp-defaults.json/ti2v_2_2.json @@ -0,0 +1,18 @@ +{ + "model": { + "name": "Wan2.2 TextImage2video 5B", + "architecture": "ti2v_2_2", + "description": "Wan 2.2 Text 2 Video model 5B", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_5B_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/wan2.2_text2video_5B_quanto_mbf16_int8.safetensors" + ], + "settings_dir": [ "wan2_2_5B" ], + "group": "wan2_2" + }, + "video_length": 121, + "guidance_scale": 5, + "flow_shift": 5, + "num_inference_steps": 50, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/ti2v_2_2_fastwan.json b/wan-gp-defaults.json/ti2v_2_2_fastwan.json new file mode 100644 index 0000000000000000000000000000000000000000..e4604c8d3bc8ba124dfba85c8a847d3d68251e9c --- /dev/null +++ b/wan-gp-defaults.json/ti2v_2_2_fastwan.json @@ -0,0 +1,17 @@ +{ + "model": { + 
"name": "Wan2.2 TextImage2video FastWan 5B", + "architecture": "ti2v_2_2", + "description": "FastWan2.2-TI2V-5B-Full-Diffusers is built upon Wan-AI/Wan2.2-TI2V-5B-Diffusers. It supports efficient 3-step inference and produces high-quality videos at 121×704×1280 resolution", + "URLs": "ti2v_2_2", + "settings_dir": [ "" ], + "loras": ["https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/loras_accelerators/Wan2_2_5B_FastWanFullAttn_lora_rank_128_bf16.safetensors"], + "group": "wan2_2" + }, + "prompt" : "Put the person into a clown outfit.", + "video_length": 121, + "guidance_scale": 1, + "flow_shift": 3, + "num_inference_steps": 3, + "resolution": "1280x720" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_1.3B.json b/wan-gp-defaults.json/vace_1.3B.json new file mode 100644 index 0000000000000000000000000000000000000000..0e86659169d6f4f2e777960164d73d88d85f4299 --- /dev/null +++ b/wan-gp-defaults.json/vace_1.3B.json @@ -0,0 +1,12 @@ +{ + "model": + { + "name": "Vace 1.3B", + "architecture" : "vace_1.3B", + "modules": [ + ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Vace_1_3B_module.safetensors"] + ], + "description": "The Vace ControlNet model is a powerful model that allows you to control the content of the generated video based of additional custom data : pose or depth video, images or objects you want to see in the video.", + "URLs": "t2v_1.3B" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B.json b/wan-gp-defaults.json/vace_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..1967e028caff2d497292beec5926aa0aaa153a6c --- /dev/null +++ b/wan-gp-defaults.json/vace_14B.json @@ -0,0 +1,13 @@ +{ + "model": { + "name": "Vace 14B", + "architecture": "vace_14B", + "modules": [ + ["https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Vace_14B_module_mbf16.safetensors", + 
"https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Vace_14B_module_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_Vace_14B_module_quanto_mfp16_int8.safetensors"] + ], + "description": "The Vace ControlNet model is a powerful model that allows you to control the content of the generated video based of additional custom data : pose or depth video, images or objects you want to see in the video.", + "URLs": "t2v" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_2_2.json b/wan-gp-defaults.json/vace_14B_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..1f8a2124b6b29a7b277aca70ebf95a166c38eee8 --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_2_2.json @@ -0,0 +1,17 @@ +{ + "model": { + "name": "Wan2.2 Vace 14B", + "architecture": "vace_14B_2_2", + "modules": [ + "vace_14B" + ], + "description": "There is so far only PARTIAL support of Vace 2.1 which is currently used.", + "URLs": "t2v_2_2", + "URLs2": "t2v_2_2" + }, + "guidance_phases": 2, + "guidance_scale": 1, + "guidance2_scale": 1, + "flow_shift": 2, + "switch_threshold" : 875 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_cocktail.json b/wan-gp-defaults.json/vace_14B_cocktail.json new file mode 100644 index 0000000000000000000000000000000000000000..262e80680f3bc5ccb9183f06da1c8307c38dc5e7 --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_cocktail.json @@ -0,0 +1,22 @@ +{ + "model": { + "name": "Vace Cocktail 14B", + "architecture": "vace_14B", + "modules": [ + "vace_14B" + ], + "description": "This model has been created on the fly using the Wan text 2 video model and the Loras of FusioniX. The weight of the Detail Enhancer Lora has been reduced to improve identity preservation. 
Copy the model def in the finetune folder to change the Cocktail composition.", + "URLs": "t2v", + "settings_dir": [ "" ], + "loras": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_CausVid_14B_T2V_lora_rank32_v2.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/DetailEnhancerV1.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_AccVid_T2V_14B_lora_rank32_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_T2V_14B_MoviiGen_lora_rank32_fp16.safetensors" + ], + "loras_multipliers": [1, 0.5, 0.5, 0.5] + }, + "num_inference_steps": 10, + "guidance_scale": 1, + "flow_shift": 2 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_cocktail_2_2.json b/wan-gp-defaults.json/vace_14B_cocktail_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..b658105d5d9828890913f48d0375761514ec3b5b --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_cocktail_2_2.json @@ -0,0 +1,27 @@ +{ + "model": { + "name": "Wan2.2 Vace Experimental Cocktail 14B", + "architecture": "vace_14B_2_2", + "modules": [ + "vace_14B" + ], + "description": "This model has been created on the fly using the Wan text 2.2 video model and the Loras of FusioniX. The weight of the Detail Enhancer Lora has been reduced to improve identity preservation. 
There is so far only PARTIAL support of Vace 2.1 which is currently used.", + "URLs": "t2v_2_2", + "URLs2": "t2v_2_2", + "loras": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_CausVid_14B_T2V_lora_rank32_v2.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/DetailEnhancerV1.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_AccVid_T2V_14B_lora_rank32_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_T2V_14B_MoviiGen_lora_rank32_fp16.safetensors" + ], + "profiles_dir": [ "" ], + "loras_multipliers": [1, 0.2, 0.5, 0.5], + "group": "wan2_2" + }, + "guidance_phases": 2, + "num_inference_steps": 10, + "guidance_scale": 1, + "guidance2_scale": 1, + "flow_shift": 2, + "switch_threshold" : 875 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_fusionix.json b/wan-gp-defaults.json/vace_14B_fusionix.json new file mode 100644 index 0000000000000000000000000000000000000000..e74b2e951fc7e70c45d275c47188597e16fe0164 --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_fusionix.json @@ -0,0 +1,36 @@ +{ + "model": { + "name": "Vace FusioniX 14B", + "architecture": "vace_14B", + "modules": [ + "vace_14B" + ], + "description": "Vace control model enhanced using multiple open-source components and LoRAs to boost motion realism, temporal consistency, and expressive detail.", + "profiles_dir": [ "" ], + "URLs": "t2v_fusionix" + }, + "negative_prompt": "", + "prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": -1, + "num_inference_steps": 10, + "guidance_scale": 1, + "flow_shift": 2, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "multi_images_gen_type": 0, + "tea_cache_setting": 0, + "tea_cache_start_step_perc": 0, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_switch": 0, + 
"slg_start_perc": 10, + "slg_end_perc": 90, + "cfg_star_switch": 0, + "cfg_zero_step": -1, + "prompt_enhancer": "", + "activated_loras": [] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_lightning_3p_2_2.json b/wan-gp-defaults.json/vace_14B_lightning_3p_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..cea12f1e6650ed7edaa25ff70682b8db76ef6a6a --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_lightning_3p_2_2.json @@ -0,0 +1,29 @@ +{ + "model": { + "name": "Wan2.2 Vace Lightning 3 Phases 14B", + "architecture": "vace_14B_2_2", + "modules": [ + "vace_14B" + ], + "description": "This finetune uses the Lightning 4 steps Loras Accelerator for Wan 2.2 but extend them to 8 steps in order to insert a CFG phase before the 2 accelerated phases with no Guidance. The ultimate goal is reduce the slow motion effect of these Loras Accelerators.", + "URLs": "t2v_2_2", + "URLs2": "t2v_2_2", + "loras": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/loras_accelerators/Wan2.2-Lightning_T2V-v1.1-A14B-4steps-lora_HIGH_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/loras_accelerators/Wan2.2-Lightning_T2V-v1.1-A14B-4steps-lora_LOW_fp16.safetensors" + ], + "loras_multipliers": ["0;1;0", "0;0;1"], + "lock_guidance_phases": true, + "group": "wan2_2" + }, + "num_inference_steps": 8, + "guidance_phases": 3, + "guidance_scale": 3.5, + "guidance2_scale": 1, + "guidance3_scale": 1, + "switch_threshold": 965, + "switch_threshold2": 800, + "model_switch_phase": 2, + "flow_shift": 3, + "sample_solver": "euler" +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_14B_sf.json b/wan-gp-defaults.json/vace_14B_sf.json new file mode 100644 index 0000000000000000000000000000000000000000..185f005c599b590790f012152dec17bd411e7b14 --- /dev/null +++ b/wan-gp-defaults.json/vace_14B_sf.json @@ -0,0 +1,41 @@ +{ + "model": { + "name": "Vace Self-Forcing 14B", + "architecture": "vace_14B", + 
"modules": [ + "vace_14B" + ], + "description": "This model is a combination of Vace and an advanced text-to-video generation model. This approach allows the model to generate videos with significantly fewer inference steps (4 or 8 steps) and without classifier-free guidance, substantially reducing video generation time while maintaining high quality outputs.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_bf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_quanto_bf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/wan2.1_StepDistill-CfgDistill_14B_quanto_fp16_int8.safetensors" + ], + "author": "https://huggingface.co/lightx2v/Wan2.1-T2V-14B-StepDistill-CfgDistill", + "auto_quantize": true + }, + "negative_prompt": "", + "prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": -1, + "num_inference_steps": 4, + "guidance_scale": 1, + "flow_shift": 3, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "multi_images_gen_type": 0, + "tea_cache_setting": 0, + "tea_cache_start_step_perc": 0, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_switch": 0, + "slg_start_perc": 10, + "slg_end_perc": 90, + "cfg_star_switch": 0, + "cfg_zero_step": -1, + "prompt_enhancer": "", + "activated_loras": [] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_fun_14B_2_2.json b/wan-gp-defaults.json/vace_fun_14B_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..135c9a94ed25ccd0f149b76a7c9e1b9436f381ae --- /dev/null +++ b/wan-gp-defaults.json/vace_fun_14B_2_2.json @@ -0,0 +1,24 @@ +{ + "model": { + "name": "Wan2.2 Vace Fun 14B", + "architecture": "vace_14B_2_2", + "description": "This is the Fun Vace 2.2 version, that is not the official Vace 2.2", + "URLs": [ + 
"https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_HIGH_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_HIGH_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_HIGH_quanto_mfp16_int8.safetensors" + ], + "URLs2": [ + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_LOW_mbf16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_LOW_quanto_mbf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.2/resolve/main/Wan2_2_Fun_VACE_A14B_LOW_quanto_mfp16_int8.safetensors" + ], + "group": "wan2_2" + }, + "guidance_phases": 2, + "num_inference_steps": 30, + "guidance_scale": 1, + "guidance2_scale": 1, + "flow_shift": 2, + "switch_threshold": 875 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_fun_14B_cocktail_2_2.json b/wan-gp-defaults.json/vace_fun_14B_cocktail_2_2.json new file mode 100644 index 0000000000000000000000000000000000000000..9f7449bef92eba3801f15703da8810b7d924d3bf --- /dev/null +++ b/wan-gp-defaults.json/vace_fun_14B_cocktail_2_2.json @@ -0,0 +1,29 @@ +{ + "model": { + "name": "Wan2.2 Vace Fun Cocktail 14B", + "architecture": "vace_14B_2_2", + "description": "This model has been created on the fly using the Wan text 2.2 video model and the Loras of FusioniX. The weight of the Detail Enhancer Lora has been reduced to improve identity preservation. 
This is the Fun Vace 2.2, that is not the official Vace 2.2", + "URLs": "vace_fun_14B_2_2", + "URLs2": "vace_fun_14B_2_2", + "loras": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_CausVid_14B_T2V_lora_rank32_v2.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/DetailEnhancerV1.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_AccVid_T2V_14B_lora_rank32_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/loras_accelerators/Wan21_T2V_14B_MoviiGen_lora_rank32_fp16.safetensors" + ], + "loras_multipliers": [ + 1, + 0.2, + 0.5, + 0.5 + ], + "profiles_dir": [""], + "group": "wan2_2" + }, + "guidance_phases": 2, + "num_inference_steps": 10, + "guidance_scale": 1, + "guidance2_scale": 1, + "flow_shift": 2, + "switch_threshold": 875 +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_lynx_14B.json b/wan-gp-defaults.json/vace_lynx_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..51fbb1662a00bfdf50406b8dc66deb6f45ecc70e --- /dev/null +++ b/wan-gp-defaults.json/vace_lynx_14B.json @@ -0,0 +1,10 @@ +{ + "model": { + "name": "Vace Lynx 14B", + "architecture": "vace_lynx_14B", + "modules": [ "vace_14B", "lynx"], + "description": "The Vace ControlNet model is a powerful model that allows you to control the content of the generated video based of additional custom data : pose or depth video, images or objects you want to see in the video. 
The Lynx version is specialized in identity transfer, so the last Image Ref should always contain a close up of the Face of a Person to transfer.", + "URLs": "t2v", + "preload_URLs": "lynx" + } +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_multitalk_14B.json b/wan-gp-defaults.json/vace_multitalk_14B.json new file mode 100644 index 0000000000000000000000000000000000000000..60a0bc5dd5a58ec96e640a034aa408445cd325ac --- /dev/null +++ b/wan-gp-defaults.json/vace_multitalk_14B.json @@ -0,0 +1,41 @@ +{ + "model": { + "name": "Vace Multitalk FusioniX 14B", + "architecture": "vace_multitalk_14B", + "modules": [ + "vace_14B", + "multitalk" + ], + "description": "Vace control model enhanced using multiple open-source components and LoRAs to boost motion realism, temporal consistency, and expressive detail. And it that's not sufficient Vace is combined with Multitalk.", + "URLs": [ + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_fp16.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_quanto_bf16_int8.safetensors", + "https://huggingface.co/DeepBeepMeep/Wan2.1/resolve/main/Wan14BT2VFusioniX_quanto_fp16_int8.safetensors" + ], + "auto_quantize": true + }, + "negative_prompt": "", + "prompt": "", + "resolution": "832x480", + "video_length": 81, + "seed": -1, + "num_inference_steps": 10, + "guidance_scale": 1, + "flow_shift": 5, + "embedded_guidance_scale": 6, + "repeat_generation": 1, + "multi_images_gen_type": 0, + "tea_cache_setting": 0, + "tea_cache_start_step_perc": 0, + "loras_multipliers": "", + "temporal_upsampling": "", + "spatial_upsampling": "", + "RIFLEx_setting": 0, + "slg_switch": 0, + "slg_start_perc": 10, + "slg_end_perc": 90, + "cfg_star_switch": 0, + "cfg_zero_step": -1, + "prompt_enhancer": "", + "activated_loras": [] +} \ No newline at end of file diff --git a/wan-gp-defaults.json/vace_standin_14B.json b/wan-gp-defaults.json/vace_standin_14B.json new file mode 100644 
index 0000000000000000000000000000000000000000..922644b82e3c2ce8790d0312ce503338cbe8f7bd --- /dev/null +++ b/wan-gp-defaults.json/vace_standin_14B.json @@ -0,0 +1,9 @@ +{ + "model": { + "name": "Vace Standin 14B", + "architecture": "vace_standin_14B", + "modules": [ "vace_14B", "standin"], + "description": "The Vace ControlNet model is a powerful model that allows you to control the content of the generated video based of additional custom data : pose or depth video, images or objects you want to see in the video. The Standin version is specialized in identity transfer, so the last Image Ref should always contain a close up of the Face of a Person to transfer.", + "URLs": "t2v" + } +} \ No newline at end of file