{"repo_name": "Wan2.1", "file_name": "/Wan2.1/wan/configs/__init__.py", "inference_info": {"prefix_code": "# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.\nimport copy\nimport os\n\nos.environ['TOKENIZERS_PARALLELISM'] = 'false'\n\nfrom .wan_i2v_14B import i2v_14B\nfrom .wan_t2v_1_3B import t2v_1_3B\nfrom .wan_t2v_14B import t2v_14B\n\n# the config of t2i_14B is the same as t2v_14B\nt2i_14B = copy.deepcopy(t2v_14B)\nt2i_14B.__name__ = 'Config: Wan T2I 14B'\n\n# the config of flf2v_14B is the same as i2v_14B\nflf2v_14B = copy.deepcopy(i2v_14B)\nflf2v_14B.__name__ = 'Config: Wan FLF2V 14B'\nflf2v_14B.sample_neg_prompt = \"镜头切换,\" + flf2v_14B.sample_neg_prompt\n\n", "suffix_code": "\n\nSIZE_CONFIGS = {\n '720*1280': (720, 1280),\n '1280*720': (1280, 720),\n '480*832': (480, 832),\n '832*480': (832, 480),\n '1024*1024': (1024, 1024),\n}\n\nMAX_AREA_CONFIGS = {\n '720*1280': 720 * 1280,\n '1280*720': 1280 * 720,\n '480*832': 480 * 832,\n '832*480': 832 * 480,\n}\n\nSUPPORTED_SIZES = {\n 't2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),\n 't2v-1.3B': ('480*832', '832*480'),\n 'i2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),\n 'flf2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),\n 't2i-14B': tuple(SIZE_CONFIGS.keys()),\n 'vace-1.3B': ('480*832', '832*480'),\n 'vace-14B': ('720*1280', '1280*720', '480*832', '832*480')\n}\n", "middle_code": "WAN_CONFIGS = {\n 't2v-14B': t2v_14B,\n 't2v-1.3B': t2v_1_3B,\n 'i2v-14B': i2v_14B,\n 't2i-14B': t2i_14B,\n 'flf2v-14B': flf2v_14B,\n 'vace-1.3B': t2v_1_3B,\n 'vace-14B': t2v_14B,\n}", "code_description": null, "fill_type": "LINE_TYPE", "language_type": "python", "sub_task_type": "expression"}, "context_code": [["/Wan2.1/wan/configs/wan_i2v_14B.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.\nimport torch\nfrom easydict import EasyDict\n\nfrom .shared_config import wan_shared_cfg\n\n#------------------------ Wan I2V 14B ------------------------#\n\ni2v_14B = EasyDict(__name__='Config: Wan I2V 14B')\ni2v_14B.update(wan_shared_cfg)\ni2v_14B.sample_neg_prompt = \"镜头晃动,\" + i2v_14B.sample_neg_prompt\n\ni2v_14B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'\ni2v_14B.t5_tokenizer = 'google/umt5-xxl'\n\n# clip\ni2v_14B.clip_model = 'clip_xlm_roberta_vit_h_14'\ni2v_14B.clip_dtype = torch.float16\ni2v_14B.clip_checkpoint = 'models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth'\ni2v_14B.clip_tokenizer = 'xlm-roberta-large'\n\n# vae\ni2v_14B.vae_checkpoint = 'Wan2.1_VAE.pth'\ni2v_14B.vae_stride = (4, 8, 8)\n\n# transformer\ni2v_14B.patch_size = (1, 2, 2)\ni2v_14B.dim = 5120\ni2v_14B.ffn_dim = 13824\ni2v_14B.freq_dim = 256\ni2v_14B.num_heads = 40\ni2v_14B.num_layers = 40\ni2v_14B.window_size = (-1, -1)\ni2v_14B.qk_norm = True\ni2v_14B.cross_attn_norm = True\ni2v_14B.eps = 1e-6\n"], ["/Wan2.1/wan/configs/wan_t2v_1_3B.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. 
All rights reserved.\nfrom easydict import EasyDict\n\nfrom .shared_config import wan_shared_cfg\n\n#------------------------ Wan T2V 1.3B ------------------------#\n\nt2v_1_3B = EasyDict(__name__='Config: Wan T2V 1.3B')\nt2v_1_3B.update(wan_shared_cfg)\n\n# t5\nt2v_1_3B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'\nt2v_1_3B.t5_tokenizer = 'google/umt5-xxl'\n\n# vae\nt2v_1_3B.vae_checkpoint = 'Wan2.1_VAE.pth'\nt2v_1_3B.vae_stride = (4, 8, 8)\n\n# transformer\nt2v_1_3B.patch_size = (1, 2, 2)\nt2v_1_3B.dim = 1536\nt2v_1_3B.ffn_dim = 8960\nt2v_1_3B.freq_dim = 256\nt2v_1_3B.num_heads = 12\nt2v_1_3B.num_layers = 30\nt2v_1_3B.window_size = (-1, -1)\nt2v_1_3B.qk_norm = True\nt2v_1_3B.cross_attn_norm = True\nt2v_1_3B.eps = 1e-6\n"], ["/Wan2.1/wan/configs/wan_t2v_14B.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.\nfrom easydict import EasyDict\n\nfrom .shared_config import wan_shared_cfg\n\n#------------------------ Wan T2V 14B ------------------------#\n\nt2v_14B = EasyDict(__name__='Config: Wan T2V 14B')\nt2v_14B.update(wan_shared_cfg)\n\n# t5\nt2v_14B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'\nt2v_14B.t5_tokenizer = 'google/umt5-xxl'\n\n# vae\nt2v_14B.vae_checkpoint = 'Wan2.1_VAE.pth'\nt2v_14B.vae_stride = (4, 8, 8)\n\n# transformer\nt2v_14B.patch_size = (1, 2, 2)\nt2v_14B.dim = 5120\nt2v_14B.ffn_dim = 13824\nt2v_14B.freq_dim = 256\nt2v_14B.num_heads = 40\nt2v_14B.num_layers = 40\nt2v_14B.window_size = (-1, -1)\nt2v_14B.qk_norm = True\nt2v_14B.cross_attn_norm = True\nt2v_14B.eps = 1e-6\n"], ["/Wan2.1/wan/modules/model.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.\nimport math\n\nimport torch\nimport torch.cuda.amp as amp\nimport torch.nn as nn\nfrom diffusers.configuration_utils import ConfigMixin, register_to_config\nfrom diffusers.models.modeling_utils import ModelMixin\n\nfrom .attention import flash_attention\n\n__all__ = ['WanModel']\n\nT5_CONTEXT_TOKEN_NUMBER = 512\nFIRST_LAST_FRAME_CONTEXT_TOKEN_NUMBER = 257 * 2\n\n\ndef sinusoidal_embedding_1d(dim, position):\n # preprocess\n assert dim % 2 == 0\n half = dim // 2\n position = position.type(torch.float64)\n\n # calculation\n sinusoid = torch.outer(\n position, torch.pow(10000, -torch.arange(half).to(position).div(half)))\n x = torch.cat([torch.cos(sinusoid), torch.sin(sinusoid)], dim=1)\n return x\n\n\n@amp.autocast(enabled=False)\ndef rope_params(max_seq_len, dim, theta=10000):\n assert dim % 2 == 0\n freqs = torch.outer(\n torch.arange(max_seq_len),\n 1.0 / torch.pow(theta,\n torch.arange(0, dim, 2).to(torch.float64).div(dim)))\n freqs = torch.polar(torch.ones_like(freqs), freqs)\n return freqs\n\n\n@amp.autocast(enabled=False)\ndef rope_apply(x, grid_sizes, freqs):\n n, c = x.size(2), x.size(3) // 2\n\n # split freqs\n freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)\n\n # loop over samples\n output = []\n for i, (f, h, w) in enumerate(grid_sizes.tolist()):\n seq_len = f * h * w\n\n # precompute multipliers\n x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape(\n seq_len, n, -1, 2))\n freqs_i = torch.cat([\n freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),\n freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),\n freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)\n ],\n dim=-1).reshape(seq_len, 1, -1)\n\n # apply rotary embedding\n x_i = torch.view_as_real(x_i * freqs_i).flatten(2)\n x_i = torch.cat([x_i, x[i, seq_len:]])\n\n # append to collection\n output.append(x_i)\n return torch.stack(output).float()\n\n\nclass 
WanRMSNorm(nn.Module):\n\n def __init__(self, dim, eps=1e-5):\n super().__init__()\n self.dim = dim\n self.eps = eps\n self.weight = nn.Parameter(torch.ones(dim))\n\n def forward(self, x):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L, C]\n \"\"\"\n return self._norm(x.float()).type_as(x) * self.weight\n\n def _norm(self, x):\n return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)\n\n\nclass WanLayerNorm(nn.LayerNorm):\n\n def __init__(self, dim, eps=1e-6, elementwise_affine=False):\n super().__init__(dim, elementwise_affine=elementwise_affine, eps=eps)\n\n def forward(self, x):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L, C]\n \"\"\"\n return super().forward(x.float()).type_as(x)\n\n\nclass WanSelfAttention(nn.Module):\n\n def __init__(self,\n dim,\n num_heads,\n window_size=(-1, -1),\n qk_norm=True,\n eps=1e-6):\n assert dim % num_heads == 0\n super().__init__()\n self.dim = dim\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.window_size = window_size\n self.qk_norm = qk_norm\n self.eps = eps\n\n # layers\n self.q = nn.Linear(dim, dim)\n self.k = nn.Linear(dim, dim)\n self.v = nn.Linear(dim, dim)\n self.o = nn.Linear(dim, dim)\n self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()\n self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()\n\n def forward(self, x, seq_lens, grid_sizes, freqs):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L, num_heads, C / num_heads]\n seq_lens(Tensor): Shape [B]\n grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)\n freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]\n \"\"\"\n b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim\n\n # query, key, value function\n def qkv_fn(x):\n q = self.norm_q(self.q(x)).view(b, s, n, d)\n k = self.norm_k(self.k(x)).view(b, s, n, d)\n v = self.v(x).view(b, s, n, d)\n return q, k, v\n\n q, k, v = qkv_fn(x)\n\n x = flash_attention(\n q=rope_apply(q, grid_sizes, freqs),\n k=rope_apply(k, grid_sizes, freqs),\n v=v,\n k_lens=seq_lens,\n window_size=self.window_size)\n\n # output\n x = x.flatten(2)\n x = self.o(x)\n return x\n\n\nclass WanT2VCrossAttention(WanSelfAttention):\n\n def forward(self, x, context, context_lens):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L1, C]\n context(Tensor): Shape [B, L2, C]\n context_lens(Tensor): Shape [B]\n \"\"\"\n b, n, d = x.size(0), self.num_heads, self.head_dim\n\n # compute query, key, value\n q = self.norm_q(self.q(x)).view(b, -1, n, d)\n k = self.norm_k(self.k(context)).view(b, -1, n, d)\n v = self.v(context).view(b, -1, n, d)\n\n # compute attention\n x = flash_attention(q, k, v, k_lens=context_lens)\n\n # output\n x = x.flatten(2)\n x = self.o(x)\n return x\n\n\nclass WanI2VCrossAttention(WanSelfAttention):\n\n def __init__(self,\n dim,\n num_heads,\n window_size=(-1, -1),\n qk_norm=True,\n eps=1e-6):\n super().__init__(dim, num_heads, window_size, qk_norm, eps)\n\n self.k_img = nn.Linear(dim, dim)\n self.v_img = nn.Linear(dim, dim)\n # self.alpha = nn.Parameter(torch.zeros((1, )))\n self.norm_k_img = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()\n\n def forward(self, x, context, context_lens):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L1, C]\n context(Tensor): Shape [B, L2, C]\n context_lens(Tensor): Shape [B]\n \"\"\"\n image_context_length = context.shape[1] - T5_CONTEXT_TOKEN_NUMBER\n context_img = context[:, :image_context_length]\n context = context[:, image_context_length:]\n b, n, d = x.size(0), self.num_heads, self.head_dim\n\n # compute query, key, value\n q = 
self.norm_q(self.q(x)).view(b, -1, n, d)\n k = self.norm_k(self.k(context)).view(b, -1, n, d)\n v = self.v(context).view(b, -1, n, d)\n k_img = self.norm_k_img(self.k_img(context_img)).view(b, -1, n, d)\n v_img = self.v_img(context_img).view(b, -1, n, d)\n img_x = flash_attention(q, k_img, v_img, k_lens=None)\n # compute attention\n x = flash_attention(q, k, v, k_lens=context_lens)\n\n # output\n x = x.flatten(2)\n img_x = img_x.flatten(2)\n x = x + img_x\n x = self.o(x)\n return x\n\n\nWAN_CROSSATTENTION_CLASSES = {\n 't2v_cross_attn': WanT2VCrossAttention,\n 'i2v_cross_attn': WanI2VCrossAttention,\n}\n\n\nclass WanAttentionBlock(nn.Module):\n\n def __init__(self,\n cross_attn_type,\n dim,\n ffn_dim,\n num_heads,\n window_size=(-1, -1),\n qk_norm=True,\n cross_attn_norm=False,\n eps=1e-6):\n super().__init__()\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.window_size = window_size\n self.qk_norm = qk_norm\n self.cross_attn_norm = cross_attn_norm\n self.eps = eps\n\n # layers\n self.norm1 = WanLayerNorm(dim, eps)\n self.self_attn = WanSelfAttention(dim, num_heads, window_size, qk_norm,\n eps)\n self.norm3 = WanLayerNorm(\n dim, eps,\n elementwise_affine=True) if cross_attn_norm else nn.Identity()\n self.cross_attn = WAN_CROSSATTENTION_CLASSES[cross_attn_type](dim,\n num_heads,\n (-1, -1),\n qk_norm,\n eps)\n self.norm2 = WanLayerNorm(dim, eps)\n self.ffn = nn.Sequential(\n nn.Linear(dim, ffn_dim), nn.GELU(approximate='tanh'),\n nn.Linear(ffn_dim, dim))\n\n # modulation\n self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)\n\n def forward(\n self,\n x,\n e,\n seq_lens,\n grid_sizes,\n freqs,\n context,\n context_lens,\n ):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L, C]\n e(Tensor): Shape [B, 6, C]\n seq_lens(Tensor): Shape [B], length of each sequence in batch\n grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)\n freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]\n \"\"\"\n assert e.dtype == torch.float32\n with amp.autocast(dtype=torch.float32):\n e = (self.modulation + e).chunk(6, dim=1)\n assert e[0].dtype == torch.float32\n\n # self-attention\n y = self.self_attn(\n self.norm1(x).float() * (1 + e[1]) + e[0], seq_lens, grid_sizes,\n freqs)\n with amp.autocast(dtype=torch.float32):\n x = x + y * e[2]\n\n # cross-attention & ffn function\n def cross_attn_ffn(x, context, context_lens, e):\n x = x + self.cross_attn(self.norm3(x), context, context_lens)\n y = self.ffn(self.norm2(x).float() * (1 + e[4]) + e[3])\n with amp.autocast(dtype=torch.float32):\n x = x + y * e[5]\n return x\n\n x = cross_attn_ffn(x, context, context_lens, e)\n return x\n\n\nclass Head(nn.Module):\n\n def __init__(self, dim, out_dim, patch_size, eps=1e-6):\n super().__init__()\n self.dim = dim\n self.out_dim = out_dim\n self.patch_size = patch_size\n self.eps = eps\n\n # layers\n out_dim = math.prod(patch_size) * out_dim\n self.norm = WanLayerNorm(dim, eps)\n self.head = nn.Linear(dim, out_dim)\n\n # modulation\n self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)\n\n def forward(self, x, e):\n r\"\"\"\n Args:\n x(Tensor): Shape [B, L1, C]\n e(Tensor): Shape [B, C]\n \"\"\"\n assert e.dtype == torch.float32\n with amp.autocast(dtype=torch.float32):\n e = (self.modulation + e.unsqueeze(1)).chunk(2, dim=1)\n x = (self.head(self.norm(x) * (1 + e[1]) + e[0]))\n return x\n\n\nclass MLPProj(torch.nn.Module):\n\n def __init__(self, in_dim, out_dim, flf_pos_emb=False):\n super().__init__()\n\n self.proj = torch.nn.Sequential(\n 
torch.nn.LayerNorm(in_dim), torch.nn.Linear(in_dim, in_dim),\n torch.nn.GELU(), torch.nn.Linear(in_dim, out_dim),\n torch.nn.LayerNorm(out_dim))\n if flf_pos_emb: # NOTE: we only use this for `flf2v`\n self.emb_pos = nn.Parameter(\n torch.zeros(1, FIRST_LAST_FRAME_CONTEXT_TOKEN_NUMBER, 1280))\n\n def forward(self, image_embeds):\n if hasattr(self, 'emb_pos'):\n bs, n, d = image_embeds.shape\n image_embeds = image_embeds.view(-1, 2 * n, d)\n image_embeds = image_embeds + self.emb_pos\n clip_extra_context_tokens = self.proj(image_embeds)\n return clip_extra_context_tokens\n\n\nclass WanModel(ModelMixin, ConfigMixin):\n r\"\"\"\n Wan diffusion backbone supporting both text-to-video and image-to-video.\n \"\"\"\n\n ignore_for_config = [\n 'patch_size', 'cross_attn_norm', 'qk_norm', 'text_dim', 'window_size'\n ]\n _no_split_modules = ['WanAttentionBlock']\n\n @register_to_config\n def __init__(self,\n model_type='t2v',\n patch_size=(1, 2, 2),\n text_len=512,\n in_dim=16,\n dim=2048,\n ffn_dim=8192,\n freq_dim=256,\n text_dim=4096,\n out_dim=16,\n num_heads=16,\n num_layers=32,\n window_size=(-1, -1),\n qk_norm=True,\n cross_attn_norm=True,\n eps=1e-6):\n r\"\"\"\n Initialize the diffusion model backbone.\n\n Args:\n model_type (`str`, *optional*, defaults to 't2v'):\n Model variant - 't2v' (text-to-video) or 'i2v' (image-to-video) or 'flf2v' (first-last-frame-to-video) or 'vace'\n patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):\n 3D patch dimensions for video embedding (t_patch, h_patch, w_patch)\n text_len (`int`, *optional*, defaults to 512):\n Fixed length for text embeddings\n in_dim (`int`, *optional*, defaults to 16):\n Input video channels (C_in)\n dim (`int`, *optional*, defaults to 2048):\n Hidden dimension of the transformer\n ffn_dim (`int`, *optional*, defaults to 8192):\n Intermediate dimension in feed-forward network\n freq_dim (`int`, *optional*, defaults to 256):\n Dimension for sinusoidal time embeddings\n text_dim (`int`, *optional*, defaults to 4096):\n Input dimension for text embeddings\n out_dim (`int`, *optional*, defaults to 16):\n Output video channels (C_out)\n num_heads (`int`, *optional*, defaults to 16):\n Number of attention heads\n num_layers (`int`, *optional*, defaults to 32):\n Number of transformer blocks\n window_size (`tuple`, *optional*, defaults to (-1, -1)):\n Window size for local attention (-1 indicates global attention)\n qk_norm (`bool`, *optional*, defaults to True):\n Enable query/key normalization\n cross_attn_norm (`bool`, *optional*, defaults to False):\n Enable cross-attention normalization\n eps (`float`, *optional*, defaults to 1e-6):\n Epsilon value for normalization layers\n \"\"\"\n\n super().__init__()\n\n assert model_type in ['t2v', 'i2v', 'flf2v', 'vace']\n self.model_type = model_type\n\n self.patch_size = patch_size\n self.text_len = text_len\n self.in_dim = in_dim\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.freq_dim = freq_dim\n self.text_dim = text_dim\n self.out_dim = out_dim\n self.num_heads = num_heads\n self.num_layers = num_layers\n self.window_size = window_size\n self.qk_norm = qk_norm\n self.cross_attn_norm = cross_attn_norm\n self.eps = eps\n\n # embeddings\n self.patch_embedding = nn.Conv3d(\n in_dim, dim, kernel_size=patch_size, stride=patch_size)\n self.text_embedding = nn.Sequential(\n nn.Linear(text_dim, dim), nn.GELU(approximate='tanh'),\n nn.Linear(dim, dim))\n\n self.time_embedding = nn.Sequential(\n nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim))\n self.time_projection = 
nn.Sequential(nn.SiLU(), nn.Linear(dim, dim * 6))\n\n # blocks\n cross_attn_type = 't2v_cross_attn' if model_type == 't2v' else 'i2v_cross_attn'\n self.blocks = nn.ModuleList([\n WanAttentionBlock(cross_attn_type, dim, ffn_dim, num_heads,\n window_size, qk_norm, cross_attn_norm, eps)\n for _ in range(num_layers)\n ])\n\n # head\n self.head = Head(dim, out_dim, patch_size, eps)\n\n # buffers (don't use register_buffer otherwise dtype will be changed in to())\n assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0\n d = dim // num_heads\n self.freqs = torch.cat([\n rope_params(1024, d - 4 * (d // 6)),\n rope_params(1024, 2 * (d // 6)),\n rope_params(1024, 2 * (d // 6))\n ],\n dim=1)\n\n if model_type == 'i2v' or model_type == 'flf2v':\n self.img_emb = MLPProj(1280, dim, flf_pos_emb=model_type == 'flf2v')\n\n # initialize weights\n self.init_weights()\n\n def forward(\n self,\n x,\n t,\n context,\n seq_len,\n clip_fea=None,\n y=None,\n ):\n r\"\"\"\n Forward pass through the diffusion model\n\n Args:\n x (List[Tensor]):\n List of input video tensors, each with shape [C_in, F, H, W]\n t (Tensor):\n Diffusion timesteps tensor of shape [B]\n context (List[Tensor]):\n List of text embeddings each with shape [L, C]\n seq_len (`int`):\n Maximum sequence length for positional encoding\n clip_fea (Tensor, *optional*):\n CLIP image features for image-to-video mode or first-last-frame-to-video mode\n y (List[Tensor], *optional*):\n Conditional video inputs for image-to-video mode, same shape as x\n\n Returns:\n List[Tensor]:\n List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]\n \"\"\"\n if self.model_type == 'i2v' or self.model_type == 'flf2v':\n assert clip_fea is not None and y is not None\n # params\n device = self.patch_embedding.weight.device\n if self.freqs.device != device:\n self.freqs = self.freqs.to(device)\n\n if y is not None:\n x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]\n\n # embeddings\n x = [self.patch_embedding(u.unsqueeze(0)) for u in x]\n grid_sizes = torch.stack(\n [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])\n x = [u.flatten(2).transpose(1, 2) for u in x]\n seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)\n assert seq_lens.max() <= seq_len\n x = torch.cat([\n torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))],\n dim=1) for u in x\n ])\n\n # time embeddings\n with amp.autocast(dtype=torch.float32):\n e = self.time_embedding(\n sinusoidal_embedding_1d(self.freq_dim, t).float())\n e0 = self.time_projection(e).unflatten(1, (6, self.dim))\n assert e.dtype == torch.float32 and e0.dtype == torch.float32\n\n # context\n context_lens = None\n context = self.text_embedding(\n torch.stack([\n torch.cat(\n [u, u.new_zeros(self.text_len - u.size(0), u.size(1))])\n for u in context\n ]))\n\n if clip_fea is not None:\n context_clip = self.img_emb(clip_fea) # bs x 257 (x2) x dim\n context = torch.concat([context_clip, context], dim=1)\n\n # arguments\n kwargs = dict(\n e=e0,\n seq_lens=seq_lens,\n grid_sizes=grid_sizes,\n freqs=self.freqs,\n context=context,\n context_lens=context_lens)\n\n for block in self.blocks:\n x = block(x, **kwargs)\n\n # head\n x = self.head(x, e)\n\n # unpatchify\n x = self.unpatchify(x, grid_sizes)\n return [u.float() for u in x]\n\n def unpatchify(self, x, grid_sizes):\n r\"\"\"\n Reconstruct video tensors from patch embeddings.\n\n Args:\n x (List[Tensor]):\n List of patchified features, each with shape [L, C_out * prod(patch_size)]\n grid_sizes (Tensor):\n Original 
spatial-temporal grid dimensions before patching,\n shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)\n\n Returns:\n List[Tensor]:\n Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]\n \"\"\"\n\n c = self.out_dim\n out = []\n for u, v in zip(x, grid_sizes.tolist()):\n u = u[:math.prod(v)].view(*v, *self.patch_size, c)\n u = torch.einsum('fhwpqrc->cfphqwr', u)\n u = u.reshape(c, *[i * j for i, j in zip(v, self.patch_size)])\n out.append(u)\n return out\n\n def init_weights(self):\n r\"\"\"\n Initialize model parameters using Xavier initialization.\n \"\"\"\n\n # basic init\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n # init embeddings\n nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))\n for m in self.text_embedding.modules():\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=.02)\n for m in self.time_embedding.modules():\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=.02)\n\n # init output layer\n nn.init.zeros_(self.head.head.weight)\n"], ["/Wan2.1/generate.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.\nimport argparse\nimport logging\nimport os\nimport sys\nimport warnings\nfrom datetime import datetime\n\nwarnings.filterwarnings('ignore')\n\nimport random\n\nimport torch\nimport torch.distributed as dist\nfrom PIL import Image\n\nimport wan\nfrom wan.configs import MAX_AREA_CONFIGS, SIZE_CONFIGS, SUPPORTED_SIZES, WAN_CONFIGS\nfrom wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander\nfrom wan.utils.utils import cache_image, cache_video, str2bool\n\n\nEXAMPLE_PROMPT = {\n \"t2v-1.3B\": {\n \"prompt\":\n \"Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.\",\n },\n \"t2v-14B\": {\n \"prompt\":\n \"Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.\",\n },\n \"t2i-14B\": {\n \"prompt\": \"一个朴素端庄的美人\",\n },\n \"i2v-14B\": {\n \"prompt\":\n \"Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. 
A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.\",\n \"image\":\n \"examples/i2v_input.JPG\",\n },\n \"flf2v-14B\": {\n \"prompt\":\n \"CG动画风格,一只蓝色的小鸟从地面起飞,煽动翅膀。小鸟羽毛细腻,胸前有独特的花纹,背景是蓝天白云,阳光明媚。镜跟随小鸟向上移动,展现出小鸟飞翔的姿态和天空的广阔。近景,仰视视角。\",\n \"first_frame\":\n \"examples/flf2v_input_first_frame.png\",\n \"last_frame\":\n \"examples/flf2v_input_last_frame.png\",\n },\n \"vace-1.3B\": {\n \"src_ref_images\":\n 'examples/girl.png,examples/snake.png',\n \"prompt\":\n \"在一个欢乐而充满节日气氛的场景中,穿着鲜艳红色春服的小女孩正与她的可爱卡通蛇嬉戏。她的春服上绣着金色吉祥图案,散发着喜庆的气息,脸上洋溢着灿烂的笑容。蛇身呈现出亮眼的绿色,形状圆润,宽大的眼睛让它显得既友善又幽默。小女孩欢快地用手轻轻抚摸着蛇的头部,共同享受着这温馨的时刻。周围五彩斑斓的灯笼和彩带装饰着环境,阳光透过洒在她们身上,营造出一个充满友爱与幸福的新年氛围。\"\n },\n \"vace-14B\": {\n \"src_ref_images\":\n 'examples/girl.png,examples/snake.png',\n \"prompt\":\n \"在一个欢乐而充满节日气氛的场景中,穿着鲜艳红色春服的小女孩正与她的可爱卡通蛇嬉戏。她的春服上绣着金色吉祥图案,散发着喜庆的气息,脸上洋溢着灿烂的笑容。蛇身呈现出亮眼的绿色,形状圆润,宽大的眼睛让它显得既友善又幽默。小女孩欢快地用手轻轻抚摸着蛇的头部,共同享受着这温馨的时刻。周围五彩斑斓的灯笼和彩带装饰着环境,阳光透过洒在她们身上,营造出一个充满友爱与幸福的新年氛围。\"\n }\n}\n\n\ndef _validate_args(args):\n # Basic check\n assert args.ckpt_dir is not None, \"Please specify the checkpoint directory.\"\n assert args.task in WAN_CONFIGS, f\"Unsupport task: {args.task}\"\n assert args.task in EXAMPLE_PROMPT, f\"Unsupport task: {args.task}\"\n\n # The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.\n if args.sample_steps is None:\n args.sample_steps = 50\n if \"i2v\" in args.task:\n args.sample_steps = 40\n\n if args.sample_shift is None:\n args.sample_shift = 5.0\n if \"i2v\" in args.task and args.size in [\"832*480\", \"480*832\"]:\n args.sample_shift = 3.0\n elif \"flf2v\" in args.task or \"vace\" in args.task:\n args.sample_shift = 16\n\n # The default number of frames are 1 for text-to-image tasks and 81 for other tasks.\n if args.frame_num is None:\n args.frame_num = 1 if \"t2i\" in args.task else 81\n\n # T2I frame_num check\n if \"t2i\" in args.task:\n assert args.frame_num == 1, f\"Unsupport frame_num {args.frame_num} for task {args.task}\"\n\n args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(\n 0, sys.maxsize)\n # Size check\n assert args.size in SUPPORTED_SIZES[\n args.\n task], f\"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}\"\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generate a image or video from a text prompt or image using Wan\"\n )\n parser.add_argument(\n \"--task\",\n type=str,\n default=\"t2v-14B\",\n choices=list(WAN_CONFIGS.keys()),\n help=\"The task to run.\")\n parser.add_argument(\n \"--size\",\n type=str,\n default=\"1280*720\",\n choices=list(SIZE_CONFIGS.keys()),\n help=\"The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image.\"\n )\n parser.add_argument(\n \"--frame_num\",\n type=int,\n default=None,\n help=\"How many frames to sample from a image or video. 
The number should be 4n+1\"\n )\n parser.add_argument(\n \"--ckpt_dir\",\n type=str,\n default=None,\n help=\"The path to the checkpoint directory.\")\n parser.add_argument(\n \"--offload_model\",\n type=str2bool,\n default=None,\n help=\"Whether to offload the model to CPU after each model forward, reducing GPU memory usage.\"\n )\n parser.add_argument(\n \"--ulysses_size\",\n type=int,\n default=1,\n help=\"The size of the ulysses parallelism in DiT.\")\n parser.add_argument(\n \"--ring_size\",\n type=int,\n default=1,\n help=\"The size of the ring attention parallelism in DiT.\")\n parser.add_argument(\n \"--t5_fsdp\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use FSDP for T5.\")\n parser.add_argument(\n \"--t5_cpu\",\n action=\"store_true\",\n default=False,\n help=\"Whether to place T5 model on CPU.\")\n parser.add_argument(\n \"--dit_fsdp\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use FSDP for DiT.\")\n parser.add_argument(\n \"--save_file\",\n type=str,\n default=None,\n help=\"The file to save the generated image or video to.\")\n parser.add_argument(\n \"--src_video\",\n type=str,\n default=None,\n help=\"The file of the source video. Default None.\")\n parser.add_argument(\n \"--src_mask\",\n type=str,\n default=None,\n help=\"The file of the source mask. Default None.\")\n parser.add_argument(\n \"--src_ref_images\",\n type=str,\n default=None,\n help=\"The file list of the source reference images. Separated by ','. Default None.\"\n )\n parser.add_argument(\n \"--prompt\",\n type=str,\n default=None,\n help=\"The prompt to generate the image or video from.\")\n parser.add_argument(\n \"--use_prompt_extend\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use prompt extend.\")\n parser.add_argument(\n \"--prompt_extend_method\",\n type=str,\n default=\"local_qwen\",\n choices=[\"dashscope\", \"local_qwen\"],\n help=\"The prompt extend method to use.\")\n parser.add_argument(\n \"--prompt_extend_model\",\n type=str,\n default=None,\n help=\"The prompt extend model to use.\")\n parser.add_argument(\n \"--prompt_extend_target_lang\",\n type=str,\n default=\"zh\",\n choices=[\"zh\", \"en\"],\n help=\"The target language of prompt extend.\")\n parser.add_argument(\n \"--base_seed\",\n type=int,\n default=-1,\n help=\"The seed to use for generating the image or video.\")\n parser.add_argument(\n \"--image\",\n type=str,\n default=None,\n help=\"[image to video] The image to generate the video from.\")\n parser.add_argument(\n \"--first_frame\",\n type=str,\n default=None,\n help=\"[first-last frame to video] The image (first frame) to generate the video from.\"\n )\n parser.add_argument(\n \"--last_frame\",\n type=str,\n default=None,\n help=\"[first-last frame to video] The image (last frame) to generate the video from.\"\n )\n parser.add_argument(\n \"--sample_solver\",\n type=str,\n default='unipc',\n choices=['unipc', 'dpm++'],\n help=\"The solver used to sample.\")\n parser.add_argument(\n \"--sample_steps\", type=int, default=None, help=\"The sampling steps.\")\n parser.add_argument(\n \"--sample_shift\",\n type=float,\n default=None,\n help=\"Sampling shift factor for flow matching schedulers.\")\n parser.add_argument(\n \"--sample_guide_scale\",\n type=float,\n default=5.0,\n help=\"Classifier free guidance scale.\")\n\n args = parser.parse_args()\n\n _validate_args(args)\n\n return args\n\n\ndef _init_logging(rank):\n # logging\n if rank == 0:\n # set format\n logging.basicConfig(\n level=logging.INFO,\n 
format=\"[%(asctime)s] %(levelname)s: %(message)s\",\n handlers=[logging.StreamHandler(stream=sys.stdout)])\n else:\n logging.basicConfig(level=logging.ERROR)\n\n\ndef generate(args):\n rank = int(os.getenv(\"RANK\", 0))\n world_size = int(os.getenv(\"WORLD_SIZE\", 1))\n local_rank = int(os.getenv(\"LOCAL_RANK\", 0))\n device = local_rank\n _init_logging(rank)\n\n if args.offload_model is None:\n args.offload_model = False if world_size > 1 else True\n logging.info(\n f\"offload_model is not specified, set to {args.offload_model}.\")\n if world_size > 1:\n torch.cuda.set_device(local_rank)\n dist.init_process_group(\n backend=\"nccl\",\n init_method=\"env://\",\n rank=rank,\n world_size=world_size)\n else:\n assert not (\n args.t5_fsdp or args.dit_fsdp\n ), f\"t5_fsdp and dit_fsdp are not supported in non-distributed environments.\"\n assert not (\n args.ulysses_size > 1 or args.ring_size > 1\n ), f\"context parallel are not supported in non-distributed environments.\"\n\n if args.ulysses_size > 1 or args.ring_size > 1:\n assert args.ulysses_size * args.ring_size == world_size, f\"The number of ulysses_size and ring_size should be equal to the world size.\"\n from xfuser.core.distributed import (\n init_distributed_environment,\n initialize_model_parallel,\n )\n init_distributed_environment(\n rank=dist.get_rank(), world_size=dist.get_world_size())\n\n initialize_model_parallel(\n sequence_parallel_degree=dist.get_world_size(),\n ring_degree=args.ring_size,\n ulysses_degree=args.ulysses_size,\n )\n\n if args.use_prompt_extend:\n if args.prompt_extend_method == \"dashscope\":\n prompt_expander = DashScopePromptExpander(\n model_name=args.prompt_extend_model,\n is_vl=\"i2v\" in args.task or \"flf2v\" in args.task)\n elif args.prompt_extend_method == \"local_qwen\":\n prompt_expander = QwenPromptExpander(\n model_name=args.prompt_extend_model,\n is_vl=\"i2v\" in args.task,\n device=rank)\n else:\n raise NotImplementedError(\n f\"Unsupport prompt_extend_method: {args.prompt_extend_method}\")\n\n cfg = WAN_CONFIGS[args.task]\n if args.ulysses_size > 1:\n assert cfg.num_heads % args.ulysses_size == 0, f\"`{cfg.num_heads=}` cannot be divided evenly by `{args.ulysses_size=}`.\"\n\n logging.info(f\"Generation job args: {args}\")\n logging.info(f\"Generation model config: {cfg}\")\n\n if dist.is_initialized():\n base_seed = [args.base_seed] if rank == 0 else [None]\n dist.broadcast_object_list(base_seed, src=0)\n args.base_seed = base_seed[0]\n\n if \"t2v\" in args.task or \"t2i\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanT2V pipeline.\")\n wan_t2v = wan.WanT2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size 
> 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\n f\"Generating {'image' if 't2i' in args.task else 'video'} ...\")\n video = wan_t2v.generate(\n args.prompt,\n size=SIZE_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n\n elif \"i2v\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n if args.image is None:\n args.image = EXAMPLE_PROMPT[args.task][\"image\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n logging.info(f\"Input image: {args.image}\")\n\n img = Image.open(args.image).convert(\"RGB\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n image=img,\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanI2V pipeline.\")\n wan_i2v = wan.WanI2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\"Generating video ...\")\n video = wan_i2v.generate(\n args.prompt,\n img,\n max_area=MAX_AREA_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n elif \"flf2v\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n if args.first_frame is None or args.last_frame is None:\n args.first_frame = EXAMPLE_PROMPT[args.task][\"first_frame\"]\n args.last_frame = EXAMPLE_PROMPT[args.task][\"last_frame\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n logging.info(f\"Input first frame: {args.first_frame}\")\n logging.info(f\"Input last frame: {args.last_frame}\")\n first_frame = Image.open(args.first_frame).convert(\"RGB\")\n last_frame = Image.open(args.last_frame).convert(\"RGB\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n image=[first_frame, last_frame],\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanFLF2V pipeline.\")\n wan_flf2v = wan.WanFLF2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n 
t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\"Generating video ...\")\n video = wan_flf2v.generate(\n args.prompt,\n first_frame,\n last_frame,\n max_area=MAX_AREA_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n elif \"vace\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n args.src_video = EXAMPLE_PROMPT[args.task].get(\"src_video\", None)\n args.src_mask = EXAMPLE_PROMPT[args.task].get(\"src_mask\", None)\n args.src_ref_images = EXAMPLE_PROMPT[args.task].get(\n \"src_ref_images\", None)\n\n logging.info(f\"Input prompt: {args.prompt}\")\n if args.use_prompt_extend and args.use_prompt_extend != 'plain':\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt = prompt_expander.forward(args.prompt)\n logging.info(\n f\"Prompt extended from '{args.prompt}' to '{prompt}'\")\n input_prompt = [prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating VACE pipeline.\")\n wan_vace = wan.WanVace(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n src_video, src_mask, src_ref_images = wan_vace.prepare_source(\n [args.src_video], [args.src_mask], [\n None if args.src_ref_images is None else\n args.src_ref_images.split(',')\n ], args.frame_num, SIZE_CONFIGS[args.size], device)\n\n logging.info(f\"Generating video...\")\n video = wan_vace.generate(\n args.prompt,\n src_video,\n src_mask,\n src_ref_images,\n size=SIZE_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n else:\n raise ValueError(f\"Unkown task type: {args.task}\")\n\n if rank == 0:\n if args.save_file is None:\n formatted_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n formatted_prompt = args.prompt.replace(\" \", \"_\").replace(\"/\",\n \"_\")[:50]\n suffix = '.png' if \"t2i\" in args.task else '.mp4'\n args.save_file = f\"{args.task}_{args.size.replace('*','x') if sys.platform=='win32' else args.size}_{args.ulysses_size}_{args.ring_size}_{formatted_prompt}_{formatted_time}\" + suffix\n\n if \"t2i\" in args.task:\n logging.info(f\"Saving generated image to {args.save_file}\")\n cache_image(\n tensor=video.squeeze(1)[None],\n save_file=args.save_file,\n nrow=1,\n normalize=True,\n value_range=(-1, 1))\n else:\n logging.info(f\"Saving generated video to {args.save_file}\")\n cache_video(\n tensor=video[None],\n save_file=args.save_file,\n fps=cfg.sample_fps,\n nrow=1,\n normalize=True,\n value_range=(-1, 1))\n logging.info(\"Finished.\")\n\n\nif __name__ == \"__main__\":\n args = _parse_args()\n generate(args)\n"], ["/Wan2.1/gradio/i2v_14B_singleGPU.py", "# Copyright 2024-2025 The Alibaba Wan Team Authors. 
All rights reserved.\nimport argparse\nimport gc\nimport os\nimport os.path as osp\nimport sys\nimport warnings\n\nimport gradio as gr\n\nwarnings.filterwarnings('ignore')\n\n# Model\nsys.path.insert(\n 0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2]))\nimport wan\nfrom wan.configs import MAX_AREA_CONFIGS, WAN_CONFIGS\nfrom wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander\nfrom wan.utils.utils import cache_video\n\n# Global Var\nprompt_expander = None\nwan_i2v_480P = None\nwan_i2v_720P = None\n\n\n# Button Func\ndef load_model(value):\n global wan_i2v_480P, wan_i2v_720P\n\n if value == '------':\n print(\"No model loaded\")\n return '------'\n\n if value == '720P':\n if args.ckpt_dir_720p is None:\n print(\"Please specify the checkpoint directory for 720P model\")\n return '------'\n if wan_i2v_720P is not None:\n pass\n else:\n del wan_i2v_480P\n gc.collect()\n wan_i2v_480P = None\n\n print(\"load 14B-720P i2v model...\", end='', flush=True)\n cfg = WAN_CONFIGS['i2v-14B']\n wan_i2v_720P = wan.WanI2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir_720p,\n device_id=0,\n rank=0,\n t5_fsdp=False,\n dit_fsdp=False,\n use_usp=False,\n )\n print(\"done\", flush=True)\n return '720P'\n\n if value == '480P':\n if args.ckpt_dir_480p is None:\n print(\"Please specify the checkpoint directory for 480P model\")\n return '------'\n if wan_i2v_480P is not None:\n pass\n else:\n del wan_i2v_720P\n gc.collect()\n wan_i2v_720P = None\n\n print(\"load 14B-480P i2v model...\", end='', flush=True)\n cfg = WAN_CONFIGS['i2v-14B']\n wan_i2v_480P = wan.WanI2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir_480p,\n device_id=0,\n rank=0,\n t5_fsdp=False,\n dit_fsdp=False,\n use_usp=False,\n )\n print(\"done\", flush=True)\n return '480P'\n return value\n\n\ndef prompt_enc(prompt, img, tar_lang):\n print('prompt extend...')\n if img is None:\n print('Please upload an image')\n return prompt\n global prompt_expander\n prompt_output = prompt_expander(\n prompt, image=img, tar_lang=tar_lang.lower())\n if prompt_output.status == False:\n return prompt\n else:\n return prompt_output.prompt\n\n\ndef i2v_generation(img2vid_prompt, img2vid_image, resolution, sd_steps,\n guide_scale, shift_scale, seed, n_prompt):\n # print(f\"{img2vid_prompt},{resolution},{sd_steps},{guide_scale},{shift_scale},{seed},{n_prompt}\")\n\n if resolution == '------':\n print(\n 'Please specify at least one resolution ckpt dir or specify the resolution'\n )\n return None\n\n else:\n if resolution == '720P':\n global wan_i2v_720P\n video = wan_i2v_720P.generate(\n img2vid_prompt,\n img2vid_image,\n max_area=MAX_AREA_CONFIGS['720*1280'],\n shift=shift_scale,\n sampling_steps=sd_steps,\n guide_scale=guide_scale,\n n_prompt=n_prompt,\n seed=seed,\n offload_model=True)\n else:\n global wan_i2v_480P\n video = wan_i2v_480P.generate(\n img2vid_prompt,\n img2vid_image,\n max_area=MAX_AREA_CONFIGS['480*832'],\n shift=shift_scale,\n sampling_steps=sd_steps,\n guide_scale=guide_scale,\n n_prompt=n_prompt,\n seed=seed,\n offload_model=True)\n\n cache_video(\n tensor=video[None],\n save_file=\"example.mp4\",\n fps=16,\n nrow=1,\n normalize=True,\n value_range=(-1, 1))\n\n return \"example.mp4\"\n\n\n# Interface\ndef gradio_interface():\n with gr.Blocks() as demo:\n gr.Markdown(\"\"\"\n
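
A minimal usage sketch, assuming the Wan2.1 package is installed and importable as `wan`: the completed wan/configs/__init__.py exposes the WAN_CONFIGS, SIZE_CONFIGS, MAX_AREA_CONFIGS and SUPPORTED_SIZES registries, and generate.py resolves the model config and output resolution through exactly these lookups.

# Minimal usage sketch (assumption: the Wan2.1 package is importable as `wan`).
# Mirrors the lookups performed in generate.py for the registries defined in
# wan/configs/__init__.py.
from wan.configs import MAX_AREA_CONFIGS, SIZE_CONFIGS, SUPPORTED_SIZES, WAN_CONFIGS

task = 't2v-1.3B'                    # any key of WAN_CONFIGS, e.g. 'i2v-14B', 'vace-14B'
size = '480*832'                     # must be listed in SUPPORTED_SIZES[task]
assert size in SUPPORTED_SIZES[task]

cfg = WAN_CONFIGS[task]              # EasyDict: cfg.num_heads, cfg.vae_stride, cfg.sample_fps, ...
size_hw = SIZE_CONFIGS[size]         # (480, 832), passed as `size=` to the T2V/VACE pipelines
max_area = MAX_AREA_CONFIGS[size]    # 480 * 832, passed as `max_area=` to the I2V/FLF2V pipelines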