# half-drop-demo/utils/tiling.py
# Author: Sean Powell
# Commit f27f339: Fix original image width calculation.
from typing import Optional
import diffusers
import torch
from torch import Tensor
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
def asymmetric_conv2d_conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
    """Run a 2D convolution whose implicit padding wraps around circularly.

    The layer's configured padding is applied explicitly in two passes --
    first along the width axis, then along the height axis -- using circular
    (wrap-around) mode, after which the convolution itself runs with zero
    padding. Replacing ``Conv2d._conv_forward`` with this function makes the
    layer's output tile seamlessly.

    Args:
        self: A Conv2d-like module supplying ``stride``, ``dilation``,
            ``groups`` and the precomputed ``_reversed_padding_repeated_twice``
            values.
        input (Tensor): Tensor to convolve.
        weight (Tensor): Convolution kernel weights.
        bias (Optional[Tensor]): Optional additive bias.

    Returns:
        Tensor: Convolution output after circular padding.
    """
    pads = self._reversed_padding_repeated_twice
    # Keep exposing the per-axis pad tuples as attributes, matching the
    # original implementation's observable side effect.
    self.paddingX = (pads[0], pads[1], 0, 0)
    self.paddingY = (0, 0, pads[2], pads[3])
    # Pad width first, then height, both with wrap-around semantics.
    padded = F.pad(F.pad(input, self.paddingX, mode='circular'), self.paddingY, mode='circular')
    return F.conv2d(padded, weight, bias, self.stride, _pair(0), self.dilation, self.groups)
def enable_circular_tiling(targets):
    """Patch every Conv2d inside *targets* so its padding wraps circularly.

    Walks each supplied module tree, finds all ``torch.nn.Conv2d`` instances,
    and rebinds their ``_conv_forward`` to
    ``asymmetric_conv2d_conv_forward`` so padding wraps around instead of
    zero-filling. Designed for diffusers pipelines (e.g.
    StableDiffusionXLPipeline) whose outputs should tile seamlessly.

    Args:
        targets (list): Neural network components (layers or whole models)
            to scan for Conv2d layers.

    Returns:
        None: The layers are modified in place.
    """
    for target in targets:
        for layer in target.modules():
            if not isinstance(layer, torch.nn.Conv2d):
                continue
            # LoRA-compatible convs route through lora_layer; install a
            # no-op stand-in when none is set so the patched forward path
            # is still exercised.
            if isinstance(layer, diffusers.models.lora.LoRACompatibleConv) and layer.lora_layer is None:
                layer.lora_layer = lambda *x: 0
            # Bind the circular-padding forward as this layer's own method.
            layer._conv_forward = asymmetric_conv2d_conv_forward.__get__(layer, torch.nn.Conv2d)
def compute_input_tile_width_for_desired_output(desired_output):
    """Return the input tile width that produces *desired_output* width.

    The input must be twice the desired output width, rounded to the
    nearest integer when a fractional width is supplied.
    NOTE(review): the factor of 2 presumably reflects the half-drop layout
    spanning two horizontal repeats -- confirm against the tiling pipeline.
    """
    doubled = 2 * desired_output
    return round(doubled)