""" Material Map Generator - Gradio Space Generate Normal, Roughness, and Displacement maps from diffuse textures using AI. Original project by Joey Ballentine: https://github.com/JoeyBallentine/Material-Map-Generator Models: ESRGAN-based "CX-Lite" trained for material map generation Architecture: RRDB-Net from ESRGAN by Xinntao """ import numpy as np import math import torch import torch.nn as nn from collections import OrderedDict # ==================== Architecture (from block.py) ==================== def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1): act_type = act_type.lower() if act_type == 'relu': layer = nn.ReLU(inplace) elif act_type == 'leakyrelu': layer = nn.LeakyReLU(neg_slope, inplace) elif act_type == 'prelu': layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope) else: raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type)) return layer def norm(norm_type, nc): norm_type = norm_type.lower() if norm_type == 'batch': layer = nn.BatchNorm2d(nc, affine=True) elif norm_type == 'instance': layer = nn.InstanceNorm2d(nc, affine=False) else: raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type)) return layer def pad(pad_type, padding): pad_type = pad_type.lower() if padding == 0: return None if pad_type == 'reflect': layer = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': layer = nn.ReplicationPad2d(padding) else: raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type)) return layer def get_valid_padding(kernel_size, dilation): kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) padding = (kernel_size - 1) // 2 return padding class ShortcutBlock(nn.Module): def __init__(self, submodule): super(ShortcutBlock, self).__init__() self.sub = submodule def forward(self, x): output = x + self.sub(x) return output def sequential(*args): if len(args) == 1: if isinstance(args[0], OrderedDict): raise NotImplementedError('sequential does not support OrderedDict input.') return args[0] modules = [] for module in args: if isinstance(module, nn.Sequential): for submodule in module.children(): modules.append(submodule) elif isinstance(module, nn.Module): modules.append(module) return nn.Sequential(*modules) def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA'): assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, out_nc) if norm_type else None return sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, in_nc) if norm_type else None return sequential(n, a, p, c) class ResidualDenseBlock_5C(nn.Module): def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='leakyrelu', mode='CNA'): super(ResidualDenseBlock_5C, self).__init__() self.conv1 = conv_block(nc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=act_type, mode=mode) self.conv2 = conv_block(nc+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=act_type, mode=mode) self.conv3 = conv_block(nc+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=act_type, mode=mode) self.conv4 = conv_block(nc+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=act_type, mode=mode) if mode == 'CNA': last_act = None else: last_act = act_type self.conv5 = conv_block(nc+4*gc, nc, 3, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=last_act, mode=mode) def forward(self, x): x1 = self.conv1(x) x2 = self.conv2(torch.cat((x, x1), 1)) x3 = self.conv3(torch.cat((x, x1, x2), 1)) x4 = self.conv4(torch.cat((x, x1, x2, x3), 1)) x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) return x5.mul(0.2) + x class RRDB(nn.Module): def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='leakyrelu', mode='CNA'): super(RRDB, self).__init__() self.RDB1 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, norm_type, act_type, mode) self.RDB2 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, norm_type, act_type, mode) self.RDB3 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, norm_type, act_type, mode) def forward(self, x): out = self.RDB1(x) out = self.RDB2(out) out = self.RDB3(out) return out.mul(0.2) + x def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='nearest'): upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode) conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=norm_type, act_type=act_type) return sequential(upsample, conv) def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='relu'): conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, pad_type=pad_type, norm_type=None, act_type=None) pixel_shuffle = nn.PixelShuffle(upscale_factor) n = norm(norm_type, out_nc) if norm_type else None a = act(act_type) if act_type else None return sequential(conv, pixel_shuffle, n, a) # ==================== RRDB_Net (from architecture.py) ==================== class RRDB_Net(nn.Module): def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv'): super(RRDB_Net, self).__init__() n_upscale = int(math.log(upscale, 2)) if upscale == 3: n_upscale = 1 fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None) rb_blocks = [RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)] LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode) if upsample_mode == 'upconv': upsample_block = upconv_blcok elif upsample_mode == 'pixelshuffle': upsample_block = pixelshuffle_block else: raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode) if upscale == 3: upsampler = upsample_block(nf, nf, 3, act_type=act_type) else: upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)] HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type) HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None) self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)), *upsampler, HR_conv0, HR_conv1) def forward(self, x): x = self.model(x) return x # ==================== Tile Processing (from imgops.py) ==================== def esrgan_launcher_split_merge(input_image, upscale_function, models, scale_factor=4, tile_size=512, tile_padding=0.125): width, height, depth = input_image.shape output_width = width * scale_factor output_height = height * scale_factor output_shape = (output_width, output_height, depth) output_images = [np.zeros(output_shape, np.uint8) for i in range(len(models))] tile_padding = math.ceil(tile_size * tile_padding) tile_size = math.ceil(tile_size / scale_factor) tiles_x = math.ceil(width / tile_size) tiles_y = math.ceil(height / tile_size) for y in range(tiles_y): for x in range(tiles_x): ofs_x = x * tile_size ofs_y = y * tile_size input_start_x = ofs_x input_end_x = min(ofs_x + tile_size, width) input_start_y = ofs_y input_end_y = min(ofs_y + tile_size, height) input_start_x_pad = max(input_start_x - tile_padding, 0) input_end_x_pad = min(input_end_x + tile_padding, width) input_start_y_pad = max(input_start_y - tile_padding, 0) input_end_y_pad = min(input_end_y + tile_padding, height) input_tile_width = input_end_x - input_start_x input_tile_height = input_end_y - input_start_y input_tile = input_image[input_start_x_pad:input_end_x_pad, input_start_y_pad:input_end_y_pad] for idx, model in enumerate(models): output_tile = upscale_function(input_tile, model) output_start_x = input_start_x * scale_factor output_end_x = input_end_x * scale_factor output_start_y = input_start_y * scale_factor output_end_y = input_end_y * scale_factor output_start_x_tile = (input_start_x - input_start_x_pad) * scale_factor output_end_x_tile = output_start_x_tile + input_tile_width * scale_factor output_start_y_tile = (input_start_y - input_start_y_pad) * scale_factor output_end_y_tile = output_start_y_tile + input_tile_height * scale_factor output_images[idx][output_start_x:output_end_x, output_start_y:output_end_y] = \ output_tile[output_start_x_tile:output_end_x_tile, output_start_y_tile:output_end_y_tile] return output_images # ==================== Model Loading ==================== # CPU Optimizations torch.set_num_threads(4) # Use available CPU cores torch.set_grad_enabled(False) # Disable gradient computation globally device = torch.device('cpu') normal_model = None other_model = None def load_models(): global normal_model, other_model if normal_model is None: print("Loading Normal Map model...") normal_model = RRDB_Net(3, 3, 32, 12, gc=32, upscale=1, norm_type=None, act_type='leakyrelu', mode='CNA', upsample_mode='upconv') normal_model.load_state_dict(torch.load('models/1x_NormalMapGenerator-CX-Lite_200000_G.pth', map_location=device, weights_only=True)) normal_model.eval() if other_model is None: print("Loading Roughness/Displacement model...") other_model = RRDB_Net(3, 3, 32, 12, gc=32, upscale=1, norm_type=None, act_type='leakyrelu', mode='CNA', upsample_mode='upconv') other_model.load_state_dict(torch.load('models/1x_FrankenMapGenerator-CX-Lite_215000_G.pth', map_location=device, weights_only=True)) other_model.eval() # ==================== Image Processing ==================== def process_image(img, model): """Process a single image through the model.""" img = np.array(img).astype(np.float32) / 255.0 if len(img.shape) == 2: img = np.stack([img, img, img], axis=-1) if img.shape[2] == 4: img = img[:, :, :3] tensor = torch.from_numpy(np.transpose(img.copy(), (2, 0, 1))).float().unsqueeze(0) with torch.inference_mode(): output = model(tensor).squeeze(0).clamp_(0, 1).numpy() del tensor # Explicit cleanup return (np.transpose(output, (1, 2, 0)) * 255).astype(np.uint8) def generate_maps(input_image): """Generate Normal, Roughness, and Displacement maps from input diffuse texture.""" if input_image is None: return None, None, None load_models() normal_map = process_image(input_image, normal_model) other_output = process_image(input_image, other_model) roughness = other_output[:, :, 1] displacement = other_output[:, :, 2] import gc; gc.collect() # Free memory return normal_map, roughness, displacement # ==================== Gradio Interface / CLI ==================== if __name__ == "__main__": import sys; from PIL import Image if len(sys.argv) > 1: [Image.fromarray(m).save(f"{sys.argv[1].rsplit('.',1)[0]}_{n}.png") for n,m in zip(["Normal","Roughness","Displacement"], generate_maps(Image.open(sys.argv[1])))] else: import gradio as gr with gr.Blocks(title="Material Map Generator") as demo: gr.Markdown("# Material Map Generator\nGenerate Normal, Roughness & Displacement maps from diffuse textures. [Credits: Joey Ballentine](https://github.com/JoeyBallentine/Material-Map-Generator)") with gr.Row(): with gr.Column(scale=1): input_img = gr.Image(type="pil", label="Diffuse Texture", height=200) btn = gr.Button("Generate Maps", variant="primary") gr.Examples(examples=["example.png"], inputs=input_img) with gr.Column(scale=3): with gr.Row(): normal_out = gr.Image(label="Normal", height=180) rough_out = gr.Image(label="Roughness", height=180) disp_out = gr.Image(label="Displacement", height=180) btn.click(fn=generate_maps, inputs=input_img, outputs=[normal_out, rough_out, disp_out]) demo.launch(theme=gr.themes.Soft(), ssr_mode=False)