Spaces:
Build error
Build error
vrevar committed on
Commit ·
04c78c7
1
Parent(s): 5778674
Add application file
Browse files- app.py +167 -0
- capture/__init__.py +6 -0
- capture/__pycache__/__init__.cpython-310.pyc +0 -0
- capture/callbacks/__init__.py +4 -0
- capture/callbacks/__pycache__/__init__.cpython-310.pyc +0 -0
- capture/callbacks/__pycache__/metrics.cpython-310.pyc +0 -0
- capture/callbacks/__pycache__/visualize.cpython-310.pyc +0 -0
- capture/callbacks/metrics.py +29 -0
- capture/callbacks/visualize.py +101 -0
- capture/data/.ipynb_checkpoints/source-checkpoint.py +158 -0
- capture/data/__pycache__/augment.cpython-310.pyc +0 -0
- capture/data/__pycache__/module.cpython-310.pyc +0 -0
- capture/data/__pycache__/source.cpython-310.pyc +0 -0
- capture/data/__pycache__/target.cpython-310.pyc +0 -0
- capture/data/__pycache__/utils.cpython-310.pyc +0 -0
- capture/data/augment.py +155 -0
- capture/data/download.py +39 -0
- capture/data/matlist/ambientcg +1182 -0
- capture/data/matlist/texsd +353 -0
- capture/data/module.py +218 -0
- capture/data/source.py +184 -0
- capture/data/target.py +74 -0
- capture/data/utils.py +47 -0
- capture/predict.yml +26 -0
- capture/render/__init__.py +4 -0
- capture/render/__pycache__/__init__.cpython-310.pyc +0 -0
- capture/render/__pycache__/main.cpython-310.pyc +0 -0
- capture/render/__pycache__/scene.cpython-310.pyc +0 -0
- capture/render/main.py +154 -0
- capture/render/scene.py +105 -0
- capture/source/__init__.py +5 -0
- capture/source/__pycache__/__init__.cpython-310.pyc +0 -0
- capture/source/__pycache__/loss.cpython-310.pyc +0 -0
- capture/source/__pycache__/model.cpython-310.pyc +0 -0
- capture/source/__pycache__/routine.cpython-310.pyc +0 -0
- capture/source/loss.py +147 -0
- capture/source/model.py +233 -0
- capture/source/routine.py +144 -0
- capture/utils/__init__.py +8 -0
- capture/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- capture/utils/__pycache__/cli.cpython-310.pyc +0 -0
- capture/utils/__pycache__/exp.cpython-310.pyc +0 -0
- capture/utils/__pycache__/log.cpython-310.pyc +0 -0
- capture/utils/__pycache__/model.cpython-310.pyc +0 -0
- capture/utils/cli.py +76 -0
- capture/utils/exp.py +77 -0
- capture/utils/log.py +47 -0
- capture/utils/model.py +43 -0
- fabric_diffusion.py +139 -0
- requirements.txt +22 -0
app.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from PIL import Image
|
| 3 |
+
import numpy as np
|
| 4 |
+
import cv2
|
| 5 |
+
from zipfile import ZipFile
|
| 6 |
+
# Функция обработки изображения
|
| 7 |
+
import gradio as gr
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import numpy as np
|
| 10 |
+
import os
|
| 11 |
+
|
| 12 |
+
import shutil
|
| 13 |
+
import yaml
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
from fabric_diffusion import FabricDiffusionPipeline
|
| 16 |
+
import urllib.request
|
| 17 |
+
import tarfile
|
| 18 |
+
import capture
|
| 19 |
+
from pytorch_lightning import Trainer
|
| 20 |
+
import random
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
ZIP_FOLDER = "./ZIPS"
|
| 24 |
+
os.makedirs(ZIP_FOLDER, exist_ok=True)
|
| 25 |
+
|
| 26 |
+
def zip_folder(folder_path, output_zip):
    """Archive every file under ``folder_path`` into ``output_zip``.

    Paths inside the archive are stored relative to ``folder_path`` so the
    zip unpacks with the same sub-directory layout.
    """
    with ZipFile(output_zip, 'w') as archive:
        for current_dir, _subdirs, filenames in os.walk(folder_path):
            for name in filenames:
                absolute = os.path.join(current_dir, name)
                relative = os.path.relpath(absolute, folder_path)
                archive.write(absolute, relative)
|
| 32 |
+
|
| 33 |
+
def set_deterministic(seed=42):
    """Seed every RNG the app relies on (python, numpy, torch CPU and CUDA)."""
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
|
| 38 |
+
|
| 39 |
+
def load_config(config_path):
    """Parse the YAML file at ``config_path`` and return its contents."""
    with open(config_path, 'r') as handle:
        parsed = yaml.safe_load(handle)
    return parsed
|
| 42 |
+
|
| 43 |
+
def run_flatten_texture(pipeline, input_image_path, output_path, n_samples=3):
    """Generate ``n_samples`` flattened texture images from one input photo.

    Results are written to ``output_path`` as ``<name>_gen_<i>.png``, where
    ``<name>`` is the input filename without its extension.
    ``pipeline`` is expected to expose ``load_patch_data`` and
    ``flatten_texture`` (see FabricDiffusionPipeline in fabric_diffusion.py).
    """
    os.makedirs(output_path, exist_ok=True)
    # Base name of the input (extension stripped) is reused for output files.
    texture_name = os.path.splitext(os.path.basename(input_image_path))[0]
    texture_patch = pipeline.load_patch_data(input_image_path)
    gen_imgs = pipeline.flatten_texture(texture_patch, n_samples=n_samples)
    # Generated images appear to be PIL-like (have .save) — TODO confirm.
    for i, gen_img in enumerate(gen_imgs):
        gen_img.save(os.path.join(output_path, f'{texture_name}_gen_{i}.png'))
|
| 50 |
+
|
| 51 |
+
def organize_images_into_structure(source_folder, new_folder):
    """Copy each image in ``source_folder`` into ``new_folder/<stem>/outputs/``.

    Only regular files ending in .png/.jpg/.jpeg (case-insensitive) are
    copied; every other entry in ``source_folder`` is ignored.
    """
    image_suffixes = ('.png', '.jpg', '.jpeg')
    os.makedirs(new_folder, exist_ok=True)
    for entry in os.listdir(source_folder):
        src = os.path.join(source_folder, entry)
        if not os.path.isfile(src):
            continue
        if not entry.lower().endswith(image_suffixes):
            continue
        stem, _ext = os.path.splitext(entry)
        dst_dir = os.path.join(new_folder, stem, "outputs")
        os.makedirs(dst_dir, exist_ok=True)
        shutil.copy(src, os.path.join(dst_dir, entry))
|
| 61 |
+
|
| 62 |
+
# Create a directory for saving if it doesn't exist.
# Uploaded originals and user crops are written here by process_image().
if not os.path.exists("saved_images"):
    os.makedirs("saved_images")
|
| 65 |
+
|
| 66 |
+
def decode_rgba(image: Image.Image) -> Image.Image:
    """Crop an RGBA image to the bounding box of its fully-opaque pixels.

    The gradio ImageEditor composite marks the user-selected region with
    alpha == 255, so the bounding box of those pixels is the crop the user
    drew.  The cropped region is returned as an RGB image.

    Fixes two crashes in the original:
    - an image without an alpha channel raised IndexError on channel 3;
      it is now returned whole, converted to RGB;
    - an image with no fully-opaque pixel raised ValueError (min of an
      empty array); the full frame is now returned as RGB instead.
    """
    image_array = np.array(image)
    # No alpha channel (e.g. a plain RGB upload): nothing to decode.
    if image_array.ndim != 3 or image_array.shape[2] < 4:
        return image.convert("RGB")

    alpha_channel = image_array[:, :, 3]
    coords = np.argwhere(alpha_channel == 255)
    if coords.size == 0:
        # No selection marker present: keep the full frame.
        return Image.fromarray(image_array[:, :, :3], "RGB")

    y_min, x_min = coords.min(axis=0)
    y_max, x_max = coords.max(axis=0) + 1  # include the max boundary
    cropped_rgb_array = image_array[y_min:y_max, x_min:x_max, :3]
    return Image.fromarray(cropped_rgb_array, "RGB")
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def process_image(orig_image, image_data):
    """Save the uploaded image and the user's crop under ``saved_images/``.

    Parameters
    ----------
    orig_image : PIL.Image.Image
        The unmodified upload from the gradio Image component.
    image_data : dict
        Payload from gradio's ImageEditor; its ``'composite'`` entry holds
        the edited RGBA image whose opaque region marks the crop.

    Returns
    -------
    tuple
        (original image, its path, decoded crop, its path, timestamp string).
    """
    # Get the cropped image (composite) from the editor payload.
    cropped_image = image_data['composite']
    # Timestamped filenames avoid overwriting files from earlier sessions.
    import datetime
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    original_filename = os.path.join("saved_images", f"original_{timestamp}.png")
    cropped_filename = os.path.join("saved_images", f"cropped_{timestamp}.png")

    orig_image.save(original_filename)

    # decode_rgba trims the RGBA composite down to the opaque selection.
    decoded_cropped_image = decode_rgba(cropped_image)
    decoded_cropped_image.save(cropped_filename)
    return orig_image, original_filename, decoded_cropped_image, cropped_filename, timestamp
|
| 97 |
+
|
| 98 |
+
def web_main(orig_image, image_data):
    """Full demo pipeline behind the "Crop Element" button.

    Saves the upload and the user crop, flattens the cropped texture with the
    fabric-diffusion model, runs the capture decomposition on the generated
    textures, and zips the result folder for download.

    Returns (original image, decoded crop, path of the downloadable zip).
    """
    orig_image, original_filename, cropped_image, cropped_filename, timestamp = process_image(orig_image, image_data)
    set_deterministic(seed=42)

    # All paths and checkpoints come from the demo config file.
    config = load_config("config_demo.yaml")
    device = config["hyperparameters"]["device"]
    texture_checkpoint = config["hyperparameters"]["fb_checkpoint"]
    print_checkpoint = config["hyperparameters"].get("print_checkpoint", None)
    input_image = config["hyperparameters"]["input_image"]
    save_fd_dir = config["hyperparameters"]["save_fd_dir"]
    save_mp_dir = config["hyperparameters"]["save_mp_dir"]
    n_samples = config["hyperparameters"]["n_samples"]

    # Stage 1: flatten the cropped texture patch with the diffusion model.
    pipeline = FabricDiffusionPipeline(device, texture_checkpoint, print_checkpoint=print_checkpoint)
    os.makedirs(save_fd_dir, exist_ok=True)
    run_flatten_texture(pipeline, cropped_filename, output_path=save_fd_dir, n_samples=n_samples)

    # Stage 2: lay generated images out as <name>/outputs/<name>.png — the
    # folder structure the capture data loader expects.
    organize_images_into_structure(save_fd_dir, save_mp_dir)

    data = capture.get_data(predict_dir=Path(save_mp_dir), predict_ds='sd')
    module = capture.get_inference_module(pt=config["hyperparameters"]["checkpoint_name"])

    # Stage 3: predict material maps (hard-coded to run on one GPU).
    decomp = Trainer(default_root_dir=Path(save_mp_dir), accelerator='gpu', devices=1, precision=16)
    decomp.predict(module, data)

    # Only the first generated sample (gen_0) is packaged for download.
    folder = f"cropped_{timestamp}_gen_0"
    folder_path = os.path.join(save_mp_dir, folder)

    # Flatten the deeply nested checkpoint output layout into the sample folder.
    if os.path.isdir(folder_path):
        target_path = os.path.join(folder_path, "weights", "mask", "an_object_with_azertyuiop_texture",
                                   "checkpoint-800", "outputs")
        if os.path.exists(target_path):
            for file_name in os.listdir(target_path):
                file_path = os.path.join(target_path, file_name)
                if os.path.isfile(file_path):
                    shutil.move(file_path, folder_path)

    print(f"FOLDER: {folder_path}")
    print(os.path.exists(original_filename))
    # Ship the source images alongside the predicted maps.
    shutil.copyfile(
        original_filename,
        os.path.join(folder_path, "outputs", original_filename.split("/")[-1])
    )

    shutil.copyfile(
        cropped_filename,
        os.path.join(folder_path, "outputs", cropped_filename.split("/")[-1])
    )
    zip_folder(
        folder_path,
        os.path.join(ZIP_FOLDER, f"{folder_path.split('/')[-1]}.zip")
    )
    return orig_image, cropped_image, os.path.join(ZIP_FOLDER, f"{folder_path.split('/')[-1]}.zip")
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
# --- Gradio UI ---------------------------------------------------------------
# Top row: original upload + crop editor.  Bottom row: result image, the
# decoded crop, and a downloadable zip of the generated material maps.
with gr.Blocks() as demo:
    with gr.Row():
        orig_image = gr.Image(label="Orig image", type="pil")
        image_editor = gr.ImageEditor(type="pil", crop_size="1:1", label="Edit Image: Crop the desired element")
    with gr.Row():
        output_image = gr.Image(label="Result")
        crop_image = gr.Image(label="Crop_image")
        zip_file = gr.File(label="Download Zip File")

    process_button = gr.Button("Crop Element")
    # web_main runs the whole pipeline: crop -> diffusion -> decomposition -> zip.
    process_button.click(web_main, inputs=[orig_image, image_editor], outputs=[output_image, crop_image, zip_file])

demo.launch(share=True, server_port=22856)
|
capture/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from .utils.model import get_inference_module
|
| 3 |
+
from .utils.exp import get_data
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
__all__ = ['get_inference_module', 'get_data']
|
capture/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (285 Bytes). View file
|
|
|
capture/callbacks/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .metrics import MetricLogging
|
| 2 |
+
from .visualize import VisualizeCallback
|
| 3 |
+
|
| 4 |
+
__all__ = ['MetricLogging', 'VisualizeCallback']
|
capture/callbacks/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (293 Bytes). View file
|
|
|
capture/callbacks/__pycache__/metrics.cpython-310.pyc
ADDED
|
Binary file (1.48 kB). View file
|
|
|
capture/callbacks/__pycache__/visualize.cpython-310.pyc
ADDED
|
Binary file (5.17 kB). View file
|
|
|
capture/callbacks/metrics.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from collections import OrderedDict
|
| 4 |
+
|
| 5 |
+
from pytorch_lightning.callbacks import Callback
|
| 6 |
+
|
| 7 |
+
from ..utils.log import append_csv, get_info
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MetricLogging(Callback):
    """Appends the trainer's logged test metrics to ``<outdir>/eval.csv``."""

    def __init__(self, weights: str, test_list: Path, outdir: Path):
        """
        Args:
            weights: path of the checkpoint being evaluated; parsed by
                ``get_info`` into (name, epoch) in ``on_test_end``.
            test_list: path of the test material list.  ``.parts`` is read in
                ``on_test_end``, so this must be a ``Path`` (the original
                ``str`` annotation was wrong).
            outdir: existing directory where ``eval.csv`` is appended.
        """
        super().__init__()
        print(outdir)
        assert outdir.is_dir()

        self.weights = weights
        self.test_list = test_list
        self.outpath = outdir/'eval.csv'

    def on_test_end(self, trainer, pl_module):
        # Identify the run: checkpoint name/epoch plus the test-set name
        # (last path component of the material list).
        weight_name, epoch = get_info(str(self.weights))
        *_, test_set = self.test_list.parts

        # Stringify metric values so they serialize cleanly to CSV.
        parsed = {k: f'{v}' for k,v in trainer.logged_metrics.items()}

        odict = OrderedDict(name=weight_name, epoch=epoch, test_set=test_set)
        odict.update(parsed)
        append_csv(self.outpath, odict)
        print(f'logged metrics in: {self.outpath}')
|
capture/callbacks/visualize.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import pytorch_lightning as pl
|
| 5 |
+
from torchvision.utils import make_grid, save_image
|
| 6 |
+
from torchvision.transforms import Resize
|
| 7 |
+
|
| 8 |
+
from capture.render import encode_as_unit_interval, gamma_encode
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class VisualizeCallback(pl.Callback):
    """Periodically saves image grids of inputs / ground-truth maps /
    predictions during training and validation under ``<out_dir>/images_1``.
    """

    def __init__(self, exist_ok: bool, out_dir: Path, log_every_n_epoch: int, n_batches_shown: int):
        super().__init__()

        self.out_dir = out_dir/'images_1'
        # Guard against silently mixing runs: ask before reusing a non-empty dir.
        if not exist_ok and (self.out_dir.is_dir() and len(list(self.out_dir.iterdir())) > 0):
            print(f'directory {out_dir} already exists, press \'y\' to proceed')
            x = input()
            if x != 'y':
                exit(1)

        self.out_dir.mkdir(parents=True, exist_ok=True)

        self.log_every_n_epoch = log_every_n_epoch
        self.n_batches_shown = n_batches_shown
        # Thumbnails are resized to 128x128 before being tiled into a grid.
        self.resize = Resize(size=[128,128], antialias=True)

    def setup(self, trainer, module, stage):
        self.logger = trainer.logger

    def on_train_batch_end(self, *args):
        self._on_batch_end(*args, split='train')

    def on_validation_batch_end(self, *args):
        self._on_batch_end(*args, split='valid')

    def _on_batch_end(self, trainer, module, outputs, inputs, batch, *args, split):
        # inputs carries a (source-domain, target-domain) batch pair.
        x_src, x_tgt = inputs

        # optim_idx:0=discr & optim_idx:1=generator
        y_src, y_tgt = outputs[1]['y'] if isinstance(outputs, list) else outputs['y']

        epoch = trainer.current_epoch
        if epoch % self.log_every_n_epoch == 0 and batch <= self.n_batches_shown:
            if x_src and y_src:
                self._visualize_src(x_src, y_src, split=split, epoch=epoch, batch=batch, ds='src')
            if x_tgt and y_tgt:
                self._visualize_tgt(x_tgt, y_tgt, split=split, epoch=epoch, batch=batch, ds='tgt')

    def _visualize_src(self, x, y, split, epoch, batch, ds):
        # One grid row per sample: GT maps then the matching predictions.
        zipped = zip(x.albedo, x.roughness, x.normals, x.input, x.image,
                     y.albedo, y.roughness, y.normals, y.reco, y.image)

        grid = [self._visualize_single_src(*z) for z in zipped]

        name = self.out_dir/f'{split}{epoch:05d}_{ds}_{batch}.jpg'
        save_image(grid, name, nrow=1, padding=5)

    @torch.no_grad()
    def _visualize_single_src(self, a, r, n, input, mv, a_p, r_p, n_p, reco, mv_p):
        # Normals are remapped for display; encode_as_unit_interval
        # presumably maps [-1, 1] -> [0, 1] — confirm in capture.render.
        n = encode_as_unit_interval(n)
        n_p = encode_as_unit_interval(n_p)

        # Rendered views are gamma-encoded for display.
        mv_gt = [gamma_encode(o) for o in mv]
        mv_pred = [gamma_encode(o) for o in mv_p]
        reco = gamma_encode(reco)

        # Two rows: [input, GT maps, GT renders] over [reco, predicted maps, renders].
        maps = [input, a, r, n] + mv_gt + [reco, a_p, r_p, n_p] + mv_pred
        maps = [self.resize(x.cpu()) for x in maps]
        return make_grid(maps, nrow=len(maps) // 2, padding=0)

    @torch.no_grad()
    def _visualize_single_src_previous(self, a, r, n, d, input, mv, a_p, r_p, n_p, d_p, reco, mv_p):
        # Older variant that also shows displacement maps (d / d_p).
        n = encode_as_unit_interval(n)
        n_p = encode_as_unit_interval(n_p)

        mv_gt = [gamma_encode(o) for o in mv]
        mv_pred = [gamma_encode(o) for o in mv_p]
        reco = gamma_encode(reco)

        maps = [input, a, r, n, d] + mv_gt + [reco, a_p, r_p, n_p, d_p] + mv_pred
        maps = [self.resize(x.cpu()) for x in maps]
        return make_grid(maps, nrow=len(maps)//2, padding=0)

    def _visualize_tgt(self, x, y, split, epoch, batch, ds):
        # Target domain has no ground truth: show input plus predictions only.
        zipped = zip(x.input, y.albedo, y.roughness, y.normals, y.displacement)

        grid = [self._visualize_single_tgt(*z) for z in zipped]

        name = self.out_dir/f'{split}{epoch:05d}_{ds}_{batch}.jpg'
        save_image(grid, name, nrow=1, padding=5)

    @torch.no_grad()
    def _visualize_single_tgt(self, input, a_p, r_p, n_p, d_p):
        n_p = encode_as_unit_interval(n_p)
        maps = [input, a_p, r_p, n_p, d_p]
        maps = [self.resize(x.cpu()) for x in maps]
        return make_grid(maps, nrow=len(maps), padding=0)
|
capture/data/.ipynb_checkpoints/source-checkpoint.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import cv2
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from easydict import EasyDict
|
| 8 |
+
import torchvision.transforms.functional as tf
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
|
| 11 |
+
from ..utils.log import get_matlist
|
| 12 |
+
from . import augment as Aug
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class FabricsDataset(Dataset):
    """Dataset of material folders, each holding normal.png / basecolor.png /
    roughness.png / height.png maps for one fabric."""

    def __init__(
        self,
        split,
        transform,
        renderer,
        matlist,
        dir: typing.Optional[Path] = None,
        set_seed_render: bool = False,
        **kwargs
    ):
        """
        Args:
            split: 'train' / 'valid' / 'all' (95/5 split of the file list).
            transform: sequence of list-aware augmentations (see augment.py).
            renderer: callable rendering the maps into lit input images.
            matlist: file listing the material entries to load.
            dir: root directory the matlist entries are resolved against.
            set_seed_render: seed the renderer per material for reproducibility.
        """
        assert dir.is_dir()
        assert matlist.is_file()
        assert split in ['train', 'valid', 'all']
        self.set_seed_render = set_seed_render

        files = get_matlist(matlist, dir)

        # train/val split: first 95% of the list trains, last 5% validates
        self.split = split
        k = int(len(files) * .95)
        if split == 'train':
            self.files = files[:k]
        elif split == 'valid':
            self.files = files[k:]
        elif split == 'all':
            self.files = files

        print(f'FabricsDataset list={matlist}:{self.split}=[{len(self.files)}/{len(files)}]')

        dtypes = ['normals', 'albedo', 'input', 'input']
        self.tf = Aug.Pipeline(*transform, dtypes=dtypes)
        self.renderer = renderer

    def __getitem__(self, index, quick=False):
        # BUG FIX: the original indexed ``self.folders``, an attribute that is
        # never assigned (``__init__`` stores the list as ``self.files``), so
        # every item access raised AttributeError.
        folder = self.files[index]

        N_path = folder / 'normal.png'
        A_path = folder / 'basecolor.png'
        R_path = folder / 'roughness.png'
        D_path = folder / 'height.png'

        N = tf.to_tensor(Image.open(N_path).convert('RGB'))
        A = tf.to_tensor(Image.open(A_path).convert('RGB'))
        R = tf.to_tensor(Image.open(R_path).convert('RGB'))
        # Height map is single channel; replicate to 3 channels in [0, 1].
        D_pil = cv2.imread(str(D_path), cv2.IMREAD_GRAYSCALE)
        D = torch.from_numpy(D_pil)[None].repeat(3, 1, 1) / 255

        # augmentation (applied jointly so the maps stay pixel-aligned)
        N, A, R, D = self.tf([N, A, R, D])

        if self.set_seed_render:
            # Deterministic lighting per material folder name.
            torch.manual_seed(hash(folder.name))
        I, params = self.renderer([N, A, R, D], n_samples=1)
        params = torch.stack(params)

        # return homogenous object whatever the source: acg or sd
        return EasyDict(
            input=I[0],
            input_params=params[:, 0],
            normals=N,
            albedo=A,
            roughness=R,
            displacement=D,
            name=folder.name,
        )

    def __len__(self):
        return len(self.files)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class AmbientCG(Dataset):
    """AmbientCG PBR dataset: each entry is one material whose 2K PNG maps
    (Color / Roughness / NormalGL / Displacement) live in the same folder."""

    def __init__(
        self,
        split,
        transform,
        renderer,
        matlist,
        dir: typing.Optional[Path] = None,
        set_seed_render: bool = False,
        **kwargs
    ):
        """
        Args:
            split: 'train' / 'valid' / 'all' (95/5 split of the file list).
            transform: sequence of list-aware augmentations (see augment.py).
            renderer: callable rendering the maps into lit input images.
            matlist: file listing the material entries to load.
            dir: root directory the matlist entries are resolved against.
            set_seed_render: seed the renderer per material for reproducibility.
        """
        assert dir.is_dir()
        assert matlist.is_file()
        assert split in ['train', 'valid', 'all']
        self.set_seed_render = set_seed_render

        files = get_matlist(matlist, dir)

        # train/val split: first 95% of the list trains, last 5% validates
        self.split = split
        k = int(len(files) * .95)
        if split == 'train':
            self.files = files[:k]
        elif split == 'valid':
            self.files = files[k:]
        elif split == 'all':
            self.files = files

        print(f'AmbientCG list={matlist}:{self.split}=[{len(self.files)}/{len(files)}]')

        dtypes = ['normals', 'albedo', 'input', 'input']
        self.tf = Aug.Pipeline(*transform, dtypes=dtypes)
        self.renderer = renderer

    def __getitem__(self, index, quick=False):
        path = self.files[index]
        # e.g. 'Fabric001_2K-PNG_Color.png' -> material id 'Fabric001'
        name = path.stem.split('_')[0]
        root = path.parent

        N_path = root / f'{name}_2K-PNG_NormalGL.png'
        N = tf.to_tensor(Image.open(N_path).convert('RGB'))

        A_path = root / f'{name}_2K-PNG_Color.png'
        A = tf.to_tensor(Image.open(A_path).convert('RGB'))

        R_path = root / f'{name}_2K-PNG_Roughness.png'
        R = tf.to_tensor(Image.open(R_path).convert('RGB'))

        # Displacement is single channel; replicate to 3 channels in [0, 1].
        D_path = root / f'{name}_2K-PNG_Displacement.png'
        D_pil = cv2.imread(str(D_path), cv2.IMREAD_GRAYSCALE)
        D = torch.from_numpy(D_pil)[None].repeat(3, 1, 1) / 255

        # augmentation (applied jointly so the maps stay pixel-aligned)
        N, A, R, D = self.tf([N, A, R, D])

        if self.set_seed_render:
            # Deterministic lighting per material name.
            torch.manual_seed(hash(name))
        I, params = self.renderer([N, A, R, D], n_samples=1)
        params = torch.stack(params)

        # return homogenous object whatever the source: acg or sd
        return EasyDict(
            input=I[0],
            input_params=params[:, 0],
            normals=N,
            albedo=A,
            roughness=R,
            displacement=D,
            name=name,
        )

    def __len__(self):
        return len(self.files)
|
capture/data/__pycache__/augment.cpython-310.pyc
ADDED
|
Binary file (8.96 kB). View file
|
|
|
capture/data/__pycache__/module.cpython-310.pyc
ADDED
|
Binary file (4.93 kB). View file
|
|
|
capture/data/__pycache__/source.cpython-310.pyc
ADDED
|
Binary file (4.67 kB). View file
|
|
|
capture/data/__pycache__/target.cpython-310.pyc
ADDED
|
Binary file (2.28 kB). View file
|
|
|
capture/data/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (2.42 kB). View file
|
|
|
capture/data/augment.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision.transforms as T
|
| 5 |
+
import torchvision.transforms.functional as tf
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class RandomResizedCrop(T.RandomResizedCrop):
    """Resized crop sampled once and applied to *every* image in the list,
    so all maps of a material stay pixel-aligned."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __call__(self, x, dtypes):
        """WARNING: torchvision v0.11. Wrapper to T.RandomResizedCrop.__call__"""
        # Sample the crop window from the first image, then reuse it for all.
        i, j, h, w = self.get_params(x[0], self.scale, self.ratio)
        return [tf.resized_crop(img, i, j, h, w, self.size, self.interpolation) for img in x]
|
| 17 |
+
|
| 18 |
+
class ColorJitter(T.ColorJitter):
    """Color jitter that touches only the albedo map; all other map types
    (normals, roughness, input) pass through unchanged."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x, dtypes):
        # Sample one set of jitter parameters and reuse it for every albedo
        # map in the list so they stay consistent with each other.
        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
            self.get_params(self.brightness, self.contrast, self.saturation, self.hue)

        o = []
        for img, dtype in zip(x, dtypes):
            if dtype == 'albedo':
                # fn_idx is the randomized order of the four jitter ops.
                for fn_id in fn_idx:
                    if fn_id == 0 and brightness_factor is not None:
                        img = tf.adjust_brightness(img, brightness_factor)
                    elif fn_id == 1 and contrast_factor is not None:
                        img = tf.adjust_contrast(img, contrast_factor)
                    elif fn_id == 2 and saturation_factor is not None:
                        img = tf.adjust_saturation(img, saturation_factor)
                    elif fn_id == 3 and hue_factor is not None:
                        img = tf.adjust_hue(img, hue_factor)
            o.append(img)
        return o
|
| 40 |
+
|
| 41 |
+
class RandomHorizontalFlip(T.RandomHorizontalFlip):
    """Horizontal flip applied jointly to all maps; the x component of a
    normal map is negated so the normals stay consistent after mirroring."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def flip_x(self, img, dtype):
        if dtype == 'normals':
            # Mirroring reverses the x direction of the surface normal.
            # NOTE(review): this negation is in-place and mutates the
            # caller's tensor — confirm callers don't reuse the input.
            img[0] *= -1
        return tf.hflip(img)

    def forward(self, x, dtypes):
        # One coin flip for the whole list keeps the maps aligned.
        if torch.rand(1) < self.p:
            return [self.flip_x(img, dtype) for img, dtype in zip(x, dtypes)]
        return x
|
| 54 |
+
|
| 55 |
+
class RandomVerticalFlip(T.RandomVerticalFlip):
    """Vertical flip applied jointly to all maps; the y component of a
    normal map is negated so the normals stay consistent after mirroring."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def flip_y(self, img, dtype):
        if dtype == 'normals':
            # Mirroring reverses the y direction of the surface normal.
            # NOTE(review): in-place negation mutates the caller's tensor.
            img[1] *= -1
        return tf.vflip(img)

    def forward(self, x, dtypes):
        # One coin flip for the whole list keeps the maps aligned.
        if torch.rand(1) < self.p:
            return [self.flip_y(img, dtype) for img, dtype in zip(x, dtypes)]
        return x
|
| 68 |
+
|
| 69 |
+
def deg0(x, y, z):
    """Normal-map channels after a 0-degree rotation (unchanged)."""
    return torch.stack([x, y, z])


def deg90(x, y, z):
    """Normal-map channels after a 90-degree in-plane rotation."""
    rotated = [y.neg(), x, z]
    return torch.stack(rotated)


def deg180(x, y, z):
    """Normal-map channels after a 180-degree in-plane rotation."""
    rotated = [x.neg(), y.neg(), z]
    return torch.stack(rotated)


def deg270(x, y, z):
    """Normal-map channels after a 270-degree in-plane rotation."""
    rotated = [y, x.neg(), z]
    return torch.stack(rotated)
|
| 77 |
+
|
| 78 |
+
class RandomIncrementRotate:
    """Rotates all maps by one random multiple of 90 degrees, remapping the
    normal-map channels so the stored vectors follow the rotation."""

    def __init__(self, p):
        # p: probability of applying a rotation at all.
        self.p = p
        self.angles = [0, 90, 180, 270]

        # adjusts surface normals vector depending on rotation angle
        self.f = { 0: deg0, 90: deg90, 180: deg180, 270: deg270 }

    def rotate(self, img, theta, dtype):
        if dtype == 'normals':
            # Swap/negate the (x, y) channels before rotating the pixels.
            img = self.f[theta](*img)
        return tf.rotate(img, theta)

    def __call__(self, x, dtypes):
        # One angle for the whole list keeps the maps aligned.
        if torch.rand(1) < self.p:
            theta = random.choice(self.angles)
            return [self.rotate(img, theta, dtype) for img, dtype in zip(x, dtypes)]
        return x
|
| 96 |
+
|
| 97 |
+
class NormalizeGeometry:
    """Remaps normal maps from image range [0, 1] to unit vectors in [-1, 1];
    every other map type passes through untouched."""

    def normalize(self, img, dtype):
        if dtype != 'normals':
            return img
        # [0, 1] -> [-1, 1], then project each pixel's vector onto the unit sphere.
        centered = img * 2 - 1
        return F.normalize(centered, dim=0)

    def __call__(self, x, dtypes):
        out = []
        for img, dtype in zip(x, dtypes):
            out.append(self.normalize(img, dtype))
        return out
|
| 108 |
+
|
| 109 |
+
class RandomCrop(T.RandomCrop):
    """Random crop sampled once and applied to every map in the list."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x, dtypes):
        # A joint crop only makes sense if all maps share the spatial size.
        img_size = tf.get_image_size(x[0])
        assert all(tf.get_image_size(y) == img_size for y in x)
        i, j, h, w = self.get_params(x[0], self.size)
        return [tf.crop(img, i, j, h, w) for img in x]
|
| 118 |
+
|
| 119 |
+
class CenterCrop:
    """Center crop of the given size, applied to every map in the list."""

    def __init__(self, size):
        self.size = size

    def __call__(self, x, dtypes):
        return [tf.center_crop(img, self.size) for img in x]
|
| 125 |
+
|
| 126 |
+
class Resize(T.Resize):
    """torchvision Resize lifted to operate on a list of maps."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x, dtypes):
        return [super(Resize, self).forward(img) for img in x]
|
| 132 |
+
|
| 133 |
+
class Identity():
    """No-op transform: returns the map list unchanged."""

    def __call__(self, x, dtypes):
        del dtypes  # unused; kept for pipeline interface compatibility
        return x
|
| 136 |
+
|
| 137 |
+
class ToTensor:
    """Converts every image in the list to a tensor via torchvision."""

    def __call__(self, x, dtypes):
        return [tf.to_tensor(img) for img in x]
|
| 140 |
+
|
| 141 |
+
class Pipeline:
    """Compose per-sample transforms over a list of pixel-aligned maps.

    Each transform is invoked as ``f(x, dtypes)``, where ``x`` is the list of
    maps and ``dtypes`` names the semantic type of each map (one entry per
    map, drawn from ``DATA_TYPES``).
    """

    # semantic types a map may carry; 'normals' receive special geometric
    # handling in some transforms
    DATA_TYPES = ['input', 'normals', 'albedo']

    def __init__(self, *transforms, dtypes=None):
        # dtypes is effectively required; the keyword-only default exists so
        # callers must pass it by name.  Fail with a clear message instead of
        # an opaque TypeError from iterating None.
        assert dtypes is not None, 'Pipeline requires a dtypes list'
        assert all(d in Pipeline.DATA_TYPES for d in dtypes)
        self.dtypes = dtypes
        self.transforms = transforms

    def __call__(self, x):
        # one dtype per map, and all maps spatially aligned
        # (shape[1:] compares spatial dims, assuming channel-first tensors)
        assert len(self.dtypes) == len(x)
        assert all(y.shape[1:] == x[0].shape[1:] for y in x)
        for f in self.transforms:
            x = f(x, self.dtypes)
        return x
capture/data/download.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
import argparse
from pathlib import Path
from io import BytesIO
from zipfile import ZipFile

from tqdm import tqdm


# Download the Color/Roughness/NormalGL 2K PNG maps for every material listed
# in matlist/ambientcg into the given output directory, skipping materials
# that are already fully present.
parser = argparse.ArgumentParser()
parser.add_argument('outdir', type=Path)
args = parser.parse_args()

url = 'https://ambientcg.com/get?file={}_2K-PNG.zip'

print(f'saving files into {args.outdir}')
args.outdir.mkdir(parents=True, exist_ok=True)

# one material name (e.g. "PavingStones063") per line
matlist = Path(__file__).parent/'matlist/ambientcg'
with open(matlist.resolve(), 'r') as file:
    materials = [line.strip() for line in file]

mtypes = ['Color', 'Roughness', 'NormalGL']
for uid in tqdm(materials):
    maps = [f'{uid}_2K-PNG_{x}.png' for x in mtypes]

    # skip materials whose three maps were already downloaded
    if all((args.outdir/m).exists() for m in maps):
        continue

    link = url.format(uid)
    try:
        # timeout so a stalled connection cannot hang the whole run;
        # raise_for_status so HTML error pages are not parsed as zip archives
        r = requests.get(link, timeout=60)
        r.raise_for_status()
        archive = ZipFile(BytesIO(r.content))
    except Exception:
        # best-effort: log the failed material and carry on
        # (narrowed from a bare `except:` that also swallowed KeyboardInterrupt)
        print(f'{uid},{link}\n')
        continue

    for m in maps:
        archive.extract(m, args.outdir)
capture/data/matlist/ambientcg
ADDED
|
@@ -0,0 +1,1182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
PavingStones063
|
| 2 |
+
Ivory001B
|
| 3 |
+
PavingStones127
|
| 4 |
+
Fabric057
|
| 5 |
+
Tiles095
|
| 6 |
+
Ground022
|
| 7 |
+
Tiles048
|
| 8 |
+
Snow002
|
| 9 |
+
Tiles018
|
| 10 |
+
Tiles025
|
| 11 |
+
Bricks035
|
| 12 |
+
RoofingTiles001
|
| 13 |
+
Gravel022
|
| 14 |
+
Rock049
|
| 15 |
+
Fabric019
|
| 16 |
+
Concrete038
|
| 17 |
+
Rock019
|
| 18 |
+
Tiles014
|
| 19 |
+
PaintedPlaster003
|
| 20 |
+
Bricks031
|
| 21 |
+
Leather034C
|
| 22 |
+
Paper004
|
| 23 |
+
Asphalt024C
|
| 24 |
+
Tiles003
|
| 25 |
+
Tiles053
|
| 26 |
+
Tiles067
|
| 27 |
+
Asphalt023S
|
| 28 |
+
WoodFloor013
|
| 29 |
+
Ground025
|
| 30 |
+
Ground044
|
| 31 |
+
PavingStones105
|
| 32 |
+
Rock003
|
| 33 |
+
Bricks060
|
| 34 |
+
Rock017
|
| 35 |
+
Bricks080C
|
| 36 |
+
Tiles034
|
| 37 |
+
Concrete027
|
| 38 |
+
WoodFloor049
|
| 39 |
+
PavingStones006
|
| 40 |
+
Gravel026
|
| 41 |
+
Rock037
|
| 42 |
+
Rock045
|
| 43 |
+
PaintedPlaster018
|
| 44 |
+
Fabric037
|
| 45 |
+
PaintedWood005
|
| 46 |
+
Tiles098
|
| 47 |
+
Asphalt014
|
| 48 |
+
Ivory002A
|
| 49 |
+
WoodFloor046
|
| 50 |
+
Plastic009
|
| 51 |
+
Facade014
|
| 52 |
+
Rock025
|
| 53 |
+
PaintedBricks004
|
| 54 |
+
Rock018
|
| 55 |
+
Paint005
|
| 56 |
+
WoodSiding005
|
| 57 |
+
Bricks065
|
| 58 |
+
Carpet002
|
| 59 |
+
Cork001
|
| 60 |
+
PavingStones051
|
| 61 |
+
Bricks006
|
| 62 |
+
Marble002
|
| 63 |
+
Tiles043
|
| 64 |
+
Tiles102
|
| 65 |
+
Tiles012
|
| 66 |
+
Tiles074
|
| 67 |
+
Rock042L
|
| 68 |
+
Fabric064
|
| 69 |
+
Leather013
|
| 70 |
+
Asphalt028A
|
| 71 |
+
Leather018
|
| 72 |
+
PavingStones084
|
| 73 |
+
Rock039
|
| 74 |
+
Fabric028
|
| 75 |
+
Tiles066
|
| 76 |
+
Wood071
|
| 77 |
+
Wood015
|
| 78 |
+
Ground058
|
| 79 |
+
Bricks051
|
| 80 |
+
Wood043
|
| 81 |
+
Plastic013A
|
| 82 |
+
PaintedPlaster007
|
| 83 |
+
Leather001
|
| 84 |
+
Fabric032
|
| 85 |
+
PavingStones050
|
| 86 |
+
Concrete023
|
| 87 |
+
PavingStones094
|
| 88 |
+
Bricks017
|
| 89 |
+
Bricks087
|
| 90 |
+
Rock014
|
| 91 |
+
Fabric010
|
| 92 |
+
Plastic008
|
| 93 |
+
Ground042
|
| 94 |
+
PavingStones090
|
| 95 |
+
Fabric067
|
| 96 |
+
Concrete017
|
| 97 |
+
Fabric025
|
| 98 |
+
Bricks063
|
| 99 |
+
Carpet004
|
| 100 |
+
Bricks068
|
| 101 |
+
Porcelain001
|
| 102 |
+
PavingStones013
|
| 103 |
+
Asphalt005
|
| 104 |
+
WoodFloor055
|
| 105 |
+
Bark012
|
| 106 |
+
Granite002B
|
| 107 |
+
Fabric079
|
| 108 |
+
Ground012
|
| 109 |
+
PavingStones098
|
| 110 |
+
Gravel013
|
| 111 |
+
Wicker011B
|
| 112 |
+
WoodFloor016
|
| 113 |
+
Lava003
|
| 114 |
+
PavingStones046
|
| 115 |
+
Asphalt008
|
| 116 |
+
PavingStones061
|
| 117 |
+
Leather035A
|
| 118 |
+
Tiles026
|
| 119 |
+
Gravel009
|
| 120 |
+
Rocks004
|
| 121 |
+
Fabric044
|
| 122 |
+
Concrete025
|
| 123 |
+
Fabric027
|
| 124 |
+
Moss004
|
| 125 |
+
PavingStones053
|
| 126 |
+
Gravel015
|
| 127 |
+
PavingStones132
|
| 128 |
+
Porcelain002
|
| 129 |
+
Leather011
|
| 130 |
+
Plastic005
|
| 131 |
+
Carpet010
|
| 132 |
+
Plastic012B
|
| 133 |
+
Ground020
|
| 134 |
+
Marble024
|
| 135 |
+
WoodFloor040
|
| 136 |
+
Fabric039
|
| 137 |
+
Tiles119
|
| 138 |
+
Rock022
|
| 139 |
+
PavingStones120
|
| 140 |
+
Tiles013
|
| 141 |
+
Asphalt010
|
| 142 |
+
Leather005
|
| 143 |
+
Bricks027
|
| 144 |
+
Wood003
|
| 145 |
+
PavingStones083
|
| 146 |
+
Fabric071
|
| 147 |
+
PavingStones095
|
| 148 |
+
WoodFloor048
|
| 149 |
+
Fabric059
|
| 150 |
+
Concrete019
|
| 151 |
+
Foam003
|
| 152 |
+
Gravel024
|
| 153 |
+
Tiles099
|
| 154 |
+
Tiles065
|
| 155 |
+
PaintedWood006C
|
| 156 |
+
WoodFloor024
|
| 157 |
+
Marble014
|
| 158 |
+
Lava001
|
| 159 |
+
Tiles006
|
| 160 |
+
Ground014
|
| 161 |
+
Tiles129A
|
| 162 |
+
Tiles108
|
| 163 |
+
Ground057
|
| 164 |
+
WoodSiding006
|
| 165 |
+
WoodFloor025
|
| 166 |
+
Fabric041
|
| 167 |
+
Wood066
|
| 168 |
+
PavingStones014
|
| 169 |
+
Grass002
|
| 170 |
+
WoodFloor058
|
| 171 |
+
Ground054
|
| 172 |
+
Gravel032
|
| 173 |
+
Marble023
|
| 174 |
+
Tiles052
|
| 175 |
+
Planks021
|
| 176 |
+
PavingStones091
|
| 177 |
+
Wood060
|
| 178 |
+
PavingStones042
|
| 179 |
+
PavingStones136
|
| 180 |
+
TactilePaving003
|
| 181 |
+
Asphalt016
|
| 182 |
+
WoodFloor008
|
| 183 |
+
Terrazzo007
|
| 184 |
+
Tiles116
|
| 185 |
+
Tiles017
|
| 186 |
+
Asphalt013
|
| 187 |
+
Wood033
|
| 188 |
+
Wood053
|
| 189 |
+
Fabric045
|
| 190 |
+
Tiles068
|
| 191 |
+
Plaster001
|
| 192 |
+
Wallpaper001C
|
| 193 |
+
Wood074
|
| 194 |
+
Concrete020
|
| 195 |
+
Rock042S
|
| 196 |
+
Tiles094
|
| 197 |
+
Rock047
|
| 198 |
+
Lava005
|
| 199 |
+
Gravel036S
|
| 200 |
+
Concrete026
|
| 201 |
+
Fabric062
|
| 202 |
+
WoodFloor036
|
| 203 |
+
PaintedPlaster002
|
| 204 |
+
Bricks020
|
| 205 |
+
Concrete036
|
| 206 |
+
Snow001
|
| 207 |
+
Tatami002
|
| 208 |
+
Wood069
|
| 209 |
+
Tiles051
|
| 210 |
+
Gravel003
|
| 211 |
+
Carpet011
|
| 212 |
+
Concrete012
|
| 213 |
+
Lava004
|
| 214 |
+
Tiles113
|
| 215 |
+
Bricks043
|
| 216 |
+
Asphalt023L
|
| 217 |
+
Asphalt025B
|
| 218 |
+
Chipboard004
|
| 219 |
+
Wood023
|
| 220 |
+
PavingStones128
|
| 221 |
+
Gravel035
|
| 222 |
+
Tiles037
|
| 223 |
+
PavingStones067
|
| 224 |
+
Wood054
|
| 225 |
+
WoodFloor054
|
| 226 |
+
Plastic003
|
| 227 |
+
Snow008C
|
| 228 |
+
WoodFloor014
|
| 229 |
+
WoodFloor015
|
| 230 |
+
Grass001
|
| 231 |
+
Marble016
|
| 232 |
+
Leather021
|
| 233 |
+
Tiles114
|
| 234 |
+
Ground068
|
| 235 |
+
Wood029
|
| 236 |
+
Bark013
|
| 237 |
+
Fabric033
|
| 238 |
+
Plastic017A
|
| 239 |
+
Leather034B
|
| 240 |
+
Grass004
|
| 241 |
+
Rocks020
|
| 242 |
+
Gravel029
|
| 243 |
+
Candy003
|
| 244 |
+
Ground062L
|
| 245 |
+
PavingStones102
|
| 246 |
+
Concrete014
|
| 247 |
+
Tiles122
|
| 248 |
+
Tiles042
|
| 249 |
+
Wood010
|
| 250 |
+
Rock023
|
| 251 |
+
WoodFloor056
|
| 252 |
+
Gravel020
|
| 253 |
+
Snow010C
|
| 254 |
+
PavingStones115C
|
| 255 |
+
Fabric054
|
| 256 |
+
Asphalt029B
|
| 257 |
+
Asphalt009
|
| 258 |
+
Gravel012
|
| 259 |
+
WoodFloor017
|
| 260 |
+
Leather002
|
| 261 |
+
Tiles044
|
| 262 |
+
Rock046L
|
| 263 |
+
PavingStones097
|
| 264 |
+
PavingStones029
|
| 265 |
+
Concrete039
|
| 266 |
+
PavingStones074
|
| 267 |
+
Tatami001
|
| 268 |
+
Fabric056
|
| 269 |
+
Tiles076
|
| 270 |
+
WoodFloor020
|
| 271 |
+
Concrete030
|
| 272 |
+
PavingStones134
|
| 273 |
+
PaintedWood007B
|
| 274 |
+
Gravel017
|
| 275 |
+
Marble021
|
| 276 |
+
Wood051
|
| 277 |
+
PavingStones117
|
| 278 |
+
Tiles045
|
| 279 |
+
PavingStones103
|
| 280 |
+
Bricks022
|
| 281 |
+
Asphalt021
|
| 282 |
+
Planks029L
|
| 283 |
+
Wicker008B
|
| 284 |
+
Wood048
|
| 285 |
+
Tiles125
|
| 286 |
+
Rocks019
|
| 287 |
+
Rock002
|
| 288 |
+
Rock029
|
| 289 |
+
PavingStones009
|
| 290 |
+
Wood075
|
| 291 |
+
Granite006B
|
| 292 |
+
Wood078
|
| 293 |
+
Fabric030
|
| 294 |
+
Ground023
|
| 295 |
+
Rocks012
|
| 296 |
+
Granite003B
|
| 297 |
+
Ground024
|
| 298 |
+
Asphalt018
|
| 299 |
+
Tiles029
|
| 300 |
+
Tatami003
|
| 301 |
+
PavingStones040
|
| 302 |
+
Wood037
|
| 303 |
+
Plastic004
|
| 304 |
+
Leather029
|
| 305 |
+
Leather036D
|
| 306 |
+
Facade012
|
| 307 |
+
Plastic010
|
| 308 |
+
PavingStones052
|
| 309 |
+
Grass003
|
| 310 |
+
Rock011
|
| 311 |
+
Leather004
|
| 312 |
+
Cardboard004
|
| 313 |
+
Bricks082B
|
| 314 |
+
PavingStones077
|
| 315 |
+
Fabric063
|
| 316 |
+
Marble018
|
| 317 |
+
Tiles022
|
| 318 |
+
PavingStones082
|
| 319 |
+
Bricks034
|
| 320 |
+
Fabric007
|
| 321 |
+
Fabric013
|
| 322 |
+
Rock038
|
| 323 |
+
Tiles120
|
| 324 |
+
Tiles077
|
| 325 |
+
Ivory001A
|
| 326 |
+
PavingStones059
|
| 327 |
+
Ground053
|
| 328 |
+
Foam001
|
| 329 |
+
Gravel005
|
| 330 |
+
Terrazzo003
|
| 331 |
+
PaintedBricks001
|
| 332 |
+
Wood049
|
| 333 |
+
Wicker007B
|
| 334 |
+
Paint004
|
| 335 |
+
PaintedWood008A
|
| 336 |
+
Tiles072
|
| 337 |
+
Rock008
|
| 338 |
+
Gravel008
|
| 339 |
+
PaintedPlaster014
|
| 340 |
+
Wicker009B
|
| 341 |
+
Tiles023
|
| 342 |
+
Carpet006
|
| 343 |
+
Planks003
|
| 344 |
+
PavingStones033
|
| 345 |
+
Bricks070
|
| 346 |
+
Tiles083
|
| 347 |
+
PavingStones039
|
| 348 |
+
Wood031
|
| 349 |
+
Tatami005
|
| 350 |
+
Wicker007A
|
| 351 |
+
PavingStones130
|
| 352 |
+
WoodFloor041
|
| 353 |
+
Leather020
|
| 354 |
+
WoodFloor035
|
| 355 |
+
Bricks061
|
| 356 |
+
PaintedWood006B
|
| 357 |
+
Wood041
|
| 358 |
+
Road005
|
| 359 |
+
Bricks037
|
| 360 |
+
Leather034A
|
| 361 |
+
Wood018
|
| 362 |
+
Cardboard003
|
| 363 |
+
Tiles085
|
| 364 |
+
Tiles103
|
| 365 |
+
Leather017
|
| 366 |
+
PavingStones088
|
| 367 |
+
Paint001
|
| 368 |
+
Fabric047
|
| 369 |
+
Tiles118
|
| 370 |
+
Terrazzo012
|
| 371 |
+
PavingStones078
|
| 372 |
+
PaintedWood007C
|
| 373 |
+
Wood042
|
| 374 |
+
Granite001A
|
| 375 |
+
Planks010
|
| 376 |
+
Concrete040
|
| 377 |
+
Wood084B
|
| 378 |
+
Fabric058
|
| 379 |
+
Marble004
|
| 380 |
+
Bricks081A
|
| 381 |
+
PavingStones020
|
| 382 |
+
Plastic006
|
| 383 |
+
PavingStones119
|
| 384 |
+
Wood081
|
| 385 |
+
Tiles050
|
| 386 |
+
Concrete011
|
| 387 |
+
ThatchedRoof002B
|
| 388 |
+
Wood045
|
| 389 |
+
Ground043
|
| 390 |
+
WoodFloor021
|
| 391 |
+
Wallpaper002A
|
| 392 |
+
Bricks012
|
| 393 |
+
PavingStones056
|
| 394 |
+
Marble025
|
| 395 |
+
Wood085B
|
| 396 |
+
Cork002
|
| 397 |
+
Terrazzo006
|
| 398 |
+
Rocks024S
|
| 399 |
+
PaintedPlaster001
|
| 400 |
+
GlazedTerracotta002
|
| 401 |
+
Tiles100
|
| 402 |
+
Wood079
|
| 403 |
+
WoodFloor052
|
| 404 |
+
Gravel036L
|
| 405 |
+
Chipboard001
|
| 406 |
+
Granite004A
|
| 407 |
+
Gravel011
|
| 408 |
+
Gravel010
|
| 409 |
+
PavingStones115B
|
| 410 |
+
Fabric023
|
| 411 |
+
PavingStones079
|
| 412 |
+
Plastic014B
|
| 413 |
+
Tiles089
|
| 414 |
+
Asphalt028B
|
| 415 |
+
Plastic012A
|
| 416 |
+
Plastic014A
|
| 417 |
+
Tiles004
|
| 418 |
+
WoodFloor026
|
| 419 |
+
PaintedWood001
|
| 420 |
+
Wicker003
|
| 421 |
+
WoodFloor004
|
| 422 |
+
Terrazzo019M
|
| 423 |
+
Clay003
|
| 424 |
+
Asphalt029A
|
| 425 |
+
PavingStones055
|
| 426 |
+
Bricks011
|
| 427 |
+
PaintedPlaster016
|
| 428 |
+
Paint003
|
| 429 |
+
Wood022
|
| 430 |
+
Bricks040
|
| 431 |
+
Bricks010
|
| 432 |
+
WoodFloor047
|
| 433 |
+
Marble003
|
| 434 |
+
Ground036
|
| 435 |
+
Leather033A
|
| 436 |
+
PavingStones062
|
| 437 |
+
WoodFloor033
|
| 438 |
+
Bricks007
|
| 439 |
+
Wood087
|
| 440 |
+
Plastic011
|
| 441 |
+
Fabric051
|
| 442 |
+
PavingStones104
|
| 443 |
+
WoodSiding013
|
| 444 |
+
Wood052
|
| 445 |
+
Tiles009
|
| 446 |
+
Ground049A
|
| 447 |
+
PaintedPlaster010
|
| 448 |
+
PavingStones108
|
| 449 |
+
WoodFloor060
|
| 450 |
+
Ground011
|
| 451 |
+
Granite005B
|
| 452 |
+
PaintedWood008C
|
| 453 |
+
Wood025
|
| 454 |
+
Bricks076B
|
| 455 |
+
Terrazzo013
|
| 456 |
+
Rock026
|
| 457 |
+
WoodFloor059
|
| 458 |
+
PavingStones126B
|
| 459 |
+
PavingStones015
|
| 460 |
+
Rocks005
|
| 461 |
+
Marble017
|
| 462 |
+
Shells001
|
| 463 |
+
Asphalt019
|
| 464 |
+
Bricks032
|
| 465 |
+
Rock010
|
| 466 |
+
Bricks009
|
| 467 |
+
Snow010B
|
| 468 |
+
GlazedTerracotta001
|
| 469 |
+
Rock052
|
| 470 |
+
PavingStones038
|
| 471 |
+
Wicker010B
|
| 472 |
+
Ground039
|
| 473 |
+
Tiles020
|
| 474 |
+
Granite001B
|
| 475 |
+
Asphalt026B
|
| 476 |
+
Clay001
|
| 477 |
+
PavingStones089
|
| 478 |
+
Rock006
|
| 479 |
+
Leather024
|
| 480 |
+
Concrete022
|
| 481 |
+
Concrete028
|
| 482 |
+
PavingStones126A
|
| 483 |
+
Ice003
|
| 484 |
+
Ground069
|
| 485 |
+
Wood007
|
| 486 |
+
Carpet014
|
| 487 |
+
Rock044
|
| 488 |
+
Concrete021
|
| 489 |
+
Tiles101
|
| 490 |
+
Fabric075
|
| 491 |
+
PavingStones112
|
| 492 |
+
Tiles071
|
| 493 |
+
Ice001
|
| 494 |
+
PavingStones035
|
| 495 |
+
Wood014
|
| 496 |
+
Ground028
|
| 497 |
+
Asphalt004
|
| 498 |
+
Rocks008
|
| 499 |
+
Tiles078
|
| 500 |
+
Wood064
|
| 501 |
+
Asphalt020L
|
| 502 |
+
Bricks076A
|
| 503 |
+
Rocks016
|
| 504 |
+
WoodFloor034
|
| 505 |
+
Terrazzo009
|
| 506 |
+
Leather030
|
| 507 |
+
WoodFloor053
|
| 508 |
+
Wood068
|
| 509 |
+
Snow007C
|
| 510 |
+
Asphalt028C
|
| 511 |
+
PaintedPlaster005
|
| 512 |
+
Rocks001
|
| 513 |
+
Marble026
|
| 514 |
+
Fabric055
|
| 515 |
+
Planks007
|
| 516 |
+
Tiles106
|
| 517 |
+
PavingStones125B
|
| 518 |
+
Ground040
|
| 519 |
+
Snow004
|
| 520 |
+
Fabric034
|
| 521 |
+
Ice004
|
| 522 |
+
PavingStones065
|
| 523 |
+
Rock016
|
| 524 |
+
WoodSiding010
|
| 525 |
+
Facade013
|
| 526 |
+
Plaster004
|
| 527 |
+
Fabric043
|
| 528 |
+
Granite007B
|
| 529 |
+
Wood009
|
| 530 |
+
Wallpaper001A
|
| 531 |
+
Bricks073C
|
| 532 |
+
Marble009
|
| 533 |
+
PavingStones124
|
| 534 |
+
Tiles032
|
| 535 |
+
Ground017
|
| 536 |
+
Tiles040
|
| 537 |
+
Marble005
|
| 538 |
+
Leather033C
|
| 539 |
+
Wicker002
|
| 540 |
+
PavingStones008
|
| 541 |
+
Plastic016B
|
| 542 |
+
Bricks073A
|
| 543 |
+
Asphalt025C
|
| 544 |
+
Lava002
|
| 545 |
+
Planks002
|
| 546 |
+
Snow009A
|
| 547 |
+
Wood035
|
| 548 |
+
PavingStones060
|
| 549 |
+
PavingStones133
|
| 550 |
+
PavingStones129
|
| 551 |
+
Tiles121
|
| 552 |
+
Road007
|
| 553 |
+
Snow008A
|
| 554 |
+
Gravel007
|
| 555 |
+
Tiles069
|
| 556 |
+
Ground050
|
| 557 |
+
Bamboo002C
|
| 558 |
+
Wood008
|
| 559 |
+
Asphalt022
|
| 560 |
+
Fabric072
|
| 561 |
+
Bark006
|
| 562 |
+
Fabric021
|
| 563 |
+
WoodChips001
|
| 564 |
+
RoofingTiles004
|
| 565 |
+
Wood026
|
| 566 |
+
Bricks008
|
| 567 |
+
Facade017
|
| 568 |
+
Ground037
|
| 569 |
+
RoofingTiles003
|
| 570 |
+
Snow008B
|
| 571 |
+
Carpet007
|
| 572 |
+
Paint002
|
| 573 |
+
PavingStones075
|
| 574 |
+
PaintedWood007A
|
| 575 |
+
ThatchedRoof001A
|
| 576 |
+
Tiles110
|
| 577 |
+
Wallpaper001B
|
| 578 |
+
Terrazzo008
|
| 579 |
+
WoodFloor051
|
| 580 |
+
Tiles024
|
| 581 |
+
Fabric011
|
| 582 |
+
PavingStones135
|
| 583 |
+
Gravel038
|
| 584 |
+
Fabric022
|
| 585 |
+
Ground018
|
| 586 |
+
Wallpaper002B
|
| 587 |
+
Gravel031
|
| 588 |
+
Bricks005
|
| 589 |
+
PavingStones058
|
| 590 |
+
Rock050
|
| 591 |
+
Bricks019
|
| 592 |
+
Marble020
|
| 593 |
+
WoodFloor022
|
| 594 |
+
Leather019
|
| 595 |
+
Ground051
|
| 596 |
+
Tiles011
|
| 597 |
+
WoodFloor011
|
| 598 |
+
Leather023
|
| 599 |
+
Fabric006
|
| 600 |
+
Concrete003
|
| 601 |
+
Bricks025
|
| 602 |
+
Tiles107
|
| 603 |
+
Wood020
|
| 604 |
+
PavingStones093
|
| 605 |
+
Wicker001
|
| 606 |
+
PaintedPlaster009
|
| 607 |
+
Tiles091
|
| 608 |
+
PaintedPlaster006
|
| 609 |
+
Marble001
|
| 610 |
+
Road002
|
| 611 |
+
Leather022
|
| 612 |
+
Bricks064
|
| 613 |
+
Wood013
|
| 614 |
+
Leather010
|
| 615 |
+
Rock005
|
| 616 |
+
Tiles064
|
| 617 |
+
Tiles054
|
| 618 |
+
Bricks076C
|
| 619 |
+
WoodFloor007
|
| 620 |
+
Bricks067
|
| 621 |
+
Wood057
|
| 622 |
+
Bricks045
|
| 623 |
+
Gravel021
|
| 624 |
+
Plaster002
|
| 625 |
+
Bark004
|
| 626 |
+
Snow007A
|
| 627 |
+
Asphalt030
|
| 628 |
+
Tatami006
|
| 629 |
+
Bark007
|
| 630 |
+
Bricks041
|
| 631 |
+
Wood017
|
| 632 |
+
Bricks073B
|
| 633 |
+
Ground033
|
| 634 |
+
Bricks004
|
| 635 |
+
Plastic015B
|
| 636 |
+
Concrete045
|
| 637 |
+
Wicker005
|
| 638 |
+
Bricks074
|
| 639 |
+
Tiles041
|
| 640 |
+
Terrazzo004
|
| 641 |
+
Clay002
|
| 642 |
+
WoodFloor023
|
| 643 |
+
Rock043S
|
| 644 |
+
Rocks023
|
| 645 |
+
Asphalt015
|
| 646 |
+
Tiles035
|
| 647 |
+
Wicker010A
|
| 648 |
+
Bricks071
|
| 649 |
+
Rocks013
|
| 650 |
+
Planks029S
|
| 651 |
+
Granite003A
|
| 652 |
+
Tiles008
|
| 653 |
+
Fabric068
|
| 654 |
+
Tiles128A
|
| 655 |
+
PavingStones086
|
| 656 |
+
Fabric009
|
| 657 |
+
Rocks017
|
| 658 |
+
WoodFloor005
|
| 659 |
+
Carpet005
|
| 660 |
+
Wood070
|
| 661 |
+
Rock040
|
| 662 |
+
Clay004
|
| 663 |
+
Wicker006
|
| 664 |
+
Marble007
|
| 665 |
+
Rocks011
|
| 666 |
+
Wicker008A
|
| 667 |
+
Ground066
|
| 668 |
+
TactilePaving004
|
| 669 |
+
Tiles019
|
| 670 |
+
Fabric024
|
| 671 |
+
PavingStones018
|
| 672 |
+
Marble010
|
| 673 |
+
Tiles046
|
| 674 |
+
Asphalt012
|
| 675 |
+
Fabric073
|
| 676 |
+
Tiles086
|
| 677 |
+
PavingStones011
|
| 678 |
+
Rock024
|
| 679 |
+
PaintedWood008B
|
| 680 |
+
Rocks022
|
| 681 |
+
Wood044
|
| 682 |
+
PaintedBricks003
|
| 683 |
+
Wood050
|
| 684 |
+
PavingStones125A
|
| 685 |
+
PavingStones025
|
| 686 |
+
Ground056
|
| 687 |
+
Wicker004
|
| 688 |
+
Wood086
|
| 689 |
+
Ground060
|
| 690 |
+
Gravel028
|
| 691 |
+
Wicker012A
|
| 692 |
+
Rocks010
|
| 693 |
+
Fabric005
|
| 694 |
+
Rock007
|
| 695 |
+
Tiles028
|
| 696 |
+
Wood040
|
| 697 |
+
Terrazzo011
|
| 698 |
+
Leather014
|
| 699 |
+
Wood004
|
| 700 |
+
Sponge003
|
| 701 |
+
Concrete032
|
| 702 |
+
Carpet015
|
| 703 |
+
Plastic007
|
| 704 |
+
Tiles031
|
| 705 |
+
Rock009
|
| 706 |
+
Ground015
|
| 707 |
+
Wood021
|
| 708 |
+
PavingStones057
|
| 709 |
+
Leather035D
|
| 710 |
+
Tiles109
|
| 711 |
+
Terrazzo019L
|
| 712 |
+
Leather032
|
| 713 |
+
Planks005
|
| 714 |
+
Tiles111
|
| 715 |
+
Rocks021
|
| 716 |
+
Concrete033
|
| 717 |
+
PavingStones101
|
| 718 |
+
Road004
|
| 719 |
+
PavingStones019
|
| 720 |
+
PaintedWood009A
|
| 721 |
+
PavingStones072
|
| 722 |
+
Plaster005
|
| 723 |
+
Gravel025
|
| 724 |
+
PavingStones080
|
| 725 |
+
Moss003
|
| 726 |
+
TactilePaving002
|
| 727 |
+
Wicker011A
|
| 728 |
+
Asphalt024A
|
| 729 |
+
Wood046
|
| 730 |
+
PavingStones037
|
| 731 |
+
Leather012
|
| 732 |
+
Gravel016
|
| 733 |
+
Plastic016A
|
| 734 |
+
Asphalt017
|
| 735 |
+
Leather025
|
| 736 |
+
PavingStones036
|
| 737 |
+
Bricks033
|
| 738 |
+
Ground016
|
| 739 |
+
Terrazzo005
|
| 740 |
+
WoodFloor028
|
| 741 |
+
Asphalt027C
|
| 742 |
+
WoodFloor045
|
| 743 |
+
Bricks014
|
| 744 |
+
Fabric035
|
| 745 |
+
Bricks028
|
| 746 |
+
RoofingTiles010
|
| 747 |
+
Asphalt027A
|
| 748 |
+
PavingStones071
|
| 749 |
+
WoodFloor050
|
| 750 |
+
Ground049C
|
| 751 |
+
RoofingTiles007
|
| 752 |
+
Fabric074
|
| 753 |
+
Bamboo002A
|
| 754 |
+
Tiles112
|
| 755 |
+
Bricks088
|
| 756 |
+
Ground064
|
| 757 |
+
Wicker009A
|
| 758 |
+
WoodFloor027
|
| 759 |
+
RoofingTiles008
|
| 760 |
+
Gravel033
|
| 761 |
+
Tiles084
|
| 762 |
+
Concrete015
|
| 763 |
+
Leather009
|
| 764 |
+
Fabric014
|
| 765 |
+
Wood089
|
| 766 |
+
Rocks025
|
| 767 |
+
Planks032
|
| 768 |
+
Wood001
|
| 769 |
+
Fabric069
|
| 770 |
+
Carpet013
|
| 771 |
+
PavingStones026
|
| 772 |
+
PavingStones043
|
| 773 |
+
Gravel034
|
| 774 |
+
Terrazzo017
|
| 775 |
+
WoodFloor012
|
| 776 |
+
Chipboard007
|
| 777 |
+
Concrete031
|
| 778 |
+
Fabric016
|
| 779 |
+
Ground007
|
| 780 |
+
Tiles079
|
| 781 |
+
Ivory002B
|
| 782 |
+
PavingStones047
|
| 783 |
+
Bricks013
|
| 784 |
+
Marble006
|
| 785 |
+
PavingStones016
|
| 786 |
+
TactilePaving006
|
| 787 |
+
Fabric050
|
| 788 |
+
Rock033
|
| 789 |
+
Ground034
|
| 790 |
+
Rocks003
|
| 791 |
+
Fabric026
|
| 792 |
+
PavingStones024
|
| 793 |
+
Rocks024L
|
| 794 |
+
Bricks052
|
| 795 |
+
Concrete005
|
| 796 |
+
Tiles036
|
| 797 |
+
Wicker012B
|
| 798 |
+
Candy002
|
| 799 |
+
Tiles090
|
| 800 |
+
Ground059
|
| 801 |
+
PavingStones045
|
| 802 |
+
Rock035
|
| 803 |
+
Wood067
|
| 804 |
+
Plastic001
|
| 805 |
+
Fabric077
|
| 806 |
+
Tiles081
|
| 807 |
+
Wood030
|
| 808 |
+
Paper003
|
| 809 |
+
Tiles073
|
| 810 |
+
Marble013
|
| 811 |
+
Fabric036
|
| 812 |
+
Leather033B
|
| 813 |
+
Tiles047
|
| 814 |
+
Rocks014
|
| 815 |
+
Tiles115
|
| 816 |
+
Ground038
|
| 817 |
+
PavingStones123
|
| 818 |
+
Wood084A
|
| 819 |
+
Leather028
|
| 820 |
+
WoodSiding007
|
| 821 |
+
PavingStones041
|
| 822 |
+
Leather031
|
| 823 |
+
Planks022
|
| 824 |
+
PavingStones081
|
| 825 |
+
Tiles097
|
| 826 |
+
Chipboard006
|
| 827 |
+
Snow010A
|
| 828 |
+
Asphalt025A
|
| 829 |
+
Tiles070
|
| 830 |
+
Leather026
|
| 831 |
+
Ground055S
|
| 832 |
+
Wood065
|
| 833 |
+
Tiles105
|
| 834 |
+
Ground027
|
| 835 |
+
Tiles082
|
| 836 |
+
WoodFloor039
|
| 837 |
+
Granite007A
|
| 838 |
+
Tiles021
|
| 839 |
+
Bricks029
|
| 840 |
+
Leather034D
|
| 841 |
+
Rock015
|
| 842 |
+
PaintedPlaster004
|
| 843 |
+
Plaster006
|
| 844 |
+
Wood038
|
| 845 |
+
PaintedPlaster012
|
| 846 |
+
Wood085A
|
| 847 |
+
Bricks057
|
| 848 |
+
Marble015
|
| 849 |
+
Ground013
|
| 850 |
+
Gravel037
|
| 851 |
+
Leather006
|
| 852 |
+
Plastic015A
|
| 853 |
+
WoodSiding012
|
| 854 |
+
Wood028
|
| 855 |
+
Gravel004
|
| 856 |
+
Wood056
|
| 857 |
+
WoodFloor009
|
| 858 |
+
Wallpaper002C
|
| 859 |
+
Leather003
|
| 860 |
+
Tiles126
|
| 861 |
+
Gravel018
|
| 862 |
+
Wood063
|
| 863 |
+
Bricks069
|
| 864 |
+
Concrete037
|
| 865 |
+
RoofingTiles002
|
| 866 |
+
Chipboard002
|
| 867 |
+
PavingStones031
|
| 868 |
+
Facade016
|
| 869 |
+
Tiles088
|
| 870 |
+
PavingStones044
|
| 871 |
+
Rock004
|
| 872 |
+
Terrazzo016
|
| 873 |
+
PavingStones066
|
| 874 |
+
Snow009C
|
| 875 |
+
Bricks049
|
| 876 |
+
Tiles027
|
| 877 |
+
PavingStones010
|
| 878 |
+
Terrazzo010
|
| 879 |
+
Wood082A
|
| 880 |
+
Ground031
|
| 881 |
+
Fabric031
|
| 882 |
+
Ground047
|
| 883 |
+
Fabric078
|
| 884 |
+
Ground035
|
| 885 |
+
Bricks039
|
| 886 |
+
PaintedBricks002
|
| 887 |
+
Granite002A
|
| 888 |
+
Bark010
|
| 889 |
+
Rock034
|
| 890 |
+
PavingStones107
|
| 891 |
+
Marble022
|
| 892 |
+
PavingStones106
|
| 893 |
+
Bricks080B
|
| 894 |
+
Sponge001
|
| 895 |
+
Cardboard002
|
| 896 |
+
Fabric029
|
| 897 |
+
Bricks023
|
| 898 |
+
Paint006
|
| 899 |
+
Bricks015
|
| 900 |
+
Tiles124
|
| 901 |
+
Leather035C
|
| 902 |
+
Gravel019
|
| 903 |
+
Bricks044
|
| 904 |
+
PavingStones087
|
| 905 |
+
Bricks038
|
| 906 |
+
Terrazzo015
|
| 907 |
+
Rock043L
|
| 908 |
+
Bricks036
|
| 909 |
+
PavingStones048
|
| 910 |
+
Tiles016
|
| 911 |
+
Tiles117
|
| 912 |
+
Ground019
|
| 913 |
+
Tiles030
|
| 914 |
+
Rock048
|
| 915 |
+
WoodFloor018
|
| 916 |
+
Wood032
|
| 917 |
+
Ice002
|
| 918 |
+
Wood062
|
| 919 |
+
Fabric015
|
| 920 |
+
PavingStones099
|
| 921 |
+
Terrazzo001
|
| 922 |
+
ThatchedRoof002A
|
| 923 |
+
Candy001
|
| 924 |
+
Ground021
|
| 925 |
+
Tiles075
|
| 926 |
+
Rocks007
|
| 927 |
+
PavingStones070
|
| 928 |
+
PaintedPlaster015
|
| 929 |
+
Fabric017
|
| 930 |
+
Wood039
|
| 931 |
+
Ground010
|
| 932 |
+
WoodFloor019
|
| 933 |
+
Fabric020
|
| 934 |
+
Fabric070
|
| 935 |
+
PavingStones076
|
| 936 |
+
Rock013
|
| 937 |
+
Leather008
|
| 938 |
+
Tatami004
|
| 939 |
+
Fabric076
|
| 940 |
+
Fabric053
|
| 941 |
+
PavingStones049
|
| 942 |
+
PaintedPlaster008
|
| 943 |
+
Marble011
|
| 944 |
+
Bricks081C
|
| 945 |
+
Asphalt024B
|
| 946 |
+
PaintedPlaster013
|
| 947 |
+
Ground029
|
| 948 |
+
Tiles039
|
| 949 |
+
Carpet003
|
| 950 |
+
Plaster003
|
| 951 |
+
Fabric048
|
| 952 |
+
Fabric049
|
| 953 |
+
Wood083A
|
| 954 |
+
Wood005
|
| 955 |
+
Road003
|
| 956 |
+
PavingStones017
|
| 957 |
+
TactilePaving001
|
| 958 |
+
PavingStones116
|
| 959 |
+
Wood002
|
| 960 |
+
Leather016
|
| 961 |
+
Leather036C
|
| 962 |
+
Leather035B
|
| 963 |
+
Snow003
|
| 964 |
+
Fabric065
|
| 965 |
+
Bricks066
|
| 966 |
+
Rocks015
|
| 967 |
+
Wood061
|
| 968 |
+
Tiles093
|
| 969 |
+
Wood019
|
| 970 |
+
Concrete024
|
| 971 |
+
Rock012
|
| 972 |
+
PavingStones118
|
| 973 |
+
Bricks050
|
| 974 |
+
Road001
|
| 975 |
+
Asphalt006
|
| 976 |
+
Rock028
|
| 977 |
+
PavingStones023
|
| 978 |
+
Wood016
|
| 979 |
+
PavingStones021
|
| 980 |
+
Granite004B
|
| 981 |
+
PavingStones005
|
| 982 |
+
Bamboo001A
|
| 983 |
+
GlazedTerracotta003
|
| 984 |
+
Marble008
|
| 985 |
+
Concrete006
|
| 986 |
+
Tiles063
|
| 987 |
+
Rock053
|
| 988 |
+
Facade015
|
| 989 |
+
Ground063
|
| 990 |
+
PavingStones085
|
| 991 |
+
Rocks018
|
| 992 |
+
PaintedBricks005
|
| 993 |
+
Terrazzo018
|
| 994 |
+
Cardboard001
|
| 995 |
+
Tiles080
|
| 996 |
+
Tiles010
|
| 997 |
+
Bricks021
|
| 998 |
+
Tiles104
|
| 999 |
+
Rock021
|
| 1000 |
+
PavingStones007
|
| 1001 |
+
Concrete004
|
| 1002 |
+
Leather007
|
| 1003 |
+
PaintedPlaster011
|
| 1004 |
+
Gravel027
|
| 1005 |
+
Moss001
|
| 1006 |
+
PaintedWood003
|
| 1007 |
+
Bamboo002B
|
| 1008 |
+
Concrete029
|
| 1009 |
+
WoodFloor003
|
| 1010 |
+
Gravel023
|
| 1011 |
+
Asphalt011
|
| 1012 |
+
Fabric061
|
| 1013 |
+
Wood072
|
| 1014 |
+
Rocks006
|
| 1015 |
+
WoodFloor029
|
| 1016 |
+
WoodFloor037
|
| 1017 |
+
Marble019
|
| 1018 |
+
Asphalt027B
|
| 1019 |
+
Wood058
|
| 1020 |
+
Bricks056
|
| 1021 |
+
Wood076
|
| 1022 |
+
PavingStones028
|
| 1023 |
+
Planks008
|
| 1024 |
+
Road006
|
| 1025 |
+
Tiles033
|
| 1026 |
+
Wood024
|
| 1027 |
+
Carpet001
|
| 1028 |
+
PavingStones027
|
| 1029 |
+
PavingStones115A
|
| 1030 |
+
Tiles096
|
| 1031 |
+
Asphalt026C
|
| 1032 |
+
Bricks018
|
| 1033 |
+
Plastic018A
|
| 1034 |
+
Marble012
|
| 1035 |
+
PavingStones100
|
| 1036 |
+
Asphalt007
|
| 1037 |
+
Tiles005
|
| 1038 |
+
Wood088
|
| 1039 |
+
Planks009
|
| 1040 |
+
PaintedWood004
|
| 1041 |
+
PavingStones073
|
| 1042 |
+
Chipboard003
|
| 1043 |
+
Bricks042
|
| 1044 |
+
Foam002
|
| 1045 |
+
WoodFloor057
|
| 1046 |
+
Ground062S
|
| 1047 |
+
Paper006
|
| 1048 |
+
Ground030
|
| 1049 |
+
RoofingTiles005
|
| 1050 |
+
PavingStones064
|
| 1051 |
+
Fabric046
|
| 1052 |
+
Gravel006
|
| 1053 |
+
Tiles128B
|
| 1054 |
+
Bark003
|
| 1055 |
+
Leather015
|
| 1056 |
+
Fabric038
|
| 1057 |
+
Bricks062
|
| 1058 |
+
Fabric060
|
| 1059 |
+
Fabric040
|
| 1060 |
+
WoodFloor006
|
| 1061 |
+
Rock032
|
| 1062 |
+
Leather027
|
| 1063 |
+
Wood006
|
| 1064 |
+
Snow006
|
| 1065 |
+
Wood027
|
| 1066 |
+
Ground046
|
| 1067 |
+
Rock046S
|
| 1068 |
+
Chipboard008
|
| 1069 |
+
Fabric052
|
| 1070 |
+
Bricks030
|
| 1071 |
+
Porcelain003
|
| 1072 |
+
Gravel014
|
| 1073 |
+
Chipboard005
|
| 1074 |
+
Tiles127
|
| 1075 |
+
Bricks053
|
| 1076 |
+
Fabric008
|
| 1077 |
+
Plastic018B
|
| 1078 |
+
Leather036A
|
| 1079 |
+
Granite006A
|
| 1080 |
+
Tiles129B
|
| 1081 |
+
Sponge002
|
| 1082 |
+
Ground052
|
| 1083 |
+
WoodFloor031
|
| 1084 |
+
Wood055
|
| 1085 |
+
Wood059
|
| 1086 |
+
Bamboo001B
|
| 1087 |
+
Bricks082C
|
| 1088 |
+
Planks004
|
| 1089 |
+
Concrete016
|
| 1090 |
+
Ground070
|
| 1091 |
+
Bricks026
|
| 1092 |
+
Plastic002
|
| 1093 |
+
PavingStones012
|
| 1094 |
+
Leather036B
|
| 1095 |
+
Snow007B
|
| 1096 |
+
Bricks048
|
| 1097 |
+
Leather037
|
| 1098 |
+
Concrete009
|
| 1099 |
+
WoodFloor032
|
| 1100 |
+
Ground055L
|
| 1101 |
+
Tiles087
|
| 1102 |
+
Rock031
|
| 1103 |
+
Fabric018
|
| 1104 |
+
PavingStones032
|
| 1105 |
+
WoodFloor030
|
| 1106 |
+
PavingStones114
|
| 1107 |
+
PavingStones022
|
| 1108 |
+
Granite005A
|
| 1109 |
+
Terrazzo019S
|
| 1110 |
+
Ground026
|
| 1111 |
+
Fabric012
|
| 1112 |
+
PaintedWood002
|
| 1113 |
+
PavingStones092
|
| 1114 |
+
Fabric066
|
| 1115 |
+
Bricks016
|
| 1116 |
+
Rock036
|
| 1117 |
+
Plastic013B
|
| 1118 |
+
Wood036
|
| 1119 |
+
Fabric042
|
| 1120 |
+
Rock020
|
| 1121 |
+
Ground045
|
| 1122 |
+
Ground049B
|
| 1123 |
+
Asphalt020S
|
| 1124 |
+
Concrete007
|
| 1125 |
+
TactilePaving005
|
| 1126 |
+
PavingStones113
|
| 1127 |
+
Terrazzo002
|
| 1128 |
+
Bamboo001C
|
| 1129 |
+
Wood073
|
| 1130 |
+
ThatchedRoof001B
|
| 1131 |
+
Concrete008
|
| 1132 |
+
PavingStones068
|
| 1133 |
+
Paper005
|
| 1134 |
+
Ground067
|
| 1135 |
+
Bricks081B
|
| 1136 |
+
Carpet008
|
| 1137 |
+
Moss002
|
| 1138 |
+
RoofingTiles006
|
| 1139 |
+
Bricks054
|
| 1140 |
+
PavingStones096
|
| 1141 |
+
Concrete018
|
| 1142 |
+
Wood082B
|
| 1143 |
+
Snow009B
|
| 1144 |
+
PavingStones109
|
| 1145 |
+
Bricks058
|
| 1146 |
+
PavingStones034
|
| 1147 |
+
Bricks047
|
| 1148 |
+
Ground009
|
| 1149 |
+
Rock030
|
| 1150 |
+
PaintedWood009B
|
| 1151 |
+
Carpet012
|
| 1152 |
+
RoofingTiles009
|
| 1153 |
+
Carpet009
|
| 1154 |
+
Tiles015
|
| 1155 |
+
Bricks024
|
| 1156 |
+
Gravel030
|
| 1157 |
+
PaintedWood009C
|
| 1158 |
+
Bark009
|
| 1159 |
+
PavingStones030
|
| 1160 |
+
Snow005
|
| 1161 |
+
Tiles049
|
| 1162 |
+
PaintedWood006A
|
| 1163 |
+
Ground041
|
| 1164 |
+
Rocks002
|
| 1165 |
+
Asphalt026A
|
| 1166 |
+
Ground048
|
| 1167 |
+
Bricks082A
|
| 1168 |
+
Terrazzo014
|
| 1169 |
+
Bricks046
|
| 1170 |
+
Bricks059
|
| 1171 |
+
Rocks009
|
| 1172 |
+
Tiles092
|
| 1173 |
+
Bricks080A
|
| 1174 |
+
Wood083B
|
| 1175 |
+
WoodFloor038
|
| 1176 |
+
Bricks055
|
| 1177 |
+
PavingStones131
|
| 1178 |
+
Tiles123
|
| 1179 |
+
Plastic017B
|
| 1180 |
+
Concrete010
|
| 1181 |
+
WoodFloor010
|
| 1182 |
+
Wood077
|
capture/data/matlist/texsd
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
acrylic_000
|
| 2 |
+
acrylic_002
|
| 3 |
+
acrylic_003
|
| 4 |
+
acrylic_004
|
| 5 |
+
acrylic_005
|
| 6 |
+
acrylic_006
|
| 7 |
+
acrylic_007
|
| 8 |
+
acrylic_008
|
| 9 |
+
acrylic_009
|
| 10 |
+
acrylic_010
|
| 11 |
+
acrylic_011
|
| 12 |
+
acrylic_012
|
| 13 |
+
acrylic_013
|
| 14 |
+
acrylic_014
|
| 15 |
+
acrylic_016
|
| 16 |
+
acrylic_017
|
| 17 |
+
acrylic_018
|
| 18 |
+
acrylic_019
|
| 19 |
+
acrylic_020
|
| 20 |
+
acrylic_021
|
| 21 |
+
acrylic_022
|
| 22 |
+
acrylic_023
|
| 23 |
+
acrylic_024
|
| 24 |
+
acrylic_025
|
| 25 |
+
acrylic_026
|
| 26 |
+
acrylic_027
|
| 27 |
+
acrylic_028
|
| 28 |
+
acrylic_029
|
| 29 |
+
acrylic_030
|
| 30 |
+
acrylic_031
|
| 31 |
+
acrylic_032
|
| 32 |
+
acrylic_033
|
| 33 |
+
acrylic_034
|
| 34 |
+
acrylic_035
|
| 35 |
+
acrylic_036
|
| 36 |
+
acrylic_037
|
| 37 |
+
acrylic_038
|
| 38 |
+
acrylic_039
|
| 39 |
+
asphalt_000
|
| 40 |
+
asphalt_001
|
| 41 |
+
asphalt_002
|
| 42 |
+
asphalt_003
|
| 43 |
+
asphalt_004
|
| 44 |
+
asphalt_005
|
| 45 |
+
asphalt_006
|
| 46 |
+
asphalt_007
|
| 47 |
+
asphalt_008
|
| 48 |
+
asphalt_009
|
| 49 |
+
asphalt_010
|
| 50 |
+
asphalt_011
|
| 51 |
+
asphalt_012
|
| 52 |
+
asphalt_013
|
| 53 |
+
asphalt_014
|
| 54 |
+
asphalt_015
|
| 55 |
+
asphalt_016
|
| 56 |
+
asphalt_017
|
| 57 |
+
asphalt_018
|
| 58 |
+
asphalt_019
|
| 59 |
+
asphalt_020
|
| 60 |
+
asphalt_021
|
| 61 |
+
asphalt_022
|
| 62 |
+
asphalt_023
|
| 63 |
+
asphalt_025
|
| 64 |
+
asphalt_026
|
| 65 |
+
asphalt_027
|
| 66 |
+
asphalt_028
|
| 67 |
+
asphalt_029
|
| 68 |
+
asphalt_030
|
| 69 |
+
asphalt_031
|
| 70 |
+
asphalt_032
|
| 71 |
+
asphalt_033
|
| 72 |
+
asphalt_034
|
| 73 |
+
asphalt_035
|
| 74 |
+
asphalt_036
|
| 75 |
+
asphalt_037
|
| 76 |
+
asphalt_038
|
| 77 |
+
asphalt_039
|
| 78 |
+
bamboo_000
|
| 79 |
+
bamboo_001
|
| 80 |
+
bamboo_002
|
| 81 |
+
bamboo_003
|
| 82 |
+
bamboo_004
|
| 83 |
+
bamboo_005
|
| 84 |
+
bamboo_006
|
| 85 |
+
bamboo_007
|
| 86 |
+
bamboo_008
|
| 87 |
+
bamboo_009
|
| 88 |
+
bamboo_010
|
| 89 |
+
bamboo_011
|
| 90 |
+
bamboo_012
|
| 91 |
+
bamboo_013
|
| 92 |
+
bamboo_014
|
| 93 |
+
bamboo_015
|
| 94 |
+
bamboo_016
|
| 95 |
+
bamboo_017
|
| 96 |
+
bamboo_018
|
| 97 |
+
bamboo_019
|
| 98 |
+
bamboo_020
|
| 99 |
+
bamboo_021
|
| 100 |
+
bamboo_022
|
| 101 |
+
bamboo_023
|
| 102 |
+
bamboo_024
|
| 103 |
+
bamboo_025
|
| 104 |
+
bamboo_026
|
| 105 |
+
bamboo_027
|
| 106 |
+
bamboo_028
|
| 107 |
+
bamboo_029
|
| 108 |
+
bamboo_030
|
| 109 |
+
bamboo_031
|
| 110 |
+
bamboo_032
|
| 111 |
+
bamboo_033
|
| 112 |
+
bamboo_034
|
| 113 |
+
bamboo_035
|
| 114 |
+
bamboo_036
|
| 115 |
+
bamboo_037
|
| 116 |
+
bamboo_038
|
| 117 |
+
bamboo_039
|
| 118 |
+
bambooflooring_000
|
| 119 |
+
bambooflooring_001
|
| 120 |
+
bambooflooring_002
|
| 121 |
+
bambooflooring_003
|
| 122 |
+
bambooflooring_004
|
| 123 |
+
bambooflooring_005
|
| 124 |
+
bambooflooring_006
|
| 125 |
+
bambooflooring_007
|
| 126 |
+
bambooflooring_008
|
| 127 |
+
bambooflooring_009
|
| 128 |
+
bambooflooring_010
|
| 129 |
+
bambooflooring_011
|
| 130 |
+
bambooflooring_012
|
| 131 |
+
bambooflooring_013
|
| 132 |
+
bambooflooring_014
|
| 133 |
+
bambooflooring_015
|
| 134 |
+
bambooflooring_016
|
| 135 |
+
bambooflooring_017
|
| 136 |
+
bambooflooring_018
|
| 137 |
+
bambooflooring_019
|
| 138 |
+
bambooflooring_020
|
| 139 |
+
bambooflooring_021
|
| 140 |
+
bambooflooring_022
|
| 141 |
+
bambooflooring_023
|
| 142 |
+
bambooflooring_024
|
| 143 |
+
bambooflooring_025
|
| 144 |
+
bambooflooring_026
|
| 145 |
+
bambooflooring_027
|
| 146 |
+
bambooflooring_028
|
| 147 |
+
bambooflooring_029
|
| 148 |
+
bambooflooring_030
|
| 149 |
+
bambooflooring_031
|
| 150 |
+
bambooflooring_032
|
| 151 |
+
bambooflooring_033
|
| 152 |
+
bambooflooring_034
|
| 153 |
+
bambooflooring_035
|
| 154 |
+
bambooflooring_036
|
| 155 |
+
bambooflooring_037
|
| 156 |
+
bambooflooring_038
|
| 157 |
+
bambooflooring_039
|
| 158 |
+
bark_000
|
| 159 |
+
bark_001
|
| 160 |
+
bark_002
|
| 161 |
+
bark_003
|
| 162 |
+
bark_004
|
| 163 |
+
bark_005
|
| 164 |
+
bark_006
|
| 165 |
+
bark_007
|
| 166 |
+
bark_008
|
| 167 |
+
bark_009
|
| 168 |
+
bark_010
|
| 169 |
+
bark_011
|
| 170 |
+
bark_012
|
| 171 |
+
bark_013
|
| 172 |
+
bark_014
|
| 173 |
+
bark_015
|
| 174 |
+
bark_016
|
| 175 |
+
bark_017
|
| 176 |
+
bark_018
|
| 177 |
+
bark_019
|
| 178 |
+
bark_020
|
| 179 |
+
bark_021
|
| 180 |
+
bark_022
|
| 181 |
+
bark_023
|
| 182 |
+
bark_024
|
| 183 |
+
bark_025
|
| 184 |
+
bark_026
|
| 185 |
+
bark_027
|
| 186 |
+
bark_028
|
| 187 |
+
bark_029
|
| 188 |
+
bark_030
|
| 189 |
+
bark_031
|
| 190 |
+
bark_032
|
| 191 |
+
bark_033
|
| 192 |
+
bark_034
|
| 193 |
+
bark_035
|
| 194 |
+
bark_036
|
| 195 |
+
bark_037
|
| 196 |
+
bark_038
|
| 197 |
+
bark_039
|
| 198 |
+
bricks_000
|
| 199 |
+
bricks_001
|
| 200 |
+
bricks_002
|
| 201 |
+
bricks_003
|
| 202 |
+
bricks_004
|
| 203 |
+
bricks_005
|
| 204 |
+
bricks_006
|
| 205 |
+
bricks_007
|
| 206 |
+
bricks_008
|
| 207 |
+
bricks_009
|
| 208 |
+
bricks_010
|
| 209 |
+
bricks_011
|
| 210 |
+
bricks_012
|
| 211 |
+
bricks_013
|
| 212 |
+
bricks_014
|
| 213 |
+
bricks_015
|
| 214 |
+
bricks_016
|
| 215 |
+
bricks_017
|
| 216 |
+
bricks_018
|
| 217 |
+
bricks_019
|
| 218 |
+
bricks_020
|
| 219 |
+
bricks_021
|
| 220 |
+
bricks_022
|
| 221 |
+
bricks_023
|
| 222 |
+
bricks_024
|
| 223 |
+
bricks_025
|
| 224 |
+
bricks_026
|
| 225 |
+
bricks_028
|
| 226 |
+
bricks_029
|
| 227 |
+
bricks_030
|
| 228 |
+
bricks_031
|
| 229 |
+
bricks_032
|
| 230 |
+
bricks_033
|
| 231 |
+
bricks_034
|
| 232 |
+
bricks_035
|
| 233 |
+
bricks_036
|
| 234 |
+
bricks_037
|
| 235 |
+
bricks_038
|
| 236 |
+
bricks_039
|
| 237 |
+
brocade_000
|
| 238 |
+
brocade_001
|
| 239 |
+
brocade_002
|
| 240 |
+
brocade_003
|
| 241 |
+
brocade_004
|
| 242 |
+
brocade_005
|
| 243 |
+
brocade_006
|
| 244 |
+
brocade_007
|
| 245 |
+
brocade_008
|
| 246 |
+
brocade_009
|
| 247 |
+
brocade_010
|
| 248 |
+
brocade_011
|
| 249 |
+
brocade_012
|
| 250 |
+
brocade_013
|
| 251 |
+
brocade_014
|
| 252 |
+
brocade_015
|
| 253 |
+
brocade_016
|
| 254 |
+
brocade_017
|
| 255 |
+
brocade_018
|
| 256 |
+
brocade_019
|
| 257 |
+
brocade_020
|
| 258 |
+
brocade_021
|
| 259 |
+
brocade_022
|
| 260 |
+
brocade_023
|
| 261 |
+
brocade_024
|
| 262 |
+
brocade_025
|
| 263 |
+
brocade_026
|
| 264 |
+
brocade_027
|
| 265 |
+
brocade_028
|
| 266 |
+
brocade_029
|
| 267 |
+
brocade_030
|
| 268 |
+
brocade_031
|
| 269 |
+
brocade_032
|
| 270 |
+
brocade_033
|
| 271 |
+
brocade_034
|
| 272 |
+
brocade_035
|
| 273 |
+
brocade_036
|
| 274 |
+
brocade_037
|
| 275 |
+
brocade_038
|
| 276 |
+
brocade_039
|
| 277 |
+
burlap_000
|
| 278 |
+
burlap_001
|
| 279 |
+
burlap_002
|
| 280 |
+
burlap_003
|
| 281 |
+
burlap_004
|
| 282 |
+
burlap_005
|
| 283 |
+
burlap_006
|
| 284 |
+
burlap_007
|
| 285 |
+
burlap_008
|
| 286 |
+
burlap_009
|
| 287 |
+
burlap_010
|
| 288 |
+
burlap_011
|
| 289 |
+
burlap_012
|
| 290 |
+
burlap_013
|
| 291 |
+
burlap_014
|
| 292 |
+
burlap_015
|
| 293 |
+
burlap_016
|
| 294 |
+
burlap_017
|
| 295 |
+
burlap_018
|
| 296 |
+
burlap_019
|
| 297 |
+
burlap_020
|
| 298 |
+
burlap_021
|
| 299 |
+
burlap_022
|
| 300 |
+
burlap_023
|
| 301 |
+
burlap_024
|
| 302 |
+
burlap_025
|
| 303 |
+
burlap_026
|
| 304 |
+
burlap_027
|
| 305 |
+
burlap_028
|
| 306 |
+
burlap_029
|
| 307 |
+
burlap_030
|
| 308 |
+
burlap_031
|
| 309 |
+
burlap_033
|
| 310 |
+
burlap_034
|
| 311 |
+
burlap_035
|
| 312 |
+
burlap_036
|
| 313 |
+
burlap_037
|
| 314 |
+
burlap_038
|
| 315 |
+
burlap_039
|
| 316 |
+
burnoutvelvet_000
|
| 317 |
+
burnoutvelvet_001
|
| 318 |
+
burnoutvelvet_002
|
| 319 |
+
burnoutvelvet_003
|
| 320 |
+
burnoutvelvet_004
|
| 321 |
+
burnoutvelvet_005
|
| 322 |
+
burnoutvelvet_006
|
| 323 |
+
burnoutvelvet_007
|
| 324 |
+
burnoutvelvet_008
|
| 325 |
+
burnoutvelvet_009
|
| 326 |
+
burnoutvelvet_010
|
| 327 |
+
burnoutvelvet_011
|
| 328 |
+
burnoutvelvet_012
|
| 329 |
+
burnoutvelvet_013
|
| 330 |
+
burnoutvelvet_014
|
| 331 |
+
burnoutvelvet_015
|
| 332 |
+
burnoutvelvet_016
|
| 333 |
+
burnoutvelvet_017
|
| 334 |
+
burnoutvelvet_018
|
| 335 |
+
burnoutvelvet_019
|
| 336 |
+
burnoutvelvet_020
|
| 337 |
+
burnoutvelvet_021
|
| 338 |
+
burnoutvelvet_022
|
| 339 |
+
burnoutvelvet_023
|
| 340 |
+
burnoutvelvet_024
|
| 341 |
+
burnoutvelvet_025
|
| 342 |
+
burnoutvelvet_027
|
| 343 |
+
burnoutvelvet_028
|
| 344 |
+
burnoutvelvet_029
|
| 345 |
+
burnoutvelvet_031
|
| 346 |
+
burnoutvelvet_032
|
| 347 |
+
burnoutvelvet_033
|
| 348 |
+
burnoutvelvet_034
|
| 349 |
+
burnoutvelvet_036
|
| 350 |
+
burnoutvelvet_037
|
| 351 |
+
burnoutvelvet_038
|
| 352 |
+
burnoutvelvet_039
|
| 353 |
+
candy_000
|
capture/data/module.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from pprint import pprint
|
| 4 |
+
from torch.utils.data import DataLoader
|
| 5 |
+
from pytorch_lightning import LightningDataModule
|
| 6 |
+
|
| 7 |
+
from . import augment as Aug
|
| 8 |
+
from ..render import Renderer
|
| 9 |
+
from .utils import MultiLoader, EmptyDataset, collate_fn
|
| 10 |
+
from .source import AmbientCG
|
| 11 |
+
from .source import FabricsDataset
|
| 12 |
+
from .target import StableDiffusion
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def is_set(x):
    """Return True when *x* holds a value, i.e. it is anything other than None.

    Falsy-but-real values (0, '', [], False) still count as "set".
    """
    return not (x is None)
|
| 17 |
+
|
| 18 |
+
class DataModule(LightningDataModule):
    """Lightning data module pairing a labeled *source* dataset (SVBRDF maps,
    key ``'acg'`` -> FabricsDataset) with an unlabeled/pseudo-labeled *target*
    dataset (key ``'sd'`` -> StableDiffusion) via ``MultiLoader`` for train and
    validation; test/predict use a single dataset.

    NOTE(review): which of ``self.src_train`` / ``self.tgt_train`` etc. exist
    depends on the ``stage`` passed to :meth:`setup`; the dataloader methods
    assume the matching stage was set up first.
    """

    def __init__(
        self,
        batch_size: int = 1,
        num_workers: int = 1,
        # truthy -> random train-time augmentation; falsy/None -> center crop only
        transform: typing.Optional[bool] = None,
        source_ds: str = '',   # source dataset key; only 'acg' is handled
        target_ds: str = '',   # target dataset key; only 'sd' is handled
        test_ds: str = '',     # 'acg' or 'sd'
        predict_ds: str = '',  # only 'sd' is handled
        source_list: typing.Optional[Path] = None,
        target_list: typing.Optional[Path] = None,
        target_val_list: typing.Optional[Path] = None,
        test_list: typing.Optional[Path] = None,
        predict_list: typing.Optional[Path] = None,
        source_dir: typing.Optional[Path] = None,
        target_dir: typing.Optional[Path] = None,
        predict_dir: typing.Optional[Path] = None,
        test_dir: typing.Optional[Path] = None,
        pseudo_labels: bool = False,
        input_size: int = 512,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.transform = transform
        self.pseudo_labels = pseudo_labels

        self.source_ds = source_ds
        self.target_ds = target_ds
        self.test_ds = test_ds
        self.predict_ds = predict_ds

        self.source_list = source_list
        self.target_list = target_list
        self.target_val_list = target_val_list
        self.predict_list = predict_list
        self.test_list = test_list

        self.source_dir = source_dir
        self.target_dir = target_dir
        self.predict_dir = predict_dir
        self.test_dir = test_dir

        self.input_size = input_size
        # self.use_ref = use_ref

        # At least one role (source/target/test/predict) must be configured.
        assert self.source_ds or self.target_ds or self.test_ds or self.predict_ds
        if self.source_ds:
            assert is_set(source_list)
        if self.target_ds:
            assert is_set(target_list)
            # 'sd' derives its validation split internally; other targets
            # need an explicit validation list.
            if self.target_ds != 'sd':
                assert is_set(target_val_list)

    def setup(self, stage: str):
        """Instantiate the datasets required by *stage*.

        'fit' builds train datasets (and falls through to build validation
        ones too); 'validate' builds only validation; 'test' fills
        ``self.eval``; 'predict' fills ``self.ds``.
        """
        renderer = Renderer(return_params=True)
        # Deterministic eval-time preprocessing: normalize then resize.
        eval_tf = [
            Aug.NormalizeGeometry(),
            # Aug.CenterCrop((2048,2048)),
            Aug.Resize([self.input_size, self.input_size], antialias=True)]


        if stage == 'fit':
            if self.transform:
                # NOTE(review): crop size is hard-coded to 512 here instead
                # of self.input_size — confirm this is intentional.
                train_tf = [
                    Aug.RandomResizedCrop((512,512), scale=(1/16, 1/4), ratio=(1.,1.)),
                    # Aug.RandomCrop(self.input_size),
                    Aug.NormalizeGeometry(),
                    Aug.RandomHorizontalFlip(),
                    Aug.RandomVerticalFlip(),
                    Aug.RandomIncrementRotate(p=1.),
                    Aug.ColorJitter(brightness=.2, hue=.05, contrast=0.1)
                ]
            else:
                train_tf = [
                    Aug.CenterCrop((self.input_size, self.input_size)),
                    Aug.NormalizeGeometry()]
            train_kwargs = dict(pseudo_labels=self.pseudo_labels,
                                renderer=renderer,
                                transform=train_tf)
            print('stage fit:')
            pprint(train_kwargs)

            ## SOURCE train dataset
            if self.source_ds == 'acg':
                self.src_train = FabricsDataset(split='train',
                                                dir=self.source_dir,
                                                matlist=self.source_list,
                                                **train_kwargs)
            ## TARGET train dataset
            if self.target_ds == 'sd':
                self.tgt_train = StableDiffusion(split='train',
                                                 # use_ref=self.use_ref,
                                                 dir=self.target_dir,
                                                 matlist=self.target_list,
                                                 **train_kwargs)

            # When one side is absent, pad it with an EmptyDataset of
            # matching length so MultiLoader can still zip the two streams.
            if not self.source_ds:
                self.src_train = EmptyDataset(len(self.tgt_train))
            if not self.target_ds:
                self.tgt_train = EmptyDataset(len(self.src_train))

        # Deliberately a plain `if`: a 'fit' stage also sets up validation.
        if stage == 'fit' or stage == 'validate':
            # set_seed_render makes the rendering deterministic per material.
            validate_kwargs = dict(transform=eval_tf,
                                   renderer=renderer,
                                   set_seed_render=True)

            ## SOURCE validation dataset
            if self.source_ds == 'acg':
                self.src_valid = FabricsDataset(split='valid',
                                                dir=self.source_dir,
                                                matlist=self.source_list,
                                                **validate_kwargs)
            ## TARGET validation dataset
            if self.target_ds == 'sd':
                self.tgt_valid = StableDiffusion(split='valid',
                                                 pseudo_labels=False,
                                                 dir=self.target_dir,
                                                 # use_ref=self.use_ref,
                                                 matlist=self.target_list,
                                                 **validate_kwargs)

            if not self.source_ds:
                self.src_valid = EmptyDataset(len(self.tgt_valid))
            if not self.target_ds:
                self.tgt_valid = EmptyDataset(len(self.src_valid))

        elif stage == 'test':
            assert self.test_ds

            test_kwargs = dict(pseudo_labels=False,
                               matlist=self.test_list,
                               transform=eval_tf,
                               renderer=renderer,
                               dir=self.test_dir,
                               set_seed_render=True)

            # self.eval is a list to match test_dataloader's multi-loader shape.
            if self.test_ds == 'acg':
                self.eval = [FabricsDataset(split='all', **test_kwargs)]
            elif self.test_ds == 'sd':
                self.eval = [StableDiffusion(split='all', **test_kwargs)]

        elif stage == 'predict':
            # matlist=None lets the dataset discover its files from predict_dir.
            predict_kwargs = dict(split='all',
                                  pseudo_labels=False,
                                  dir=self.predict_dir,
                                  matlist=None,
                                  transform=eval_tf,
                                  renderer=renderer)

            if self.predict_ds == 'sd':
                self.ds = StableDiffusion(**predict_kwargs)

    def train_dataloader(self):
        """Return a MultiLoader zipping shuffled source and target train loaders."""
        src_dl = DataLoader(dataset=self.src_train,
                            batch_size=self.batch_size,
                            drop_last=True,
                            shuffle=True,
                            num_workers=self.num_workers,
                            collate_fn=collate_fn)
        tgt_dl = DataLoader(dataset=self.tgt_train,
                            batch_size=self.batch_size,
                            drop_last=True,
                            shuffle=True,
                            num_workers=self.num_workers,
                            collate_fn=collate_fn)

        mix = MultiLoader(src_dl, tgt_dl)
        return mix

    def val_dataloader(self):
        """Return a MultiLoader zipping ordered source and target validation loaders."""
        src_dl = DataLoader(dataset=self.src_valid,
                            batch_size=self.batch_size,
                            drop_last=False,
                            shuffle=False,
                            num_workers=self.num_workers,
                            collate_fn=collate_fn)
        tgt_dl = DataLoader(dataset=self.tgt_valid,
                            batch_size=self.batch_size,
                            drop_last=False,
                            shuffle=False,
                            num_workers=self.num_workers,
                            collate_fn=collate_fn)

        mix = MultiLoader(src_dl, tgt_dl)
        return mix

    def test_dataloader(self):
        """One ordered DataLoader per dataset prepared in setup('test')."""
        return [DataLoader(dataset=ds,
                           batch_size=self.batch_size,
                           drop_last=False,
                           shuffle=False,
                           num_workers=self.num_workers) for ds in self.eval]

    def predict_dataloader(self):
        """Single-sample, single-worker loader over the predict dataset."""
        return DataLoader(dataset=self.ds,
                          batch_size=1,
                          drop_last=False,
                          shuffle=False,
                          num_workers=1)
|
capture/data/source.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import cv2
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from easydict import EasyDict
|
| 8 |
+
import torchvision.transforms.functional as tf
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
|
| 11 |
+
from ..utils.log import get_matlist
|
| 12 |
+
from . import augment as Aug
|
| 13 |
+
import numpy as np
|
| 14 |
+
def get_files_from_directory(dir: Path):
    """Return the immediate subdirectories of *dir*.

    Despite the name, plain files are filtered OUT: only entries for which
    ``is_dir()`` is true are kept, in ``Path.iterdir`` order.
    """
    children = []
    for child in dir.iterdir():
        if child.is_dir():
            children.append(child)
    return children
|
| 16 |
+
|
| 17 |
+
class FabricsDataset(Dataset):
    """Dataset of material folders, each holding ``normal.png``,
    ``basecolor.png`` and ``roughness.png``. Folders whose roughness map is
    uniformly white (constant 255) are discarded at construction time.

    Parameters
    ----------
    split : one of 'train' / 'valid' / 'all'; train/valid is a 95/5 cut.
    transform : list of augmentation ops fed to ``Aug.Pipeline``.
    renderer : callable rendering (N, A, R) maps into an input image + params.
    matlist : existing file path (asserted, but not used for folder discovery).
    dir : root directory scanned for material folders.
    set_seed_render : seed torch per-item so rendering is deterministic.
    """
    def __init__(
        self,
        split,
        transform,
        renderer,
        matlist,
        dir: typing.Optional[Path] = None,
        set_seed_render: bool = False,
        **kwargs
    ):
        assert dir.is_dir()
        assert matlist.is_file()
        assert split in ['train', 'valid', 'all']
        self.set_seed_render = set_seed_render

        folders = get_files_from_directory(dir)
        #folders = [folder for folder in folders if (folder / 'height.png').is_file()]
        folders = [folder for folder in folders if (folder / 'normal.png').is_file()
                   and (folder / 'roughness.png').is_file()
                   and (folder / 'basecolor.png').is_file()]
        # FIX: iterdir() order is filesystem-dependent; sort so the 95/5
        # train/valid slice below is reproducible across machines/runs.
        folders = sorted(folders)

        # Drop materials with a degenerate (uniformly white) roughness map.
        valid_folders = []
        for folder in folders:
            R_path = folder / 'roughness.png'
            R = Image.open(R_path).convert('RGB')
            R_array = np.array(R)

            if np.all(R_array == 255):
                continue

            valid_folders.append(folder)

        folders = valid_folders

        print("Размер датасета: ", len(folders))

        # train/val/ split
        self.split = split
        k = int(len(folders) * .95)
        if split == 'train':
            self.folders = folders[:k]
        elif split == 'valid':
            self.folders = folders[k:]
        elif split == 'all':
            self.folders = folders

        print(f'FabricsDataset list={matlist}:{self.split}=[{len(self.folders)}/{len(folders)}]')

        #dtypes = ['normals', 'albedo', 'input', 'input']
        dtypes = ['normals', 'albedo', 'input']
        self.tf = Aug.Pipeline(*transform, dtypes=dtypes)
        self.renderer = renderer

    def __getitem__(self, index, quick=False):
        """Load, augment and render one material; returns an EasyDict with
        input image, render params, N/A/R maps and the folder name."""
        import zlib  # local: only needed for the stable per-item seed

        folder = self.folders[index]

        N_path = folder / 'normal.png'
        A_path = folder / 'basecolor.png'
        R_path = folder / 'roughness.png'
        #D_path = folder / 'height.png'

        N = tf.to_tensor(Image.open(N_path).convert('RGB'))
        A = tf.to_tensor(Image.open(A_path).convert('RGB'))
        R = tf.to_tensor(Image.open(R_path).convert('RGB'))
        #D_pil = cv2.imread(str(D_path), cv2.IMREAD_GRAYSCALE)
        #D = torch.from_numpy(D_pil)[None].repeat(3, 1, 1) / 255

        # augmentation
        #N, A, R, D = self.tf([N, A, R, D])
        N, A, R = self.tf([N, A, R])

        if self.set_seed_render:
            # FIX: hash(str) is randomized per process (PYTHONHASHSEED), so
            # the old hash(folder.name) seed was NOT reproducible across
            # runs. crc32 is a stable function of the name.
            torch.manual_seed(zlib.crc32(folder.name.encode('utf-8')))
        # I, params = self.renderer([N, A, R, D], n_samples=1)
        I, params = self.renderer([N, A, R], n_samples=1)
        params = torch.stack(params)

        # return homogenous object whatever the source: acg or sd
        return EasyDict(
            input=I[0],
            input_params=params[:, 0],
            normals=N,
            albedo=A,
            roughness=R,
            #displacement=D,
            name=folder.name,
        )

    def __len__(self):
        """Number of material folders kept for this split."""
        return len(self.folders)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class AmbientCG(Dataset):
|
| 113 |
+
def __init__(
|
| 114 |
+
self,
|
| 115 |
+
split,
|
| 116 |
+
transform,
|
| 117 |
+
renderer,
|
| 118 |
+
matlist,
|
| 119 |
+
dir: typing.Optional[Path] = None,
|
| 120 |
+
set_seed_render: bool = False,
|
| 121 |
+
**kwargs
|
| 122 |
+
):
|
| 123 |
+
assert dir.is_dir()
|
| 124 |
+
assert matlist.is_file()
|
| 125 |
+
assert split in ['train', 'valid', 'all']
|
| 126 |
+
self.set_seed_render = set_seed_render
|
| 127 |
+
|
| 128 |
+
files = get_matlist(matlist, dir)
|
| 129 |
+
|
| 130 |
+
# train/val/ split
|
| 131 |
+
self.split = split
|
| 132 |
+
k = int(len(files) * .95)
|
| 133 |
+
if split == 'train':
|
| 134 |
+
self.files = files[:k]
|
| 135 |
+
elif split == 'valid':
|
| 136 |
+
self.files = files[k:]
|
| 137 |
+
elif split == 'all':
|
| 138 |
+
self.files = files
|
| 139 |
+
|
| 140 |
+
print(f'AmbientCG list={matlist}:{self.split}=[{len(self.files)}/{len(files)}]')
|
| 141 |
+
|
| 142 |
+
dtypes = ['normals', 'albedo', 'input', 'input']
|
| 143 |
+
self.tf = Aug.Pipeline(*transform, dtypes=dtypes)
|
| 144 |
+
self.renderer = renderer
|
| 145 |
+
|
| 146 |
+
def __getitem__(self, index, quick=False):
|
| 147 |
+
path = self.files[index]
|
| 148 |
+
name = path.stem.split('_')[0]
|
| 149 |
+
root = path.parent
|
| 150 |
+
|
| 151 |
+
N_path = root / f'{name}_2K-PNG_NormalGL.png'
|
| 152 |
+
N = tf.to_tensor(Image.open(N_path).convert('RGB'))
|
| 153 |
+
|
| 154 |
+
A_path = root / f'{name}_2K-PNG_Color.png'
|
| 155 |
+
A = tf.to_tensor(Image.open(A_path).convert('RGB'))
|
| 156 |
+
|
| 157 |
+
R_path = root / f'{name}_2K-PNG_Roughness.png'
|
| 158 |
+
R = tf.to_tensor(Image.open(R_path).convert('RGB'))
|
| 159 |
+
|
| 160 |
+
D_path = root / f'{name}_2K-PNG_Displacement.png'
|
| 161 |
+
D_pil = cv2.imread(str(D_path), cv2.IMREAD_GRAYSCALE)
|
| 162 |
+
D = torch.from_numpy(D_pil)[None].repeat(3, 1, 1) / 255
|
| 163 |
+
|
| 164 |
+
# augmentation
|
| 165 |
+
N, A, R, D = self.tf([N, A, R, D])
|
| 166 |
+
|
| 167 |
+
if self.set_seed_render:
|
| 168 |
+
torch.manual_seed(hash(name))
|
| 169 |
+
I, params = self.renderer([N, A, R, D], n_samples=1)
|
| 170 |
+
params = torch.stack(params)
|
| 171 |
+
|
| 172 |
+
# return homogenous object whatever the source: acg or sd
|
| 173 |
+
return EasyDict(
|
| 174 |
+
input=I[0],
|
| 175 |
+
input_params=params[:, 0],
|
| 176 |
+
normals=N,
|
| 177 |
+
albedo=A,
|
| 178 |
+
roughness=R,
|
| 179 |
+
displacement=D,
|
| 180 |
+
name=name,
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
def __len__(self):
    """Number of materials available in this split."""
    return len(self.files)
|
capture/data/target.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import random
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from easydict import EasyDict
|
| 8 |
+
import torchvision.transforms.functional as tf
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
|
| 11 |
+
from ..utils.log import get_matlist
|
| 12 |
+
from . import augment as Aug
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class StableDiffusion(Dataset):
    """Unlabeled target-domain dataset of Stable-Diffusion-generated textures.

    Each item is an EasyDict holding the augmented input image plus its
    directory, file name and full path for bookkeeping.
    """

    def __init__(
        self,
        split,
        pseudo_labels,
        transform,
        renderer,
        matlist,
        use_ref=False,
        dir: typing.Optional[Path] = None,
        **kwargs
    ):
        """Build the file list for the requested split.

        split: one of 'train' | 'valid' | 'all' (98% / 2% train/valid cut).
        matlist: optional material-list file; when None, images are
            discovered on disk under dir.
        dir: root directory of the generated images (required in practice).
        """
        # Explicit None guard: previously `dir.is_dir()` raised an opaque
        # AttributeError when dir was left at its default None.
        assert dir is not None and dir.is_dir(), f'not a directory: {dir}'
        assert split in ['train', 'valid', 'all']

        self.split = split
        self.renderer = renderer
        self.pseudo_labels = pseudo_labels
        self.use_ref = use_ref

        if matlist is None:  # idiom fix: was `matlist == None`
            # Discover generated images; skip those that already have a
            # predicted pseudo-label sibling (*_roughness.png).
            files = sorted(dir.rglob('**/outputs/*[0-9].png'))
            files += sorted(dir.rglob('**/out_renorm/*[0-9].png'))
            print(f'total={len(files)}')
            files = [x for x in files if not (x.parent/f'{x.stem}_roughness.png').is_file()]
            print(f'after={len(files)}')
        else:
            files = get_matlist(matlist, dir)

        ### Train/Validation Split
        k = int(len(files)*.98)
        if split == 'train':
            self.files = files[:k]
        elif split == 'valid':
            self.files = files[k:]
        elif split == 'all':
            self.files = files

        # NOTE: uses the global `random` RNG state; seed externally for
        # reproducible shuffling.
        random.shuffle(self.files)

        print(f'StableDiffusion list={matlist}:{self.split}=[{len(self.files)}/{len(files)}]')

        dtypes = ['input']
        self.tf = Aug.Pipeline(*transform, dtypes=dtypes)

    def __getitem__(self, index):
        """Return an EasyDict with the augmented image and its metadata."""
        path = self.files[index]
        name = path.stem

        o = EasyDict(dir=str(path.parent), name=name)

        I = tf.to_tensor(Image.open(path).convert('RGB'))

        o.path = str(path)
        o.input, *_ = self.tf([I])
        return o

    def __len__(self):
        """Number of images in this split."""
        return len(self.files)
|
capture/data/utils.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from easydict import EasyDict
|
| 4 |
+
from torch.utils.data import Dataset, default_collate
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class EmptyDataset(Dataset):
    """Placeholder dataset: yields None for every index, with a fixed length."""

    def __init__(self, length):
        self.length = length

    def __getitem__(self, _):
        # No payload — downstream collate logic handles None batches.
        return None

    def __len__(self):
        return self.length
|
| 14 |
+
|
| 15 |
+
class MultiLoader:
    """Iterate two dataloaders in lockstep, yielding (batch_a, batch_b) pairs."""

    def __init__(self, a, b):
        # NOTE: _repeat is available but deliberately not applied here.
        self.loaders = [a, b]

    def __iter__(self):
        # zip stops at the shorter loader, consistent with __len__ below.
        return zip(*self.loaders)

    def __len__(self):
        return min(len(loader) for loader in self.loaders)

    def _repeat(self, a, b):
        # Wrap the shorter loader so one pass covers the longer one.
        if len(a) >= len(b):
            return a
        return RepeatLoader(a, math.ceil(len(b) / len(a)))
|
| 32 |
+
|
| 33 |
+
class RepeatLoader:
    """Replays a dataloader k times per epoch to stretch its effective length."""

    def __init__(self, loader, k):
        self.loader = loader
        self.k = k

    def __iter__(self):
        for _ in range(self.k):
            yield from self.loader

    def __len__(self):
        return self.k * len(self.loader)
|
| 45 |
+
|
| 46 |
+
def collate_fn(data):
    """Batch via default_collate, but pass through unchanged when any item is None."""
    if None in data:
        return data
    return EasyDict(default_collate(data))
|
capture/predict.yml
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
archi: densemtl
|
| 2 |
+
mode: predict
|
| 3 |
+
logger:
|
| 4 |
+
project: ae_acg
|
| 5 |
+
data:
|
| 6 |
+
batch_size: 1
|
| 7 |
+
num_workers: 10
|
| 8 |
+
input_size: 512
|
| 9 |
+
predict_ds: sd
|
| 10 |
+
predict_list: data/matlist/pbrsd_v2
|
| 11 |
+
trainer:
|
| 12 |
+
accelerator: gpu
|
| 13 |
+
devices: 1
|
| 14 |
+
precision: 16
|
| 15 |
+
routine:
|
| 16 |
+
lr: 2e-5
|
| 17 |
+
loss:
|
| 18 |
+
use_source: True
|
| 19 |
+
use_target: False
|
| 20 |
+
reg_weight: 1
|
| 21 |
+
render_weight: 1
|
| 22 |
+
n_random_configs: 3
|
| 23 |
+
n_symmetric_configs: 6
|
| 24 |
+
viz:
|
| 25 |
+
n_batches_shown: 5
|
| 26 |
+
log_every_n_epoch: 5
|
capture/render/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .main import Renderer
|
| 2 |
+
from .scene import Scene, generate_random_scenes, generate_specular_scenes, gamma_decode, gamma_encode, encode_as_unit_interval, decode_from_unit_interval
|
| 3 |
+
|
| 4 |
+
__all__ = ['Renderer', 'Scene', 'generate_random_scenes', 'generate_specular_scenes', 'gamma_decode', 'gamma_encode', 'encode_as_unit_interval', 'decode_from_unit_interval']
|
capture/render/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (482 Bytes). View file
|
|
|
capture/render/__pycache__/main.cpython-310.pyc
ADDED
|
Binary file (5.26 kB). View file
|
|
|
capture/render/__pycache__/scene.cpython-310.pyc
ADDED
|
Binary file (4.09 kB). View file
|
|
|
capture/render/main.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
from .scene import Light, Scene, Camera, dot_product, normalize, generate_normalized_random_direction, gamma_encode
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Renderer:
    """Differentiable point-light renderer for SVBRDF maps.

    Implements a Cook-Torrance-style microfacet BRDF (GGX distribution,
    Schlick Fresnel, Smith-style geometry term) evaluated per pixel over a
    square material patch spanning [-1, 1]^2 at z = 0.
    """

    def __init__(self, return_params=False):
        # NOTE(review): use_augmentation is never read inside this class —
        # confirm external use before removing.
        self.use_augmentation = False
        # When True, __call__ also returns the sampled (view, light, color) params.
        self.return_params = return_params

    def xi(self, x):
        """Positive indicator: 1.0 where x > 0, else 0.0 (same shape/dtype as x)."""
        return (x > 0.0) * torch.ones_like(x)

    def compute_microfacet_distribution(self, roughness, NH):
        """GGX normal-distribution term D with alpha = roughness**2."""
        alpha = roughness**2
        alpha_squared = alpha**2
        NH_squared = NH**2
        # Algebraically NH^2*(alpha^2 - 1) + 1; clamped away from zero to
        # keep the division below finite.
        denominator_part = torch.clamp(NH_squared * (alpha_squared + (1 - NH_squared) / NH_squared), min=0.001)
        return (alpha_squared * self.xi(NH)) / (np.pi * denominator_part**2)

    def compute_fresnel(self, F0, VH):
        """Schlick Fresnel approximation."""
        # https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf
        return F0 + (1.0 - F0) * (1.0 - VH)**5

    def compute_g1(self, roughness, XH, XN):
        """Smith masking term G1 for a single direction X (view or light)."""
        alpha = roughness**2
        alpha_squared = alpha**2
        XN_squared = XN**2
        return 2 * self.xi(XH / XN) / (1 + torch.sqrt(1 + alpha_squared * (1.0 - XN_squared) / XN_squared))

    def compute_geometry(self, roughness, VH, LH, VN, LN):
        """Separable Smith geometry term G = G1(view) * G1(light)."""
        return self.compute_g1(roughness, VH, VN) * self.compute_g1(roughness, LH, LN)

    def compute_specular_term(self, wi, wo, albedo, normals, roughness, metalness):
        # F0 interpolates between the dielectric constant 0.04 and the albedo.
        F0 = 0.04 * (1. - metalness) + metalness * albedo

        # Compute the half direction
        H = normalize((wi + wo) / 2.0)

        # Precompute some dot products; clamped to avoid division by ~0.
        NH = torch.clamp(dot_product(normals, H), min=0.001)
        VH = torch.clamp(dot_product(wo, H), min=0.001)
        LH = torch.clamp(dot_product(wi, H), min=0.001)
        VN = torch.clamp(dot_product(wo, normals), min=0.001)
        LN = torch.clamp(dot_product(wi, normals), min=0.001)

        F = self.compute_fresnel(F0, VH)
        G = self.compute_geometry(roughness, VH, LH, VN, LN)
        D = self.compute_microfacet_distribution(roughness, NH)

        return F * G * D / (4.0 * VN * LN)

    def compute_diffuse_term(self, albedo, metalness):
        """Lambertian diffuse lobe; metals contribute no diffuse component."""
        return albedo * (1. - metalness) / np.pi

    def evaluate_brdf(self, wi, wo, normals, albedo, roughness, metalness):
        """Return (diffuse, specular) BRDF terms for the given directions."""
        diffuse_term = self.compute_diffuse_term(albedo, metalness)
        specular_term = self.compute_specular_term(wi, wo, albedo, normals, roughness, metalness)
        return diffuse_term, specular_term

    def render(self, scene, svbrdf):
        """Render svbrdf = (normals, albedo, roughness) under scene's point light."""
        #normals, albedo, roughness, displacement = svbrdf
        normals, albedo, roughness = svbrdf
        device = albedo.device

        # Generate surface coordinates for the material patch
        # The center point of the patch is located at (0, 0, 0) which is the center of the global coordinate system.
        # The patch itself spans from (-1, -1, 0) to (1, 1, 0).
        xcoords_row = torch.linspace(-1, 1, albedo.shape[-1], device=device)
        xcoords = xcoords_row.unsqueeze(0).expand(albedo.shape[-2], albedo.shape[-1]).unsqueeze(0)
        ycoords = -1 * torch.transpose(xcoords, dim0=1, dim1=2)
        coords = torch.cat((xcoords, ycoords, torch.zeros_like(xcoords)), dim=0)

        # We treat the center of the material patch as focal point of the camera
        camera_pos = scene.camera.pos.unsqueeze(-1).unsqueeze(-1).to(device)
        relative_camera_pos = camera_pos - coords
        wo = normalize(relative_camera_pos)

        # Avoid zero roughness (i. e., potential division by zero)
        roughness = torch.clamp(roughness, min=0.001)

        light_pos = scene.light.pos.unsqueeze(-1).unsqueeze(-1).to(device)
        relative_light_pos = light_pos - coords
        wi = normalize(relative_light_pos)

        fdiffuse, fspecular = self.evaluate_brdf(wi, wo, normals, albedo, roughness, metalness=0)
        f = fdiffuse + fspecular

        color = scene.light.color if torch.is_tensor(scene.light.color) else torch.tensor(scene.light.color)
        light_color = color.unsqueeze(-1).unsqueeze(-1).unsqueeze(0).to(device)
        falloff = 1.0 / torch.sqrt(dot_product(relative_light_pos, relative_light_pos))**2  # Radial light intensity falloff
        LN = torch.clamp(dot_product(wi, normals), min=0.0)  # Only consider the upper hemisphere
        radiance = torch.mul(torch.mul(f, light_color * falloff), LN)

        return radiance

    def _get_input_params(self, n_samples, light, pose):
        """Sample per-render view/light positions and light colors.

        Returns (view_poses, light_poses, light_colors), each n_samples rows.
        When light/pose tensors are given they are used verbatim (moved to CPU).
        """
        min_eps = 0.001
        max_eps = 0.02
        light_distance = 2.197
        view_distance = 2.75

        # Generate scenes (camera and light configurations)
        # In the first configuration, the light and view direction are guaranteed to be perpendicular to the material sample.
        # For the remaining cases, both are randomly sampled from a hemisphere.
        # (Removed a dead `view_dist = torch.ones(n_samples-1) * view_distance`
        # allocation that was never read.)
        if pose is None:
            view_poses = torch.cat([torch.Tensor(2).uniform_(-0.25, 0.25), torch.ones(1) * view_distance], dim=-1).unsqueeze(0)
            if n_samples > 1:
                hemi_views = generate_normalized_random_direction(n_samples - 1, min_eps=min_eps, max_eps=max_eps) * view_distance
                view_poses = torch.cat([view_poses, hemi_views])
        else:
            assert torch.is_tensor(pose)
            view_poses = pose.cpu()

        if light is None:
            light_poses = torch.cat([torch.Tensor(2).uniform_(-0.75, 0.75), torch.ones(1) * light_distance], dim=-1).unsqueeze(0)
            if n_samples > 1:
                hemi_lights = generate_normalized_random_direction(n_samples - 1, min_eps=min_eps, max_eps=max_eps) * light_distance
                light_poses = torch.cat([light_poses, hemi_lights])
        else:
            assert torch.is_tensor(light)
            light_poses = light.cpu()

        light_colors = torch.Tensor([10.0]).unsqueeze(-1).expand(n_samples, 3)

        return view_poses, light_poses, light_colors

    def __call__(self, svbrdf, n_samples=1, lights=None, poses=None):
        """Render n_samples noisy, gamma-encoded views of svbrdf."""
        view_poses, light_poses, light_colors = self._get_input_params(n_samples, lights, poses)

        renderings = []
        for wo, wi, c in zip(view_poses, light_poses, light_colors):
            scene = Scene(Camera(wo), Light(wi, c))
            rendering = self.render(scene, svbrdf)

            # Simulate noise: log-normally distributed standard deviation.
            std_deviation_noise = torch.exp(torch.Tensor(1).normal_(mean = np.log(0.005), std=0.3)).numpy()[0]
            noise = torch.zeros_like(rendering).normal_(mean=0.0, std=std_deviation_noise)

            # clipping
            post_noise = torch.clamp(rendering + noise, min=0.0, max=1.0)

            # gamma encoding
            post_gamma = gamma_encode(post_noise)

            renderings.append(post_gamma)

        renderings = torch.cat(renderings, dim=0)

        if self.return_params:
            return renderings, (view_poses, light_poses, light_colors)
        return renderings
|
capture/render/scene.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def encode_as_unit_interval(tensor):
    """Map values from [-1, 1] to [0, 1]."""
    shifted = tensor + 1
    return shifted / 2
|
| 10 |
+
|
| 11 |
+
def decode_from_unit_interval(tensor):
    """Map values from [0, 1] back to [-1, 1]."""
    scaled = tensor * 2
    return scaled - 1
|
| 16 |
+
|
| 17 |
+
def gamma_decode(images):
    """Linearize gamma-encoded images (raise to the 2.2 power)."""
    return images ** 2.2
|
| 19 |
+
|
| 20 |
+
def gamma_encode(images):
    """Gamma-encode linear images for display (1/2.2 exponent)."""
    return images ** (1.0 / 2.2)
|
| 22 |
+
|
| 23 |
+
def dot_product(a, b):
    """Per-pixel dot product along the channel dim (-3), keeping that dim."""
    return (a * b).sum(dim=-3, keepdim=True)

def normalize(a):
    """Scale vectors to unit L2 length along the channel dim (-3)."""
    return a / torch.sqrt(dot_product(a, a))
|
| 28 |
+
|
| 29 |
+
def generate_normalized_random_direction(count, min_eps = 0.001, max_eps = 0.05):
    """Sample `count` unit vectors on the upper hemisphere (z > 0).

    Uses the concentric-disk construction: radius = sqrt(u), angle = 2*pi*v,
    with u bounded away from 0 and 1 by min_eps/max_eps.
    """
    u = torch.Tensor(count, 1).uniform_(0.0 + min_eps, 1.0 - max_eps)
    v = torch.Tensor(count, 1).uniform_(0.0, 1.0)

    radius = torch.sqrt(u)
    angle = 2 * math.pi * v

    return torch.cat([
        radius * torch.cos(angle),
        radius * torch.sin(angle),
        torch.sqrt(1.0 - radius**2),
    ], axis=-1)
|
| 41 |
+
|
| 42 |
+
def generate_random_scenes(count):
    """Build `count` scenes with independently sampled view and light directions."""
    # Randomly distribute both, view and light positions
    view_positions = generate_normalized_random_direction(count, 0.001, 0.1)  # shape = [count, 3]
    light_positions = generate_normalized_random_direction(count, 0.001, 0.1)

    # Light has lower power as the distance to the material plane is not as large
    return [
        Scene(Camera(view), Light(light, [20.]*3))
        for view, light in zip(view_positions, light_positions)
    ]
|
| 55 |
+
|
| 56 |
+
def generate_specular_scenes(count):
    """Build `count` scenes with the light mirroring the view direction.

    Only the view direction is sampled; the light is placed at its mirror
    image across the z axis, then both are pushed out by random distances
    and shifted so the highlight is not always centered.
    """
    view_positions = generate_normalized_random_direction(count, 0.001, 0.1)  # shape = [count, 3]
    light_positions = view_positions * torch.Tensor([-1.0, -1.0, 1.0]).unsqueeze(0)

    # Reference: "parameters chosen empirically to have a nice distance from a -1;1 surface.""
    distance_view = torch.exp(torch.Tensor(count, 1).normal_(mean=0.5, std=0.75))
    distance_light = torch.exp(torch.Tensor(count, 1).normal_(mean=0.5, std=0.75))

    # Reference: "Shift position to have highlight elsewhere than in the center."
    # NOTE: This code only creates guaranteed specular highlights in the orthographic rendering, not in the perspective one.
    # This is because the camera is -looking- at the center of the patch.
    shift = torch.cat([torch.Tensor(count, 2).uniform_(-1.0, 1.0), torch.zeros((count, 1)) + 0.0001], dim=-1)

    view_positions = view_positions * distance_view + shift
    light_positions = light_positions * distance_light + shift

    return [
        Scene(Camera(view_positions[i]), Light(light_positions[i], [20, 20.0, 20.0]))
        for i in range(count)
    ]
|
| 80 |
+
|
| 81 |
+
class Camera:
    """A camera described only by its position tensor."""

    def __init__(self, pos):
        self.pos = pos

    def __str__(self):
        return f'Camera({self.pos.tolist()})'
|
| 86 |
+
|
| 87 |
+
class Light:
    """A point light with a position tensor and an RGB color."""

    def __init__(self, pos, color):
        self.pos = pos
        self.color = color

    def __str__(self):
        return f'Light({self.pos.tolist()}, {self.color})'
|
| 93 |
+
|
| 94 |
+
class Scene:
    """A camera/light pair, (de)serializable via export()/load()."""

    def __init__(self, camera, light):
        self.camera = camera
        self.light = light

    def __str__(self):
        return f'Scene({self.camera}, {self.light})'

    @classmethod
    def load(cls, o):
        """Rebuild a Scene from [camera_pos, light_pos, light_color] (see export)."""
        cam, light, color = o
        return Scene(Camera(cam), Light(light, color))

    def export(self):
        """Serialize to a plain list: [camera.pos, light.pos, light.color]."""
        return [self.camera.pos, self.light.pos, self.light.color]
|
capture/source/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .model import ResnetEncoder, MultiHeadDecoder, DenseMTL
|
| 2 |
+
from .loss import DenseReg, RenderingLoss
|
| 3 |
+
from .routine import Vanilla
|
| 4 |
+
|
| 5 |
+
__all__ = ['ResnetEncoder', 'MultiHeadDecoder', 'DenseMTL', 'DenseReg', 'RenderingLoss', 'Vanilla']
|
capture/source/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (405 Bytes). View file
|
|
|
capture/source/__pycache__/loss.cpython-310.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
capture/source/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (8.18 kB). View file
|
|
|
capture/source/__pycache__/routine.cpython-310.pyc
ADDED
|
Binary file (5.83 kB). View file
|
|
|
capture/source/loss.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from easydict import EasyDict
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
import torchvision.transforms.functional as tf
|
| 8 |
+
|
| 9 |
+
from ..render import Renderer, Scene, generate_random_scenes, generate_specular_scenes
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class RenderingLoss(nn.Module):
    """Log-space L1 loss between renderings of predicted and reference SVBRDFs."""

    def __init__(self, renderer, n_random_configs=0, n_symmetric_configs=0):
        # renderer: differentiable renderer used for all view synthesis.
        # n_random_configs: renders with independent random view/light.
        # n_symmetric_configs: renders with the light mirroring the view.
        super().__init__()
        self.eps = 0.1  # log-space offset; avoids log(0)
        self.renderer = renderer
        self.n_random_configs = n_random_configs
        self.n_symmetric_configs = n_symmetric_configs
        self.n_renders = n_random_configs + n_symmetric_configs

    def generate_scenes(self):
        # Fresh random scene list on every call; random configs first,
        # then the mirrored ("specular") ones.
        return generate_random_scenes(self.n_random_configs) + generate_specular_scenes(self.n_symmetric_configs)

    def multiview_render(self, y, x):
        """Render y (prediction) and x (reference) under the same scenes.

        Per batch element, both SVBRDFs are rendered with an identical
        freshly-sampled scene set so the renders are directly comparable.
        Returns a (X_renders, Y_renders) pair of stacked tensors.
        """
        X_renders, Y_renders = [], []

        #x_svBRDFs = zip(x.normals, x.albedo, x.roughness, x.displacement)
        #y_svBRDFs = zip(y.normals, y.albedo, y.roughness, x.displacement)

        x_svBRDFs = zip(x.normals, x.albedo, x.roughness)
        y_svBRDFs = zip(y.normals, y.albedo, y.roughness)
        for x_svBRDF, y_svBRDF in zip(x_svBRDFs, y_svBRDFs):
            x_renders, y_renders = [], []
            # Same scene list is reused for the x- and y-render of a sample.
            for scene in self.generate_scenes():
                x_renders.append(self.renderer.render(scene, x_svBRDF))
                y_renders.append(self.renderer.render(scene, y_svBRDF))
            X_renders.append(torch.cat(x_renders))
            Y_renders.append(torch.cat(y_renders))

        out = torch.stack(X_renders), torch.stack(Y_renders)
        return out

    def reconstruction(self, y, theta):
        """Re-render each predicted SVBRDF under its capture parameters theta."""
        views = []
        #for *svBRDF, t in zip(y.normals, y.albedo, y.roughness, y.displacement, theta):
        for *svBRDF, t in zip(y.normals, y.albedo, y.roughness, theta):
            render = self.renderer.render(Scene.load(t), svBRDF)
            views.append(render)
        return torch.cat(views)

    def __call__(self, y, x, **kargs):
        # L1 in log space; self.eps keeps log() finite for zero-valued pixels.
        # NOTE(review): defining __call__ on an nn.Module bypasses forward()
        # and module hooks — presumably intentional here; confirm.
        loss = F.l1_loss(torch.log(y + self.eps), torch.log(x + self.eps), **kargs)
        return loss
|
| 54 |
+
|
| 55 |
+
class DenseReg(nn.Module):
    """Combined per-map regression + rendering loss over source (labeled)
    and target (pseudo-labeled) batches."""

    def __init__(
        self,
        reg_weight: float,
        render_weight: float,
        pl_reg_weight: float = 0.,
        pl_render_weight: float = 0.,
        use_source: bool = True,
        use_target: bool = True,
        n_random_configs= 3,
        n_symmetric_configs = 6,
    ):
        # reg_weight / render_weight: weights for the labeled (source) terms.
        # pl_*_weight: weights for the pseudo-labeled (target) terms.
        super().__init__()

        # Per-map regression losses: log-L1 for albedo/roughness, plain L1 for normals.
        self.weights = [('albedo', reg_weight, self.log_l1),
                        ('roughness', reg_weight, self.log_l1),
                        ('normals', reg_weight, F.l1_loss)]

        self.reg_weight = reg_weight
        self.render_weight = render_weight
        self.pl_reg_weight = pl_reg_weight
        self.pl_render_weight = pl_render_weight
        self.use_source = use_source
        self.use_target = use_target

        self.renderer = Renderer()
        self.n_random_configs = n_random_configs
        self.n_symmetric_configs = n_symmetric_configs
        self.loss = RenderingLoss(self.renderer, n_random_configs=n_random_configs, n_symmetric_configs=n_symmetric_configs)

    def log_l1(self, x, y, **kwargs):
        """L1 distance in log space; the 0.01 offset keeps log() finite."""
        return F.l1_loss(torch.log(x + 0.01), torch.log(y + 0.01), **kwargs)

    def forward(self, x, y):
        """Compute the weighted loss dict for (source, target) batch pairs.

        Side effects: attaches rendered images (.image) and reconstructions
        (.reco) onto the input dicts for downstream visualization.
        """
        loss = EasyDict()
        x_src, x_tgt = x
        y_src, y_tgt = y

        if self.use_source:
            # acg regression loss
            for k, w, loss_fn in self.weights:
                loss[k] = w*loss_fn(y_src[k], x_src[k])

            # rendering loss
            x_src.image, y_src.image = self.loss.multiview_render(y_src, x_src)
            loss.render = self.render_weight*self.loss(y_src.image, x_src.image)

            # reconstruction
            y_src.reco = self.loss.reconstruction(y_src, x_src.input_params)

        if self.use_target:
            # NOTE(review): the per-map weight w is ignored here; every target
            # map uses pl_reg_weight instead — confirm this is intended.
            for k, w, loss_fn in self.weights:
                loss[f'tgt_{k}'] = self.pl_reg_weight*loss_fn(y_tgt[k], x_tgt[k])

            # rendering loss w/ pseudo label
            y_tgt.image, x_tgt.image = self.loss.multiview_render(y_tgt, x_tgt)
            loss.sd_render = self.pl_render_weight*self.loss(y_tgt.image, x_tgt.image)

            # reconstruction
            y_tgt.reco = self.loss.reconstruction(y_tgt, x_tgt.input_params)

        loss.total = torch.stack(list(loss.values())).sum()
        return loss

    @torch.no_grad()
    def test(self, x, y, batch_idx, epoch, dl_id):
        # Test-time path: batch size must be 1; only the reconstruction is
        # produced (stored on y) and a zero total is returned.
        assert len(x.name) == 1
        y.reco = self.loss.reconstruction(y, x.input_params)
        return EasyDict(total=0)

    @torch.no_grad()
    def predict(self, x_tgt, y_tgt, batch_idx, split, epoch):
        # Prediction step; pseudo-label export below is currently disabled.
        assert len(x_tgt.name) == 1

        # gt components
        I = x_tgt.input[0]
        name = x_tgt.name[0]

        # get the predicted maps
        N_pred = y_tgt.normals[0]
        A_pred = y_tgt.albedo[0]
        R_pred = y_tgt.roughness[0]

        # A_name = pl_path/f'{name}_albedo.png'
        # save_image(A_pred, A_name)

        # N_name = pl_path/f'{name}_normals.png'
        # save_image(encode_as_unit_interval(N_pred), N_name)

        # R_name = pl_path/f'{name}_roughness.png'
        # save_image(R_pred, R_name)

        return EasyDict(total=0)
|
capture/source/model.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from monodepth2
|
| 2 |
+
# https://github.com/nianticlabs/monodepth2/blob/master/networks/depth_decoder.py
|
| 3 |
+
#
|
| 4 |
+
# Copyright Niantic 2019. Patent Pending. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is licensed under the terms of the Monodepth2 licence
|
| 7 |
+
# which allows for non-commercial use only, the full terms of which are made
|
| 8 |
+
# available in the LICENSE file.
|
| 9 |
+
|
| 10 |
+
from __future__ import absolute_import, division, print_function
|
| 11 |
+
from collections import OrderedDict
|
| 12 |
+
from easydict import EasyDict
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn as nn
|
| 17 |
+
import torch.nn.functional as F
|
| 18 |
+
import torchvision.models as models
|
| 19 |
+
import torch.utils.model_zoo as model_zoo
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ConvBlock(torch.nn.Module):
    """3x3 convolution followed by optional BatchNorm, ELU, optional Dropout2d."""

    def __init__(self, in_channels, out_channels, bn=False, dropout=0.0):
        super().__init__()

        layers = [Conv3x3(in_channels, out_channels)]
        layers.append(nn.BatchNorm2d(out_channels) if bn else nn.Identity())
        layers.append(nn.ELU(inplace=True))
        # Pay attention: the 2d (channel-wise) variant of dropout is used.
        layers.append(nn.Dropout2d(dropout) if dropout > 0 else nn.Identity())
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)


class Conv3x3(nn.Module):
    """3x3 convolution with 1-pixel padding (reflection by default)."""

    def __init__(self, in_channels, out_channels, use_refl=True):
        super().__init__()

        self.pad = nn.ReflectionPad2d(1) if use_refl else nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        return self.conv(self.pad(x))
|
| 54 |
+
|
| 55 |
+
def upsample(x):
    """Nearest-neighbor upsample of a (N, C, H, W) tensor by a factor of 2."""
    return F.interpolate(x, scale_factor=2, mode="nearest")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class ResNetMultiImageInput(models.ResNet):
    """Constructs a resnet model with varying number of input images.
    Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    """
    def __init__(self, block, layers, num_classes=1000, in_channels=3):
        # NOTE(review): num_classes is accepted but not forwarded to the base
        # class constructor — confirm it is intentionally unused.
        super(ResNetMultiImageInput, self).__init__(block, layers)
        # Rebuild the stem so conv1 accepts `in_channels` channels.
        # self.inplanes must be reset to 64 before the _make_layer calls
        # below, because the base-class __init__ advanced it already.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(
            in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # Re-initialize weights for all (rebuilt) conv / batchnorm modules.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def resnet_multiimage_input(num_layers, pretrained=False, in_channels=3):
    """Constructs a ResNet model with an `in_channels`-wide input stem.

    Args:
        num_layers (int): Number of resnet layers. Must be 18 or 50
        pretrained (bool): If True, loads ImageNet weights (stem adapted to in_channels)
        in_channels (int): Number of input channels
    """
    assert num_layers in [18, 50], "Can only run with 18 or 50 layer resnet"
    blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers]
    block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers]
    model = ResNetMultiImageInput(block_type, blocks, in_channels=in_channels)

    if pretrained:
        print('loading imagnet weights on resnet...')
        loaded = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)])
        # BUG FIX: the weights were previously downloaded but never applied
        # (the load_state_dict call was commented out). Adapt the 3-channel
        # stem by tiling it to `in_channels` and rescaling so activation
        # magnitudes are preserved, then load everything else as-is.
        conv1 = loaded['conv1.weight']
        src_ch = conv1.shape[1]
        repeats = -(-in_channels // src_ch)  # ceil division
        loaded['conv1.weight'] = (
            conv1.repeat(1, repeats, 1, 1)[:, :in_channels] * (src_ch / in_channels))
        model.load_state_dict(loaded, strict=False)
    return model
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class ResnetEncoder(nn.Module):
    """Pytorch module for a resnet encoder.

    Wraps a torchvision ResNet and returns the five intermediate feature maps
    (stem + layer1..layer4) consumed by the skip-connection decoder.
    """
    def __init__(self, num_layers, pretrained, in_channels=3):
        super(ResnetEncoder, self).__init__()

        # Channel counts of the returned feature maps (scaled x4 below for
        # bottleneck architectures).
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if in_channels > 3:
            # Custom stem needed for more than 3 input channels.
            self.encoder = resnet_multiimage_input(num_layers, pretrained, in_channels)
        else:
            # BUG FIX: the previous code always passed the ResNet101 weights enum
            # (models.ResNet101_Weights.IMAGENET1K_V1), which raises for every
            # other depth. The string alias is resolved by torchvision to the
            # weights enum matching the selected architecture.
            weights = 'IMAGENET1K_V1' if pretrained else None
            self.encoder = resnets[num_layers](weights=weights)

        # Bottleneck variants (50/101/152) expand post-stem channels by 4.
        if num_layers > 34:
            self.num_ch_enc[1:] *= 4

    def forward(self, x):
        # NOTE: features are stored on self, so the module is stateful and not
        # safe for concurrent forward calls.
        self.features = []

        x = self.encoder.conv1(x)
        x = self.encoder.bn1(x)
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        self.features.append(self.encoder.layer2(self.features[-1]))
        self.features.append(self.encoder.layer3(self.features[-1]))
        self.features.append(self.encoder.layer4(self.features[-1]))

        return self.features
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class Decoder(nn.Module):
    """U-Net style decoder over ResNet encoder features.

    Upsamples from the coarsest feature map back to input resolution,
    optionally fusing encoder skip connections, and emits one prediction
    with `num_output_channels` channels at full resolution.
    """

    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
                 kaiming_init=False, return_feats=False):
        super().__init__()

        self.num_output_channels = num_output_channels
        self.use_skips = use_skips
        self.upsample_mode = 'nearest'
        self.scales = scales

        # When True, forward() also returns the deepest encoder feature map.
        self.return_feats = return_feats

        self.num_ch_enc = num_ch_enc
        self.num_ch_dec = np.array([16, 32, 64, 128, 256])

        # Two ConvBlocks per scale, built from coarsest (i=4) to finest (i=0).
        self.convs = OrderedDict()
        for i in range(4, -1, -1):
            # upconv_0: channel reduction applied before upsampling
            num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)

            # upconv_1: fuse the upsampled features with the encoder skip (if any)
            num_ch_in = self.num_ch_dec[i]
            if self.use_skips and i > 0:
                num_ch_in += self.num_ch_enc[i - 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)

        # Final 3x3 projection to the output channel count (scale 0 only).
        self.convs[("dispconv", 0)] = Conv3x3(self.num_ch_dec[0], self.num_output_channels)

        # Registers parameters with the module; `self.convs` itself is a plain
        # dict and is NOT tracked by nn.Module.
        self.decoder = nn.ModuleList(list(self.convs.values()))

        if kaiming_init:
            print('init weights of decoder')
            # BUG FIX: iterate self.modules() (recursive) instead of
            # self.children(); children() only yields the top-level ModuleList,
            # so no nn.Conv2d was ever matched and this flag silently did nothing.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight)
                    if m.bias is not None:
                        m.bias.data.fill_(0.01)

    def forward(self, input_features):
        """Decode the list of encoder feature maps into a full-resolution output."""
        x = input_features[-1]
        for i in range(4, -1, -1):
            x = self.convs[("upconv", i, 0)](x)
            x = [upsample(x)]
            if self.use_skips and i > 0:
                x += [input_features[i - 1]]
            x = torch.cat(x, 1)
            x = self.convs[("upconv", i, 1)](x)

        final_conv = self.convs[("dispconv", 0)]
        out = final_conv(x)

        if self.return_feats:
            return out, input_features[-1]
        return out
|
| 210 |
+
|
| 211 |
+
class MultiHeadDecoder(nn.Module):
    """One independent Decoder per task, all consuming the same encoder features."""

    def __init__(self, num_ch_enc, tasks, return_feats, use_skips):
        super().__init__()
        heads = {}
        # `tasks` maps task name -> number of output channels for that head.
        for task_name, out_channels in tasks.items():
            heads[task_name] = Decoder(num_ch_enc=num_ch_enc,
                                       num_output_channels=out_channels,
                                       scales=[0],
                                       kaiming_init=False,
                                       use_skips=use_skips,
                                       return_feats=return_feats)
        self.decoders = nn.ModuleDict(heads)

    def forward(self, x):
        predictions = EasyDict()
        for task_name, head in self.decoders.items():
            predictions[task_name] = head(x)
        return predictions
|
| 226 |
+
|
| 227 |
+
class DenseMTL(nn.Module):
    """Chain an encoder and a (multi-head) decoder into a single module."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        features = self.encoder(x)
        return self.decoder(features)
|
capture/source/routine.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from torch import optim
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from easydict import EasyDict
|
| 6 |
+
import pytorch_lightning as pl
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
import torchvision.transforms as T
|
| 9 |
+
from torchvision.utils import save_image
|
| 10 |
+
from torchmetrics import MeanSquaredError, StructuralSimilarityIndexMeasure
|
| 11 |
+
#from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
|
| 12 |
+
|
| 13 |
+
from . import DenseReg, RenderingLoss
|
| 14 |
+
from ..render import Renderer, encode_as_unit_interval, gamma_decode, gamma_encode
|
| 15 |
+
|
| 16 |
+
class Vanilla(pl.LightningModule):
    """Lightning routine for SVBRDF map prediction (normals/albedo/roughness).

    Wraps a DenseMTL-style model, applies output post-processing, and handles
    train/val/test/predict steps. `loss` is expected to return an EasyDict-like
    object with a `.total` entry plus per-term values.
    """
    # Metric names: first letter selects the map via `maps` below.
    metrics = ['I_mse','N_mse','A_mse','R_mse','I_ssim','N_ssim','A_ssim','R_ssim']
    maps = {'I': 'reco', 'N': 'normals', 'R': 'roughness', 'A': 'albedo'}

    def __init__(self, model: nn.Module, loss: DenseReg = None, lr: float = 0, batch_size: int = 0, max_images: int = 10):
        super().__init__()
        self.model = model
        self.loss = loss
        self.lr = lr
        self.batch_size = batch_size
        self.tanh = nn.Tanh()
        # ImageNet normalization applied to inputs before the backbone.
        self.norm = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Buffer of (batch, prediction) pairs collected during validation for logging.
        self.val_images = []
        self.max_images = max_images

        # model/loss are nn.Modules; exclude them from hparams serialization.
        self.save_hyperparameters(ignore=['model', 'loss'])

    def training_step(self, x):
        # x is a (source, target) pair; forward handles either being absent.
        y = self(*x)
        loss = self.loss(x, y)
        self.log_to('train', loss)
        return dict(loss=loss.total, y=y)

    def forward(self, src, tgt):
        """Run the model on source and/or target batches; None entries are skipped."""
        src_out, tgt_out = None, None

        if None not in src:
            src_out = self.model(self.norm(src.input))
            self.post_process_(src_out)

        if None not in tgt:
            tgt_out = self.model(self.norm(tgt.input))
            self.post_process_(tgt_out)

        return src_out, tgt_out

    def post_process_(self, o: EasyDict):
        """Map raw network outputs to valid SVBRDF maps, in place."""
        # (1) activation function, (2) concat unit z, (3) normalize to unit vector
        nxy = self.tanh(o.normals)
        nx, ny = torch.split(nxy*3, split_size_or_sections=1, dim=1)
        n = torch.cat([nx, ny, torch.ones_like(nx)], dim=1)
        o.normals = F.normalize(n, dim=1)

        # (1) activation function, (2) mapping [-1,1]->[0,1]
        a = self.tanh(o.albedo)
        o.albedo = encode_as_unit_interval(a)

        # (1) activation function, (2) mapping [-1,1]->[0,1], (3) channel repeat x3
        r = self.tanh(o.roughness)
        o.roughness = encode_as_unit_interval(r.repeat(1,3,1,1))

    def validation_step(self, x, *_):
        y = self(*x)
        loss = self.loss(x, y)
        self.log_to('val', loss)

        # Keep a few batches for image logging at epoch end (logger required).
        if len(self.val_images) * self.batch_size < self.max_images and self.logger:
            self.val_images.append((x, y))

        return dict(loss=loss.total, y=y)

    def on_validation_epoch_end(self):
        # Write stored prediction/GT image pairs to the logger (TensorBoard-style
        # `experiment.add_image` API), then drop the buffer.
        cur_ind = 0
        for ind, (x, y) in enumerate(self.val_images):
            for key in ['normals', 'albedo', 'roughness']:
                for i in range(len(x[0][key])):
                    pred_image = y[0][key][i]
                    gt_image = x[0][key][i]

                    self.logger.experiment.add_image(f'val/{cur_ind}_{key}_pred', pred_image, self.global_step)
                    self.logger.experiment.add_image(f'val/{cur_ind}_{key}_gt', gt_image, self.global_step)
                    cur_ind += 1
                    # NOTE(review): this break only exits the innermost loop, so
                    # more than max_images entries may still be logged — confirm intent.
                    if cur_ind >= self.max_images:
                        break

        self.val_images.clear()

    def log_to(self, split, loss):
        # Prefix every loss term with its split, e.g. 'train/total'.
        self.log_dict({f'{split}/{k}': v for k, v in loss.items()}, batch_size=self.batch_size)

    def log_images(self, x, y, split, max_images=5):
        # Log predicted and ground truth images
        # NOTE(review): assumes x[0]/y[0] are dict-like per-map containers;
        # verify against the callers' batch structure.
        for key in ['normals', 'albedo', 'roughness']:
            for i in range(min(len(x[0][key]), max_images)):
                self.logger.experiment.add_image(f'{split}/{key}_pred_{i}', y[0][key][i], self.global_step)
                self.logger.experiment.add_image(f'{split}/{key}_gt_{i}', x[0][key][i], self.global_step)


    def on_test_start(self):
        # Renderer used to reconstruct images from predicted maps at test time.
        self.renderer = RenderingLoss(Renderer())

        # Instantiate one torchmetrics meter per metric name.
        for m in Vanilla.metrics:
            if 'mse' in m:
                setattr(self, m, MeanSquaredError().to(self.device))
            elif 'ssim' in m:
                setattr(self, m, StructuralSimilarityIndexMeasure(data_range=1).to(self.device))

    def test_step(self, x, batch_idx, dl_id=0):
        y = self.model(self.norm(x.input))
        self.post_process_(y)

        # image reconstruction
        y.reco = self.renderer.reconstruction(y, x.input_params)
        x.reco = gamma_decode(x.input)

        for m in Vanilla.metrics:
            # First character of the metric name selects the map (see `maps`).
            mapid, *_ = m
            k = Vanilla.maps[mapid]
            meter = getattr(self, m)
            meter(y[k], x[k].to(y[k].dtype))
            self.log(m, getattr(self, m), on_epoch=True)

        if self.logger:
            self.log_images(x, y, split='test')

    def predict_step(self, x, batch_idx):
        # Batch size 1 is assumed: element [0] of each field is used directly.
        y = self.model(self.norm(x.input))
        self.post_process_(y)

        I, name, outdir = x.input[0], x.name[0], Path(x.path[0]).parent
        N_pred, A_pred, R_pred = y.normals[0], y.albedo[0], y.roughness[0]

        # Albedo is gamma-encoded, normals remapped from [-1,1] to [0,1] for PNG output.
        save_image(gamma_encode(A_pred), outdir/f'{name}_albedo.png')
        save_image(encode_as_unit_interval(N_pred), outdir/f'{name}_normals.png')
        save_image(R_pred, outdir/f'{name}_roughness.png')

    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=1e-4)
        return dict(optimizer=optimizer)
|
capture/utils/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Public API of capture.utils.
# BUG FIX: `get_model` was listed in __all__ but never imported, so
# `from capture.utils import get_model` raised AttributeError/ImportError.
from .model import get_model, get_module
from .cli import get_args
from .exp import Trainer, get_name, get_callbacks, get_data
from .log import get_logger


__all__ = ['get_model', 'get_module', 'get_args', 'get_name', 'get_logger', 'get_data', 'get_callbacks', 'Trainer']
|
capture/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (442 Bytes). View file
|
|
|
capture/utils/__pycache__/cli.cpython-310.pyc
ADDED
|
Binary file (3.51 kB). View file
|
|
|
capture/utils/__pycache__/exp.cpython-310.pyc
ADDED
|
Binary file (3.04 kB). View file
|
|
|
capture/utils/__pycache__/log.cpython-310.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|
capture/utils/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
capture/utils/cli.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
import jsonargparse
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import pytorch_lightning as pl
|
| 7 |
+
from pytorch_lightning.loggers import WandbLogger
|
| 8 |
+
|
| 9 |
+
from ..source import Vanilla, DenseReg
|
| 10 |
+
from ..callbacks import VisualizeCallback
|
| 11 |
+
from ..data.module import DataModule
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
#! refactor this simplification required
|
| 15 |
+
#! refactor this simplification required
class LightningArgumentParser(jsonargparse.ArgumentParser):
    """
    Extension of jsonargparse.ArgumentParser to parse pl.classes and more.

    Each helper registers one component's constructor signature under its own
    argument group ('data', 'loss', 'routine', 'logger', 'trainer').
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def add_datamodule(self, datamodule_obj: pl.LightningDataModule):
        # Expose the datamodule's __init__ parameters under the 'data' group.
        self.add_method_arguments(datamodule_obj, '__init__', 'data', as_group=True)

    def add_lossmodule(self, lossmodule_obj: nn.Module):
        self.add_class(lossmodule_obj, 'loss')

    def add_routine(self, model_obj: pl.LightningModule):
        # Skip constructor params that are wired in code, not from the CLI.
        skip = {'ae', 'decoder', 'loss', 'transnet', 'model', 'discr', 'adv_loss', 'stage'}
        self.add_class_arguments(model_obj, 'routine', as_group=True, skip=skip)

    def add_logger(self, logger_obj):
        # These logger fields are derived from other args at runtime.
        skip = {'version', 'config', 'name', 'save_dir'}
        self.add_class_arguments(logger_obj, 'logger', as_group=True, skip=skip)

    def add_class(self, cls, group, **kwargs):
        self.add_class_arguments(cls, group, as_group=True, **kwargs)

    def add_trainer(self):
        # Logger and callbacks are constructed in code; root dir comes from --out_dir.
        skip = {'default_root_dir', 'logger', 'callbacks'}
        self.add_class_arguments(pl.Trainer, 'trainer', as_group=True, skip=skip)
|
| 42 |
+
|
| 43 |
+
def get_args(datamodule=DataModule, loss=DenseReg, routine=Vanilla, viz=VisualizeCallback):
    """Build and parse the full CLI for training/eval/test/predict runs.

    Component classes may be passed as None to omit their argument groups.
    Returns the parsed jsonargparse namespace.
    """
    parser = LightningArgumentParser()

    # Base run options; --config points to a YAML config file.
    parser.add_argument('--config', action=jsonargparse.ActionConfigFile, required=True)
    parser.add_argument('--archi', type=str, required=True)
    parser.add_argument('--out_dir', type=lambda x: Path(x), required=True)

    parser.add_argument('--seed', default=666, type=int)
    parser.add_argument('--load_weights_from', type=lambda x: Path(x))
    parser.add_argument('--save_ckpt_every', default=10, type=int)
    parser.add_argument('--wandb', action='store_true', default=False)
    parser.add_argument('--mode', choices=['train', 'eval', 'test', 'predict'], default='train', type=str)
    parser.add_argument('--resume_from', default=None, type=str)

    if datamodule is not None:
        parser.add_datamodule(datamodule)

    if loss is not None:
        parser.add_lossmodule(loss)

    if routine is not None:
        parser.add_routine(routine)

    if viz is not None:
        # out_dir/exist_ok are supplied in code when the callback is built.
        parser.add_class_arguments(viz, 'viz', skip={'out_dir', 'exist_ok'})

    # bindings between modules (data/routine/loss)
    parser.link_arguments('data.batch_size', 'routine.batch_size')

    parser.add_logger(WandbLogger)
    parser.add_trainer()

    args = parser.parse_args()
    return args
|
capture/utils/exp.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from pytorch_lightning.callbacks import ModelCheckpoint
|
| 4 |
+
from pytorch_lightning import Trainer as plTrainer
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
from ..callbacks import VisualizeCallback, MetricLogging
|
| 8 |
+
from ..data.module import DataModule
|
| 9 |
+
from .log import get_info
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_data(args=None, **kwargs):
    """Build the DataModule from parsed CLI args, or from explicit kwargs when args is None."""
    params = kwargs if args is None else args.data
    return DataModule(**params)
|
| 17 |
+
|
| 18 |
+
def get_name(args) -> str:
    """Compose the experiment/run name from the parsed CLI configuration.

    Encodes dataset, learning rate, input size, batch size and loss weights
    into a single underscore-separated identifier used for logging/checkpoints.
    """
    name = ''#f'{args.mode}'

    # Human-readable dataset-list suffix (file stem, '-' instead of '_', uppercased).
    src_ds_verbose = str(args.data.source_list).split(os.sep)[-1].replace("_","-").upper()

    if args.mode == 'train':
        # Source-only pretraining run.
        if args.loss.use_source and not args.loss.use_target:
            name += f'pretrain_ds{args.data.source_ds.upper()}_lr{args.routine.lr}_x{args.data.input_size}_bs{args.data.batch_size}_reg{args.loss.reg_weight}_rend{args.loss.render_weight}_ds{str(args.data.source_list).split(os.sep)[-1].replace("_","-").upper()}'

        # Target-domain (fine-tuning) run.
        elif args.loss.use_target:
            name += f'_F_{args.data.target_ds.upper()}_lr{args.routine.lr}_x{args.data.input_size}_bs{args.data.tgt_bs}_aug{int(args.data.transform)}_reg{args.loss.pl_reg_weight}_rend{args.loss.pl_render_weight}_ds{str(args.data.target_list).split(os.sep)[-1].replace("_","-").upper()}'
        else:
            name += f'_T_{args.data.source_ds.upper()}_lr{args.routine.lr}_x{args.data.input_size}_aug{int(args.data.transform)}_reg{args.loss.render_weight}_rend{args.loss.render_weight}_ds{str(args.data.source_list).split(os.sep)[-1].replace("_","-").upper()}'

    #if args.loss.adv_weight:
    #    name += f'_ADV{args.loss.adv_weight}'
    # Extra suffixes for non-default settings.
    if args.data.source_ds == 'acg':
        name += f'_mixbs{args.data.batch_size}'
    if args.loss.reg_weight != 0.1:
        name += f'_regSRC{args.loss.reg_weight}'
    if args.loss.render_weight != 1:
        name += f'_rendSRC{args.loss.render_weight}'
    #if args.data.use_ref:
    #    name += '_useRef'
    if args.load_weights_from:
        # Record which checkpoint initialized the weights (name + epoch).
        wname, epoch = get_info(str(args.load_weights_from))
        assert wname and epoch
        name += f'_init{wname.replace("_", "-")}-{epoch}ep'

    name += f'_s{args.seed}'
    return name
    # name += args.load_weights_from.split(os.sep)[-1][:-5]
|
| 50 |
+
|
| 51 |
+
def get_callbacks(args):
    """Assemble the Lightning callbacks for a run.

    Returns visualization, periodic checkpointing (all checkpoints kept), and
    CSV metric logging callbacks configured from the parsed args.
    """
    callbacks = [
        # exist_ok when resuming so the output dir is reused without error.
        VisualizeCallback(out_dir=args.out_dir, exist_ok=bool(args.resume_from), **args.viz),
        ModelCheckpoint(
            dirpath=args.out_dir/'ckpt_1',
            filename='{name}_{epoch}-{step}',
            save_weights_only=False,
            save_top_k=-1,  # keep every checkpoint
            every_n_epochs=args.save_ckpt_every),
        MetricLogging(args.load_weights_from, args.data.test_list, outdir=Path('./logs')),
    ]
    return callbacks
|
| 63 |
+
|
| 64 |
+
class Trainer(plTrainer):
    """pl.Trainer variant that remembers a resume checkpoint and dispatches on a mode string."""

    def __init__(self, o_args, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Checkpoint path used only when mode == 'train'.
        self.ckpt_path = o_args.resume_from

    def __call__(self, mode, module, data) -> None:
        """Run the trainer entry point selected by *mode* ('train'/'eval'/'test'/'predict')."""
        if mode == 'train':
            self.fit(module, data, ckpt_path=self.ckpt_path)
        elif mode == 'eval':
            self.validate(module, data)
        elif mode == 'test':
            self.test(module, data)
        elif mode == 'predict':
            self.predict(module, data)
|
capture/utils/log.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv, re, os
|
| 2 |
+
|
| 3 |
+
from pytorch_lightning.loggers import TensorBoardLogger
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def read_csv(fname):
    """Load a CSV file into a list of row dicts; the header row supplies the keys."""
    with open(fname, 'r') as fh:
        return [row for row in csv.DictReader(fh)]
|
| 10 |
+
|
| 11 |
+
def append_csv(fname, dicts):
    """Append row dict(s) to a CSV file, merging with any existing rows first."""
    rows = [dicts] if isinstance(dicts, dict) else dicts

    # Existing file content goes first so history is preserved.
    if os.path.isfile(fname):
        rows = read_csv(fname) + rows

    write_csv(fname, rows)
|
| 19 |
+
|
| 20 |
+
def write_csv(fname, dicts):
|
| 21 |
+
assert len(dicts) > 0
|
| 22 |
+
with open(fname, 'w', newline='') as f:
|
| 23 |
+
writer = csv.DictWriter(f, fieldnames=dicts[0].keys())
|
| 24 |
+
writer.writeheader()
|
| 25 |
+
for d in dicts:
|
| 26 |
+
writer.writerow(d)
|
| 27 |
+
|
| 28 |
+
def now():
    """Current local time formatted as YYYY-MM-DD_HH-MM-SS (filesystem-safe)."""
    from datetime import datetime
    stamp = datetime.now()
    return stamp.strftime('%Y-%m-%d_%H-%M-%S')
|
| 31 |
+
|
| 32 |
+
def get_info(weights: str):
    """Extract (run name, epoch) from a checkpoint path like '<name>_epoch=<n>-step...'.

    Returns (None, None) when the pattern is not found.
    """
    match = re.search(r"(.*)_epoch=(\d+)-step", weights)
    if match is None:
        return None, None
    name, epoch = match.groups()
    # Keep only the basename portion of the run name.
    return str(name).split(os.sep)[-1], str(epoch)
|
| 38 |
+
|
| 39 |
+
def get_matlist(cache_dir, dir):
    """Read a newline-separated material list file and prefix each entry with *dir*."""
    with open(cache_dir, 'r') as fh:
        lines = fh.readlines()
    return [dir / entry.strip() for entry in lines]
|
| 44 |
+
|
| 45 |
+
def get_logger(args):
    """Create a TensorBoard logger rooted at args.out_dir and record the run config.

    BUG FIX: the logger was created and hyperparams logged, but the function
    never returned it — every caller of get_logger() received None.
    """
    logger = TensorBoardLogger(save_dir=args.out_dir)
    logger.log_hyperparams(args)
    return logger
|
capture/utils/model.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from ..source import ResnetEncoder, MultiHeadDecoder, DenseMTL, DenseReg, Vanilla
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def replace_batchnorm_(module: nn.Module):
    """Recursively swap every BatchNorm2d inside *module* for an InstanceNorm2d, in place."""
    for child_name, child in module.named_children():
        if not isinstance(child, nn.BatchNorm2d):
            # Recurse into containers/submodules.
            replace_batchnorm_(child)
        else:
            setattr(module, child_name, nn.InstanceNorm2d(child.num_features))
|
| 14 |
+
|
| 15 |
+
def get_model(archi):
    """Assemble the DenseMTL network; only the 'densemtl' architecture is supported."""
    assert archi == 'densemtl'

    backbone = ResnetEncoder(num_layers=101, pretrained=True, in_channels=3)
    heads = MultiHeadDecoder(
        num_ch_enc=backbone.num_ch_enc,
        tasks=dict(albedo=3, roughness=1, normals=2),
        return_feats=False,
        use_skips=True)

    net = nn.Sequential(backbone, heads)
    # Swap every BatchNorm2d for InstanceNorm2d throughout the network.
    replace_batchnorm_(net)
    return net
|
| 28 |
+
|
| 29 |
+
def get_module(args):
    """Build the Vanilla routine (model + loss), optionally restoring from a checkpoint."""
    loss = DenseReg(**args.loss)
    model = get_model(args.archi)

    weights = args.load_weights_from
    if weights:
        assert weights.is_file()
        # strict=False tolerates checkpoints with extra/missing keys.
        return Vanilla.load_from_checkpoint(str(weights), model=model, loss=loss, strict=False, **args.routine)

    return Vanilla(model, loss, **args.routine)
|
| 39 |
+
|
| 40 |
+
def get_inference_module(pt):
    """Load a Vanilla routine from checkpoint *pt* for inference (no loss attached)."""
    checkpoint = Path(pt)
    assert checkpoint.exists()
    net = get_model('densemtl')
    return Vanilla.load_from_checkpoint(str(checkpoint), model=net, strict=False)
|
fabric_diffusion.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import os
|
| 5 |
+
from diffusers import StableDiffusionInstructPix2PixPipeline
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import random
|
| 8 |
+
|
| 9 |
+
SEED = 42

# Seed every RNG source once so pipeline outputs are reproducible.
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
np.random.seed(SEED)
random.seed(SEED)
# Deterministic cuDNN kernels. (These two flags were previously set twice
# back-to-back; setting them once is sufficient.)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Shared generator handed to the diffusion pipelines for deterministic sampling.
generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(SEED)
|
| 19 |
+
|
| 20 |
+
class FabricDiffusionPipeline():
    """Holds two InstructPix2Pix pipelines: one for textures, one for prints.

    Either checkpoint may be empty/None, in which case the corresponding
    pipeline attribute is set to None.
    """
    def __init__(self, device, texture_checkpoint, print_checkpoint):

        self.device = device
        self.texture_checkpoint = texture_checkpoint
        self.print_base_model = print_checkpoint

        if texture_checkpoint:
            # fp16 weights; safety checker disabled for throughput.
            self.texture_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
                texture_checkpoint,
                torch_dtype=torch.float16,
                safety_checker=None
            )
            # with open(os.path.join(texture_checkpoint, "unet", "diffusion_pytorch_model.safetensors"), "rb") as f:
            #     data = f.read()
            # loaded = load(data)
            # self.texture_pipeline.unet.load_state_dict(loaded)
            self.texture_model = self.texture_model.to(device)
        else:
            self.texture_model = None

        # set circular convolution for the texture model
        # Changing padding_mode on the existing Conv2d layers makes their
        # subsequent forwards wrap around, producing tileable textures.
        if self.texture_model:
            for a, b in self.texture_model.unet.named_modules():
                if isinstance(b, nn.Conv2d):
                    setattr(b, 'padding_mode', 'circular')
            for a, b in self.texture_model.vae.named_modules():
                if isinstance(b, nn.Conv2d):
                    setattr(b, 'padding_mode', 'circular')

        if print_checkpoint:
            # The print pipeline keeps the default (non-circular) padding.
            self.print_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
                print_checkpoint,
                torch_dtype=torch.float16,
                safety_checker=None
            )
            self.print_model = self.print_model.to(device)
        else:
            self.print_model = None
|
| 59 |
+
|
| 60 |
+
    def load_real_data_with_mask(self, dataset_path, image_name):
        """Load an image plus its segmentation/texture masks and extract the texture patch.

        Expects `images/`, `seg_mask/` and `texture_mask/` subfolders under
        *dataset_path*, each containing a file named *image_name*.
        Returns (full RGB image array, seg mask HxWx1, 256x256 PIL texture patch).
        """
        image = np.array(Image.open(os.path.join(dataset_path, 'images', image_name)).convert('RGB'))
        seg_mask = np.array(Image.open(os.path.join(dataset_path, 'seg_mask', image_name)).convert('L'))[..., None]
        texture_mask = np.array(Image.open(os.path.join(dataset_path, 'texture_mask', image_name)).convert('L'))[
            ..., None]
        # crop the image based on texture_mask
        # NOTE(review): the y1:y2, x1:x2 slice excludes the last masked
        # row/column (exclusive upper bound) — confirm this is intended.
        x1, y1, x2, y2 = np.where(texture_mask > 0)[1].min(), np.where(texture_mask > 0)[0].min(), \
            np.where(texture_mask > 0)[1].max(), np.where(texture_mask > 0)[0].max()
        texture_patch = image[y1:y2, x1:x2]
        # resize the texture_patch to 256x256
        texture_patch = Image.fromarray(texture_patch.astype(np.uint8)).resize((256, 256))

        return image, seg_mask, texture_patch
|
| 73 |
+
|
| 74 |
+
def load_patch_data(self, patch_path):
|
| 75 |
+
texture_patch = Image.open(patch_path).convert('RGB').resize((256, 256))
|
| 76 |
+
return texture_patch
|
| 77 |
+
|
| 78 |
+
def flatten_texture(self, texture_patch, n_samples=3, use_inversion=True):
    """Generate ``n_samples`` flattened variants of a texture patch with the
    InstructPix2Pix texture model.

    Args:
        texture_patch: input patch (PIL image) to flatten.
        n_samples: number of variants to generate in one batched call.
        use_inversion: when True, start denoising from a noised VAE encoding
            of the input patch instead of pure noise, so outputs stay close
            to the source texture.

    Returns:
        List of ``n_samples`` generated PIL images.
    """
    # Fix: the pipeline call below previously re-hardcoded the literal 20,
    # which could silently drift from the scheduler's step count; use the
    # single variable for both.
    num_inference_steps = 20
    self.texture_model.scheduler.set_timesteps(num_inference_steps)
    timesteps = self.texture_model.scheduler.timesteps

    # Encode the image to VAE latent space.
    image = self.texture_model.image_processor.preprocess(texture_patch)
    if use_inversion:
        image_latents = self.texture_model.prepare_image_latents(
            image,
            batch_size=1,
            num_images_per_prompt=1,
            device=self.device,
            dtype=torch.float16,
            do_classifier_free_guidance=False)

        # Standardize the latents before noising (zero mean, unit std).
        image_latents = (image_latents - torch.mean(image_latents)) / torch.std(image_latents)

        # Forward noising up to the largest timestep, then rescale so the
        # pipeline (which multiplies initial latents by init_noise_sigma)
        # sees correctly-scaled latents.
        noise = torch.randn_like(image_latents)
        noisy_image_latents = self.texture_model.scheduler.add_noise(image_latents, noise, timesteps[0:1])
        noisy_image_latents /= self.texture_model.scheduler.init_noise_sigma
        # Replicate the single latent for every requested sample.
        noisy_image_latents = torch.tile(noisy_image_latents, (n_samples, 1, 1, 1))
    else:
        noisy_image_latents = None

    image = torch.tile(image, (n_samples, 1, 1, 1))
    gen_imgs = self.texture_model(
        "",
        image=image,
        num_inference_steps=num_inference_steps,
        image_guidance_scale=1.5,
        guidance_scale=7.,
        latents=noisy_image_latents,
        num_images_per_prompt=n_samples,
        # NOTE(review): `generator` is not defined in this method —
        # presumably a module-level torch.Generator; confirm it exists.
        generator=generator
    ).images

    return gen_imgs
|
| 117 |
+
|
| 118 |
+
def flatten_print(self, print_patch, n_samples=3):
    """Generate ``n_samples`` flattened print designs as RGBA images.

    Each sample is produced by the InstructPix2Pix print model, then
    post-processed: dark pixels are mapped to low alpha (treated as
    background) and the remaining intensity range is re-stretched.

    Args:
        print_patch: input print patch (PIL image).
        n_samples: number of independent samples to generate.

    Returns:
        List of ``n_samples`` RGBA PIL images.
    """
    preprocessed = self.print_model.image_processor.preprocess(print_patch)
    results = []
    for _ in range(n_samples):
        sample = self.print_model(
            "",
            image=preprocessed,
            num_inference_steps=20,
            image_guidance_scale=1.5,
            guidance_scale=7.,
            # NOTE(review): `generator` is not defined in this method —
            # presumably module-level; confirm.
            generator=generator
        ).images[0]
        rgb = np.asarray(sample) / 255.
        # Alpha from brightness: values near zero fade out, values above
        # ~0.1 become fully opaque (averaged over the color channels).
        alpha = np.clip(rgb / 0.1 * 1.2 - 0.2, 0., 1).mean(axis=-1, keepdims=True)
        # Re-stretch the remaining [0.1, 1.0] intensity range to [0, 1].
        rgb = np.clip((rgb - 0.1) / 0.9, 0., 1.)
        rgba = np.concatenate([rgb, alpha], axis=-1)
        results.append(Image.fromarray((rgba * 255).astype(np.uint8)))

    return results
|
| 139 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
accelerate==1.3.0
|
| 2 |
+
bitsandbytes==0.45.2
|
| 3 |
+
clearml==1.17.1
|
| 4 |
+
datasets==3.2.0
|
| 5 |
+
diffusers==0.19.3
|
| 6 |
+
easydict==1.13
|
| 7 |
+
gradio==5.16.0
|
| 8 |
+
huggingface_hub==0.25.2
|
| 9 |
+
jsonargparse==4.36.0
|
| 10 |
+
numpy==1.23.5
|
| 11 |
+
opencv_python==4.11.0.86
|
| 12 |
+
peft==0.5.0
|
| 13 |
+
Pillow==11.1.0
|
| 14 |
+
pytorch_lightning==2.4.0
|
| 15 |
+
PyYAML==6.0.2
|
| 16 |
+
Requests==2.32.3
|
| 17 |
+
torch==2.2.0
|
| 18 |
+
torchmetrics==1.6.1
|
| 19 |
+
torchvision==0.17.0
|
| 20 |
+
tqdm==4.67.1
|
| 21 |
+
transformers==4.48.1
|
| 22 |
+
wandb==0.19.6
|