repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
speechbrain | speechbrain-main/tests/unittests/test_checkpoints.py | import pytest
def test_checkpointer(tmpdir, device):
    """Exercise the full Checkpointer workflow: recovery with no saved
    checkpoint, save/recover round trips, adding recoverables later,
    partial loads via allow_partial_load, custom importance keys,
    named checkpoints and meta-based filtering."""
    from speechbrain.utils.checkpoints import Checkpointer
    import torch

    class Recoverable(torch.nn.Module):
        # Minimal module: one scalar parameter; forward multiplies by it.
        def __init__(self, param):
            super().__init__()
            self.param = torch.nn.Parameter(torch.tensor([param]))

        def forward(self, x):
            return x * self.param

    recoverable = Recoverable(2.0)
    recoverables = {"recoverable": recoverable}
    recoverer = Checkpointer(tmpdir, recoverables)
    recoverable.param.data = torch.tensor([1.0], device=device)
    # Should not be possible since no checkpoint saved yet:
    assert not recoverer.recover_if_possible()
    result = recoverable(10.0)
    # Check that parameter has not been loaded from original value:
    assert recoverable.param.data == torch.tensor([1.0], device=device)
    ckpt = recoverer.save_checkpoint()
    # Check that the name recoverable has a save file:
    # NOTE: Here assuming .pt filename; if convention changes, change test
    assert (ckpt.path / "recoverable.ckpt").exists()
    # Check that saved checkpoint is found, and location correct:
    assert recoverer.list_checkpoints()[0] == ckpt
    assert recoverer.list_checkpoints()[0].path.parent == tmpdir
    recoverable.param.data = torch.tensor([2.0], device=device)
    recoverer.recover_if_possible()
    # Check that parameter has been loaded immediately:
    assert recoverable.param.data == torch.tensor([1.0], device=device)
    result = recoverable(10.0)
    # And result correct
    assert result == 10.0
    other = Recoverable(2.0)
    recoverer.add_recoverable("other", other)
    # Check that both objects are now found:
    assert recoverer.recoverables["recoverable"] == recoverable
    assert recoverer.recoverables["other"] == other
    new_ckpt = recoverer.save_checkpoint()
    # Check that now both recoverables have a save file:
    assert (new_ckpt.path / "recoverable.ckpt").exists()
    assert (new_ckpt.path / "other.ckpt").exists()
    assert new_ckpt in recoverer.list_checkpoints()
    recoverable.param.data = torch.tensor([2.0], device=device)
    other.param.data = torch.tensor([10.0], device=device)
    chosen_ckpt = recoverer.recover_if_possible()
    # Should choose newest by default:
    assert chosen_ckpt == new_ckpt
    # Check again that parameters have been loaded immediately:
    assert recoverable.param.data == torch.tensor([1.0], device=device)
    assert other.param.data == torch.tensor([2.0], device=device)
    other_result = other(10.0)
    # And again we should have the correct computations:
    assert other_result == 20.0
    # Recover from oldest, which does not have "other":
    # This also tests a custom sort
    # Raises by default:
    with pytest.raises(RuntimeError):
        chosen_ckpt = recoverer.recover_if_possible(
            importance_key=lambda x: -x.meta["unixtime"]
        )
    # However this operation may have loaded the first object
    # so let's set the values manually:
    recoverable.param.data = torch.tensor([2.0], device=device)
    other.param.data = torch.tensor([10.0], device=device)
    recoverer.allow_partial_load = True
    chosen_ckpt = recoverer.recover_if_possible(
        importance_key=lambda x: -x.meta["unixtime"]
    )
    # Should have chosen the original:
    assert chosen_ckpt == ckpt
    # And should recover recoverable:
    assert recoverable.param.data == torch.tensor([1.0], device=device)
    # But not other:
    other_result = other(10.0)
    assert other.param.data == torch.tensor([10.0], device=device)
    assert other_result == 100.0
    # Test saving names checkpoints with meta info, and custom filter
    epoch_ckpt = recoverer.save_checkpoint(name="ep1", meta={"loss": 2.0})
    assert "ep1" in epoch_ckpt.path.name
    other.param.data = torch.tensor([2.0], device=device)
    recoverer.save_checkpoint(meta={"loss": 3.0})
    # Recover the checkpoint with lowest loss among those that have one:
    chosen_ckpt = recoverer.recover_if_possible(
        ckpt_predicate=lambda ckpt: "loss" in ckpt.meta,
        importance_key=lambda ckpt: -ckpt.meta["loss"],
    )
    assert chosen_ckpt == epoch_ckpt
    assert other.param.data == torch.tensor([10.0], device=device)
    # Make sure checkpoints can't be name saved by the same name
    with pytest.raises(FileExistsError):
        recoverer.save_checkpoint(name="ep1")
def test_recovery_custom_io(tmpdir):
    """Round-trip a checkpoint through custom save/load hooks."""
    from speechbrain.utils.checkpoints import register_checkpoint_hooks
    from speechbrain.utils.checkpoints import mark_as_saver
    from speechbrain.utils.checkpoints import mark_as_loader
    from speechbrain.utils.checkpoints import Checkpointer

    @register_checkpoint_hooks
    class CustomRecoverable:
        # Persists a single int via a plain text file.
        def __init__(self, param):
            self.param = int(param)

        @mark_as_saver
        def save(self, path):
            with open(path, "w") as fo:
                fo.write(str(self.param))

        @mark_as_loader
        def load(self, path, end_of_epoch, device):
            del end_of_epoch  # Unused
            del device
            with open(path) as fi:
                self.param = int(fi.read())

    obj = CustomRecoverable(0)
    checkpointer = Checkpointer(tmpdir, {"custom_recoverable": obj})
    obj.param = 1
    # No checkpoint should exist yet (guards against a contaminated tmpdir):
    assert checkpointer.recover_if_possible() is None
    saved = checkpointer.save_checkpoint()
    obj.param = 2
    recovered = checkpointer.recover_if_possible()
    # The recovered checkpoint is exactly the one we just saved:
    assert saved == recovered
    # Loading is immediate with this custom recoverable:
    assert obj.param == 1
def test_checkpoint_deletion(tmpdir, device):
    """Test delete_checkpoints(): default retention behavior, num_to_keep,
    and keeping the union of multiple importance criteria."""
    from speechbrain.utils.checkpoints import Checkpointer
    import torch

    class Recoverable(torch.nn.Module):
        # Minimal module with one scalar parameter.
        def __init__(self, param):
            super().__init__()
            self.param = torch.nn.Parameter(
                torch.tensor([param], device=device)
            )

        def forward(self, x):
            return x * self.param

    recoverable = Recoverable(1.0)
    recoverables = {"recoverable": recoverable}
    recoverer = Checkpointer(tmpdir, recoverables)
    first_ckpt = recoverer.save_checkpoint()
    recoverer.delete_checkpoints()
    # Will not delete only checkpoint by default:
    assert first_ckpt in recoverer.list_checkpoints()
    second_ckpt = recoverer.save_checkpoint()
    recoverer.delete_checkpoints()
    # Oldest checkpoint is deleted by default:
    assert first_ckpt not in recoverer.list_checkpoints()
    # Other syntax also should work:
    recoverer.save_and_keep_only()
    assert second_ckpt not in recoverer.list_checkpoints()
    # Can delete all checkpoints:
    recoverer.delete_checkpoints(num_to_keep=0)
    assert not recoverer.list_checkpoints()
    # Now each should be kept:
    # Highest foo
    c1 = recoverer.save_checkpoint(meta={"foo": 2})
    # Latest CKPT after filtering
    c2 = recoverer.save_checkpoint(meta={"foo": 1})
    # Filtered out
    c3 = recoverer.save_checkpoint(meta={"epoch_ckpt": True})
    recoverer.delete_checkpoints(
        num_to_keep=1,
        max_keys=["foo"],
        importance_keys=[lambda c: c.meta["unixtime"]],
        ckpt_predicate=lambda c: "epoch_ckpt" not in c.meta,
    )
    # Each checkpoint survives for a different reason (see comments above):
    assert all(c in recoverer.list_checkpoints() for c in [c1, c2, c3])
    # Reset:
    recoverer.delete_checkpoints(num_to_keep=0)
    assert not recoverer.list_checkpoints()
    # Test the keeping multiple checkpoints without predicate:
    # This should be deleted:
    c_to_delete = recoverer.save_checkpoint(meta={"foo": 2})
    # Highest foo
    c1 = recoverer.save_checkpoint(meta={"foo": 3})
    # Latest CKPT after filtering
    c2 = recoverer.save_checkpoint(meta={"foo": 1})
    recoverer.delete_checkpoints(
        num_to_keep=1,
        importance_keys=[lambda c: c.meta["unixtime"], lambda c: c.meta["foo"]],
    )
    assert all(c in recoverer.list_checkpoints() for c in [c1, c2])
    assert c_to_delete not in recoverer.list_checkpoints()
def test_multiple_ckpts_and_criteria(tmpdir):
    """When several checkpoints tie on the meta criterion, the most recent
    one should win in find_checkpoint / find_checkpoints."""
    from speechbrain.utils.checkpoints import Checkpointer
    import torch

    class Recoverable(torch.nn.Module):
        # Minimal module with one scalar parameter.
        def __init__(self, param):
            super().__init__()
            self.param = torch.nn.Parameter(torch.tensor([param]))

        def forward(self, x):
            return x * self.param

    recoverable = Recoverable(1.0)
    recoverables = {"recoverable": recoverable}
    recoverer = Checkpointer(tmpdir, recoverables)
    # Here testing multiple checkpoints with equal meta criteria
    recoverer.save_and_keep_only(
        meta={"error": 5}, min_keys=["error"], keep_recent=True
    )
    # By default, get the most recent one:
    first_ckpt = recoverer.find_checkpoint()
    recoverer.save_and_keep_only(
        meta={"error": 5}, min_keys=["error"], keep_recent=True
    )
    second_ckpt = recoverer.find_checkpoint()
    assert first_ckpt.meta["unixtime"] < second_ckpt.meta["unixtime"]
    recoverer.save_and_keep_only(
        meta={"error": 6}, min_keys=["error"], keep_recent=True
    )
    third_ckpt = recoverer.find_checkpoint()
    remaining_ckpts = recoverer.list_checkpoints()
    # first_ckpt is neither recent nor a min-error keeper, so it is gone:
    assert first_ckpt not in remaining_ckpts
    assert second_ckpt in remaining_ckpts
    assert third_ckpt in remaining_ckpts
    # With equal importance criteria, the latest checkpoint should always be
    # returned
    fourth_ckpt = recoverer.save_checkpoint(meta={"error": 5})
    found_ckpt = recoverer.find_checkpoint(min_key="error")
    assert found_ckpt == fourth_ckpt
    fifth_ckpt = recoverer.save_checkpoint(meta={"error": 5})
    # Similarly for getting multiple checkpoints:
    found_ckpts = recoverer.find_checkpoints(
        min_key="error", max_num_checkpoints=2
    )
    # Ordered newest first:
    assert found_ckpts == [fifth_ckpt, fourth_ckpt]
def test_torch_meta(tmpdir, device):
    """Torch tensors stored in checkpoint meta survive a save/load cycle."""
    from speechbrain.utils.checkpoints import Checkpointer
    import torch

    class Recoverable(torch.nn.Module):
        # Minimal module with one scalar parameter.
        def __init__(self, param):
            super().__init__()
            self.param = torch.nn.Parameter(
                torch.tensor([param], device=device)
            )

        def forward(self, x):
            return x * self.param

    model = Recoverable(1.0)
    checkpointer = Checkpointer(tmpdir, {"recoverable": model})
    meta_loss = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=device)
    stored = checkpointer.save_checkpoint(meta={"loss": meta_loss})
    restored = checkpointer.recover_if_possible()
    # The tensor read back from disk must match the one we stored:
    assert stored.meta["loss"].allclose(restored.meta["loss"])
def test_checkpoint_hook_register(tmpdir):
    """register_checkpoint_hooks accepts a class whose marked hooks have
    the expected signatures, and raises TypeError when they do not."""
    from speechbrain.utils.checkpoints import register_checkpoint_hooks
    from speechbrain.utils.checkpoints import mark_as_saver
    from speechbrain.utils.checkpoints import mark_as_loader
    from speechbrain.utils.checkpoints import Checkpointer

    # First a proper interface:
    @register_checkpoint_hooks
    class CustomRecoverable:
        def __init__(self, param):
            self.param = int(param)

        @mark_as_saver
        def save(self, path):
            with open(path, "w") as fo:
                fo.write(str(self.param))

        @mark_as_loader
        def load(self, path, end_of_epoch, device):
            del end_of_epoch  # Unused
            with open(path) as fi:
                self.param = int(fi.read())

    recoverable = CustomRecoverable(1.0)
    checkpointer = Checkpointer(tmpdir, {"recoverable": recoverable})
    checkpointer.save_checkpoint()
    recoverable.param = 2.0
    checkpointer.recover_if_possible()
    # Loading happens immediately, restoring the saved value:
    assert recoverable.param == 1.0
    # Improper interfaces:
    with pytest.raises(TypeError):

        class BadRecoverable:
            def __init__(self, param):
                self.param = int(param)

            def save(self, path):
                with open(path, "w") as fo:
                    fo.write(str(self.param))

            @mark_as_loader
            def load(self, path, end_of_epoch):  # MISSING device
                del end_of_epoch  # Unused
                with open(path) as fi:
                    self.param = int(fi.read())

    with pytest.raises(TypeError):

        class BadRecoverable:  # noqa: F811
            def __init__(self, param):
                self.param = int(param)

            @mark_as_saver
            def save(self, path, extra_arg):  # Extra argument
                with open(path, "w") as fo:
                    fo.write(str(self.param))

            def load(self, path, end_of_epoch, device):
                del end_of_epoch  # Unused
                with open(path) as fi:
                    self.param = int(fi.read())
def test_torch_defaults(tmpdir, device):
    """Default save/load hooks cover plain torch modules, optimizers and
    LR schedulers (including ReduceLROnPlateau)."""
    from speechbrain.utils.checkpoints import Checkpointer
    import torch

    def fresh_components():
        # Build an identical, freshly initialized set of torch objects.
        net = torch.nn.Linear(10, 10).to(device)
        opt = torch.optim.Adam(net.parameters())
        cyclic = torch.optim.lr_scheduler.CyclicLR(
            opt, 0.1, 1.0, cycle_momentum=False
        )
        # ReduceLROnPlateau is not an _LRScheduler, so it is included
        # separately to cover that code path too.
        plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(opt)
        return net, opt, cyclic, plateau

    def make_checkpointer(net, opt, cyclic, plateau):
        return Checkpointer(
            tmpdir,
            recoverables={
                "module": net,
                "optimizer": opt,
                "scheduler": cyclic,
                "scheduler2": plateau,
            },
        )

    module, optimizer, cyclic_sched, plateau_sched = fresh_components()
    checkpointer = make_checkpointer(
        module, optimizer, cyclic_sched, plateau_sched
    )
    ckpt = checkpointer.save_checkpoint()
    # Record the module's output so recovery can be verified below:
    inp = torch.randn((3, 10), device=device)
    prev_output = module(inp)
    # Re-initialize everything from scratch and load the checkpoint:
    module, optimizer, cyclic_sched, plateau_sched = fresh_components()
    checkpointer = make_checkpointer(
        module, optimizer, cyclic_sched, plateau_sched
    )
    checkpointer.load_checkpoint(ckpt)
    # The recovered module must reproduce the original's output:
    assert torch.allclose(module(inp), prev_output)
| 14,332 | 35.940722 | 93 | py |
speechbrain | speechbrain-main/tests/unittests/test_embedding.py | import torch
def test_embedding(device):
    """Test Embedding in one-hot mode (blank index maps to the zero
    vector) and in standard learned-embedding mode, plus jit tracing.

    Fix: the first branch assigned ``embedding_dim = 39`` but never used
    it (one-hot mode derives the dimension internally); the dead local
    is removed.
    """
    from speechbrain.nnet.embedding import Embedding

    # One-hot mode: the blank id maps to the zero vector, so the
    # effective embedding dimension is num_embeddings - 1 = 39.
    blank_id = 39
    size_dict = 40
    emb = Embedding(
        num_embeddings=size_dict, consider_as_one_hot=True, blank_id=blank_id
    ).to(device)
    inputs = torch.Tensor([10, 5, 2, 0, 39]).to(device).long()
    output = emb(inputs)
    assert output.shape == (5, 39)
    # Standard embedding layer with a learned 128-dim table:
    embedding_dim = 128
    emb = Embedding(num_embeddings=size_dict, embedding_dim=embedding_dim).to(
        device
    )
    inputs = torch.randint(0, 40, (5, 10), device=device)
    output = emb(inputs)
    assert output.shape == (5, 10, 128)
    assert torch.jit.trace(emb, inputs)
| 783 | 26.034483 | 78 | py |
speechbrain | speechbrain-main/tests/unittests/test_activations.py | import torch
import torch.nn
def test_softmax(device):
    """Softmax should peak at the largest input and be jit-traceable."""
    from speechbrain.nnet.activations import Softmax

    act = Softmax(apply_log=False)
    logits = torch.tensor([1, 2, 3], device=device).float()
    probs = act(logits)
    # The highest logit (index 2) must get the highest probability:
    assert torch.argmax(probs) == 2
    assert torch.jit.trace(act, logits)
| 312 | 19.866667 | 59 | py |
speechbrain | speechbrain-main/tests/unittests/test_batching.py | import pytest
import torch
import numpy as np
def test_batch_pad_right_to(device):
    """batch_pad_right pads variable-length tensors to a common length
    and reports per-item relative lengths, for 2D and 1D inputs."""
    from speechbrain.utils.data_utils import batch_pad_right
    import random

    n_channels = 40
    batch_lens = [1, 5]
    # 2D case: (time, channels) tensors of random lengths.
    for bsz in batch_lens:
        lengths = [random.randint(10, 53) for _ in range(bsz)]
        tensors = [
            torch.ones((lengths[i], n_channels), device=device)
            for i in range(bsz)
        ]
        padded, rel_lens = batch_pad_right(tensors)
        assert padded.shape[0] == bsz
        np.testing.assert_almost_equal(
            rel_lens, [item / max(lengths) for item in lengths], decimal=3
        )
    # 1D case: plain vectors.
    for bsz in batch_lens:
        lengths = [random.randint(10, 53) for _ in range(bsz)]
        tensors = [torch.ones(lengths[i], device=device) for i in range(bsz)]
        padded, rel_lens = batch_pad_right(tensors)
        assert padded.shape[0] == bsz
        np.testing.assert_almost_equal(
            rel_lens, [item / max(lengths) for item in lengths], decimal=3
        )
def test_paddedbatch(device):
    """PaddedBatch pads per-key tensors, supports dtype casts via .to(),
    and unpacks positionally in key order."""
    from speechbrain.dataio.batch import PaddedBatch

    examples = [
        {
            "id": "ex1",
            "foo": torch.Tensor([1.0]).to(device),
            "bar": torch.Tensor([1.0, 2.0, 3.0]).to(device),
        },
        {
            "id": "ex2",
            "foo": torch.Tensor([2.0, 1.0]).to(device),
            "bar": torch.Tensor([2.0]).to(device),
        },
    ]
    batch = PaddedBatch(examples)
    # Casting the whole batch must touch both data and lengths:
    batch.to(dtype=torch.half)
    assert batch.foo.data.dtype == torch.half
    assert batch["foo"][1].dtype == torch.half
    assert batch.bar.lengths.dtype == torch.half
    # Data is padded up to the longest item per key:
    assert batch.foo.data.shape == torch.Size([2, 2])
    assert batch.bar.data.shape == torch.Size([2, 3])
    # Positional unpacking follows the example keys:
    ids, foos, bars = batch
    assert ids == ["ex1", "ex2"]
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA")
def test_pin_memory():
    """pin_memory() should pin the underlying padded tensors (CUDA only)."""
    from speechbrain.dataio.batch import PaddedBatch

    examples = [
        {
            "id": "ex1",
            "foo": torch.Tensor([1.0]),
            "bar": torch.Tensor([1.0, 2.0, 3.0]),
        },
        {
            "id": "ex2",
            "foo": torch.Tensor([2.0, 1.0]),
            "bar": torch.Tensor([2.0]),
        },
    ]
    batch = PaddedBatch(examples)
    batch.pin_memory()
    assert batch.foo.data.is_pinned()
| 2,404 | 28.329268 | 77 | py |
speechbrain | speechbrain-main/tests/unittests/test_linear.py | import torch
import torch.nn
def test_linear(device):
    """A Linear layer with identity weights and no bias is the identity map."""
    from speechbrain.nnet.linear import Linear

    x = torch.rand(1, 2, 4, device=device)
    layer = Linear(n_neurons=4, input_size=x.shape[-1], bias=False)
    # Force an identity weight matrix so the output must equal the input:
    layer.w.weight = torch.nn.Parameter(
        torch.eye(x.shape[-1], device=device)
    )
    y = layer(x)
    assert torch.equal(x, y)
    assert torch.jit.trace(layer, x)
| 443 | 23.666667 | 72 | py |
speechbrain | speechbrain-main/tests/unittests/test_losses.py | import torch
import pytest
def test_nll(device):
    """NLL of all-zero log-probs against class-0 targets is exactly zero."""
    from speechbrain.nnet.losses import nll_loss

    log_probs = torch.zeros(4, 10, 8, device=device)
    targets = torch.zeros(4, 10, device=device)
    lengths = torch.ones(4, device=device)
    cost = nll_loss(log_probs, targets, lengths)
    assert torch.all(cost == 0)
def test_mse(device):
    """MSE is 0 for identical tensors and 1 for a unit-offset prediction."""
    from speechbrain.nnet.losses import mse_loss

    targets = torch.ones(4, 10, 8, device=device)
    lengths = torch.ones(4, device=device)
    # Identical prediction -> zero loss:
    matching = torch.ones(4, 10, 8, device=device)
    assert torch.all(mse_loss(matching, targets, lengths) == 0)
    # All-zero prediction vs all-ones target -> unit loss:
    offset = torch.zeros(4, 10, 8, device=device)
    assert torch.all(mse_loss(offset, targets, lengths) == 1)
def test_l1(device):
    """L1 loss between two identical all-ones tensors is exactly zero."""
    from speechbrain.nnet.losses import l1_loss

    preds = torch.ones(4, 10, 8, device=device)
    refs = torch.ones(4, 10, 8, device=device)
    lengths = torch.ones(4, device=device)
    cost = l1_loss(preds, refs, lengths)
    assert torch.all(cost == 0)
def test_bce_loss(device):
    """bce_loss handles trailing singleton dims, matching shapes and 1D
    inputs; zero logits against ones targets yield loss log(2)."""
    from speechbrain.nnet.losses import bce_loss

    targets = torch.ones(4, 10, device=device)
    lengths = torch.ones(4, device=device)
    two = torch.tensor(2.0, device=device)
    # Predictions with a trailing singleton dimension:
    cost_singleton = bce_loss(
        torch.zeros(4, 10, 1, device=device), targets, lengths
    )
    assert torch.allclose(torch.exp(cost_singleton), two)
    # Predictions whose shape matches the targets exactly:
    cost_match = bce_loss(torch.zeros(4, 10, device=device), targets, lengths)
    assert torch.allclose(torch.exp(cost_match), two)
    # One-dimensional inputs also work:
    cost_1d = bce_loss(
        torch.zeros(5, 1, device=device), torch.ones(5, device=device)
    )
    assert torch.allclose(torch.exp(cost_1d), two)
    # But a length argument is rejected in the 1D case:
    with pytest.raises(ValueError):
        bce_loss(
            torch.zeros(5, 1, device=device),
            torch.ones(5, device=device),
            length=torch.ones(5, device=device),
        )
def test_classification_error(device):
    """Error is zero when the argmax class always matches the target."""
    from speechbrain.nnet.losses import classification_error

    predictions = torch.zeros(4, 10, 8, device=device)
    predictions[:, :, 0] += 1.0  # make class 0 the argmax everywhere
    targets = torch.zeros(4, 10, device=device)
    lengths = torch.ones(4, device=device)
    error = classification_error(predictions, targets, lengths)
    assert torch.all(error == 0)
def test_pitwrapper(device):
    """PitWrapper must find the source permutation that minimizes the
    base loss, for both 3D and higher-rank inputs."""
    from speechbrain.nnet.losses import PitWrapper
    import torch
    from torch import nn

    base_loss = nn.MSELoss(reduction="none")
    pit = PitWrapper(base_loss)
    predictions = torch.rand(
        (2, 32, 4), device=device
    )  # batch, frames, sources
    p = (3, 0, 2, 1)
    # same but we invert the ordering to check if permutation invariant
    targets = predictions[..., p]
    loss, opt_p = pit(predictions, targets)
    # The wrapper must discover exactly the permutation we applied:
    assert [x == p for x in opt_p] == [True for i in range(len(opt_p))]
    predictions = pit.reorder_tensor(predictions, opt_p)
    # After reordering, predictions equal targets element-wise:
    assert torch.all(torch.eq(base_loss(predictions, targets), 0))
    predictions = torch.rand(
        (3, 32, 32, 32, 5), device=device
    )  # batch, ..., sources
    p = (3, 0, 2, 1, 4)
    targets = predictions[
        ..., p
    ]  # same but we invert the ordering to check if permutation invariant
    loss, opt_p = pit(predictions, targets)
    assert [x == p for x in opt_p] == [True for i in range(len(opt_p))]
    predictions = pit.reorder_tensor(predictions, opt_p)
    assert torch.all(torch.eq(base_loss(predictions, targets), 0))
def test_transducer_loss(device):
    """Transducer loss on a tiny hand-built lattice matches the known
    value and supports backprop. Requires numba and a CUDA device."""
    # Make this its own test since it can only be run
    # if numba is installed and a GPU is available
    pytest.importorskip("numba")
    if torch.cuda.device_count() == 0:
        pytest.skip("This test can only be run if a GPU is available")
    from speechbrain.nnet.losses import transducer_loss

    # The device fixture is deliberately overridden: this path is CUDA-only.
    device = torch.device("cuda")
    log_probs = (
        torch.Tensor(
            [
                [
                    [
                        [0.1, 0.6, 0.1, 0.1, 0.1],
                        [0.1, 0.1, 0.6, 0.1, 0.1],
                        [0.1, 0.1, 0.2, 0.8, 0.1],
                    ],
                    [
                        [0.1, 0.6, 0.1, 0.1, 0.1],
                        [0.1, 0.1, 0.2, 0.1, 0.1],
                        [0.7, 0.1, 0.2, 0.1, 0.1],
                    ],
                ]
            ]
        )
        .to(device)
        .requires_grad_()
        .log_softmax(dim=-1)
    )
    targets = torch.Tensor([[1, 2]]).to(device).int()
    probs_length = torch.Tensor([1.0]).to(device)
    target_length = torch.Tensor([1.0]).to(device)
    out_cost = transducer_loss(
        log_probs,
        targets,
        probs_length,
        target_length,
        blank_index=0,
        use_torchaudio=False,
    )
    # Backward must run cleanly on the custom kernel path:
    out_cost.backward()
    assert out_cost.item() == pytest.approx(2.2478, 0.0001)
def test_guided_attention_loss_mask(device):
    """guided_attentions() produces the expected soft penalty masks,
    zero-padded past each item's input/output lengths."""
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss

    loss = GuidedAttentionLoss().to(device)
    input_lengths = torch.tensor([3, 2, 6], device=device)
    output_lengths = torch.tensor([4, 3, 5], device=device)
    soft_mask = loss.guided_attentions(input_lengths, output_lengths)
    # Reference masks precomputed with the loss's default settings;
    # entries beyond each item's lengths are exactly zero.
    ref_soft_mask = torch.tensor(
        [
            [
                [0.0, 0.54216665, 0.9560631, 0.9991162, 0.0],
                [0.7506478, 0.08314464, 0.2933517, 0.8858382, 0.0],
                [0.9961341, 0.8858382, 0.2933517, 0.08314464, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.7506478, 0.9961341, 0.0, 0.0],
                [0.9560631, 0.2933517, 0.2933517, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.39346933, 0.86466473, 0.988891, 0.99966455],
                [0.2933517, 0.01379288, 0.49366438, 0.90436554, 0.993355],
                [0.7506478, 0.1992626, 0.05404053, 0.5888877, 0.93427145],
                [0.9560631, 0.6753475, 0.1175031, 0.1175031, 0.6753475],
                [0.9961341, 0.93427145, 0.5888877, 0.05404053, 0.1992626],
                [0.9998301, 0.993355, 0.90436554, 0.49366438, 0.01379288],
            ],
        ],
        device=device,
    )
    assert torch.allclose(soft_mask, ref_soft_mask)
def test_guided_attention_loss_value(device):
    """The guided attention loss for a known alignment matrix matches a
    precomputed reference value."""
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss

    loss = GuidedAttentionLoss().to(device)
    input_lengths = torch.tensor([2, 3], device=device)
    target_lengths = torch.tensor([3, 4], device=device)
    # Alignments appear to be (batch, max target steps, max input steps),
    # with zeros past each item's lengths — TODO confirm against the loss.
    alignments = torch.tensor(
        [
            [
                [0.8, 0.2, 0.0],
                [0.4, 0.6, 0.0],
                [0.2, 0.8, 0.0],
                [0.0, 0.0, 0.0],
            ],
            [
                [0.6, 0.2, 0.2],
                [0.1, 0.7, 0.2],
                [0.3, 0.4, 0.3],
                [0.2, 0.3, 0.5],
            ],
        ],
        device=device,
    )
    loss_value = loss(alignments, input_lengths, target_lengths)
    # NOTE(review): the reference is created on CPU while loss_value may
    # live on another device; this relies on torch's 0-dim cross-device
    # comparison — consider passing device=device here.
    ref_loss_value = torch.tensor(0.1142)
    assert torch.isclose(loss_value, ref_loss_value, 0.0001, 0.0001).item()
def test_guided_attention_loss_shapes(device):
    """guided_attentions() mask shape follows batch size and the optional
    max_input_len / max_target_len overrides."""
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss

    loss = GuidedAttentionLoss().to(device)
    ilens = torch.tensor([3, 2, 6], device=device)
    olens = torch.tensor([4, 3, 5], device=device)
    # Default: max lengths inferred from the batch (6 in, 5 out):
    assert loss.guided_attentions(ilens, olens).shape == (3, 6, 5)
    # Explicit input-length override:
    mask = loss.guided_attentions(ilens, olens, max_input_len=10)
    assert mask.shape == (3, 10, 5)
    # Explicit target-length override:
    mask = loss.guided_attentions(ilens, olens, max_target_len=12)
    assert mask.shape == (3, 6, 12)
    # Both overrides at once:
    mask = loss.guided_attentions(
        ilens, olens, max_input_len=10, max_target_len=12
    )
    assert mask.shape == (3, 10, 12)
| 8,520 | 34.210744 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_metrics.py | import torch
import torch.nn
import math
def test_metric_stats(device):
    """MetricStats tracks per-utterance scores and summary statistics."""
    from speechbrain.utils.metric_stats import MetricStats
    from speechbrain.nnet.losses import l1_loss

    stats = MetricStats(metric=l1_loss)
    stats.append(
        ids=["utterance1", "utterance2"],
        predictions=torch.tensor([[0.1, 0.2], [0.1, 0.2]], device=device),
        targets=torch.tensor([[0.1, 0.3], [0.2, 0.3]], device=device),
        length=torch.ones(2, device=device),
        reduction="batch",
    )
    summary = stats.summarize()
    # utterance1 mean |err| = 0.05; utterance2 = 0.1; average = 0.075.
    assert math.isclose(summary["average"], 0.075, rel_tol=1e-5)
    assert math.isclose(summary["min_score"], 0.05, rel_tol=1e-5)
    assert summary["min_id"] == "utterance1"
    assert math.isclose(summary["max_score"], 0.1, rel_tol=1e-5)
    assert summary["max_id"] == "utterance2"
def test_error_rate_stats(device):
    """ErrorRateStats computes WER components and keeps token alignments."""
    from speechbrain.utils.metric_stats import ErrorRateStats

    wer_stats = ErrorRateStats()
    index_to_label = {1: "hello", 2: "world", 3: "the"}

    def mapper(batch):
        # Map integer token ids to word labels, per sequence.
        return [[index_to_label[int(token)] for token in seq] for seq in batch]

    wer_stats.append(
        ids=["utterance1", "utterance2"],
        predict=[[3, 2, 1], [2, 3]],
        target=torch.tensor([[3, 2, 0], [2, 1, 0]], device=device),
        target_len=torch.tensor([0.67, 0.67], device=device),
        ind2lab=mapper,
    )
    summary = wer_stats.summarize()
    # 4 reference words, 1 insertion + 1 substitution -> 50% WER.
    assert summary["WER"] == 50.0
    assert summary["insertions"] == 1
    assert summary["substitutions"] == 1
    assert summary["deletions"] == 0
    assert wer_stats.scores[0]["ref_tokens"] == ["the", "world"]
    assert wer_stats.scores[0]["hyp_tokens"] == ["the", "world", "hello"]
def test_binary_metrics(device):
    """BinaryMetricStats confusion counts and automatic threshold search."""
    from speechbrain.utils.metric_stats import BinaryMetricStats

    stats = BinaryMetricStats()
    stats.append(
        ids=["utt1", "utt2", "utt3", "utt4", "utt5", "utt6"],
        scores=torch.tensor([0.1, 0.4, 0.8, 0.2, 0.3, 0.6], device=device),
        labels=torch.tensor([1, 0, 1, 0, 1, 0], device=device),
    )
    # Fixed threshold at 0.5:
    fixed = stats.summarize(threshold=0.5)
    assert fixed["TP"] == 1
    assert fixed["TN"] == 2
    assert fixed["FP"] == 1
    assert fixed["FN"] == 2
    # Automatic threshold selection:
    auto = stats.summarize(threshold=None)
    assert 0.3 <= auto["threshold"] < 0.4
    # Automatic selection with subsampled candidate thresholds:
    sampled = stats.summarize(threshold=None, max_samples=1)
    assert 0.1 <= sampled["threshold"] < 0.2
def test_EER(device):
    """EER is 1 for fully swapped score ranges, 0 for separable scores,
    and roughly balances errors on random cosine-similarity scores."""
    from speechbrain.utils.metric_stats import EER

    # Positives all below negatives -> every threshold errs, EER = 1:
    pos = torch.tensor([0.1, 0.2, 0.3], device=device)
    neg = torch.tensor([0.4, 0.5, 0.6], device=device)
    eer, threshold = EER(pos, neg)
    assert eer == 1.0
    assert 0.3 < threshold < 0.4
    # Perfectly separable scores -> EER = 0:
    pos = torch.tensor([0.4, 0.5, 0.6], device=device)
    neg = torch.tensor([0.3, 0.2, 0.1], device=device)
    eer, threshold = EER(pos, neg)
    assert eer == 0
    assert 0.3 < threshold < 0.4
    # Random embeddings: the threshold should split errors about evenly.
    cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    pos = cos(
        torch.randn(1000, 64, device=device),
        torch.randn(1000, 64, device=device),
    )
    neg = cos(
        torch.randn(1000, 64, device=device),
        torch.randn(1000, 64, device=device),
    )
    eer, threshold = EER(pos, neg)
    correct = (pos > threshold).nonzero(as_tuple=False).size(0) + (
        neg < threshold
    ).nonzero(as_tuple=False).size(0)
    assert 900 < correct < 1100
def test_minDCF(device):
    """minDCF should hit the expected floor (0.01) when positives all
    score below negatives, and 0 when the classes are separable.

    Fix: the original assertion ``(0.01 - min_dcf) < 1e-4`` was
    one-sided — it also passed for ANY min_dcf >= 0.01, so a broken
    metric returning e.g. 0.5 would not fail the test. Compare the
    absolute difference instead.
    """
    from speechbrain.utils.metric_stats import minDCF

    # Positives all below negatives: the best decision rejects every
    # trial (p_miss=1, p_fa=0), so the expected minimum cost is 0.01.
    positive_scores = torch.tensor([0.1, 0.2, 0.3], device=device)
    negative_scores = torch.tensor([0.4, 0.5, 0.6], device=device)
    min_dcf, threshold = minDCF(positive_scores, negative_scores)
    assert abs(0.01 - min_dcf) < 1e-4
    assert threshold >= 0.6
    # Separable scores: zero cost at any threshold between the classes.
    positive_scores = torch.tensor([0.4, 0.5, 0.6], device=device)
    negative_scores = torch.tensor([0.1, 0.2, 0.3], device=device)
    min_dcf, threshold = minDCF(positive_scores, negative_scores)
    assert min_dcf == 0
    assert threshold > 0.3 and threshold < 0.4
def test_classification_stats():
    """ClassificationStats overall and per-class accuracy."""
    import pytest
    from speechbrain.utils.metric_stats import ClassificationStats

    stats = ClassificationStats()
    stats.append(ids=["1", "2"], predictions=["B", "A"], targets=["B", "A"])
    stats.append(ids=["3", "4"], predictions=["A", "B"], targets=["B", "C"])
    summary = stats.summarize()
    # 2 of 4 predictions are correct overall:
    assert pytest.approx(summary["accuracy"], 0.01) == 0.5
    by_class = summary["classwise_accuracy"]
    assert pytest.approx(by_class["A"]) == 1.0
    assert pytest.approx(by_class["B"]) == 0.5
    assert pytest.approx(by_class["C"]) == 0.0
def test_categorized_classification_stats():
    """Per-(category, class) accuracy when categories are supplied."""
    import pytest
    from speechbrain.utils.metric_stats import ClassificationStats

    stats = ClassificationStats()
    stats.append(
        ids=["1", "2"],
        predictions=["B", "A"],
        targets=["B", "A"],
        categories=["C1", "C2"],
    )
    stats.append(
        ids=["3", "4"],
        predictions=["A", "B"],
        targets=["B", "C"],
        categories=["C2", "C1"],
    )
    stats.append(
        ids=["5", "6"],
        predictions=["A", "C"],
        targets=["B", "C"],
        categories=["C2", "C1"],
    )
    summary = stats.summarize()
    # 3 of 6 predictions are correct overall:
    assert pytest.approx(summary["accuracy"], 0.01) == 0.5
    by_class = summary["classwise_accuracy"]
    # Class-wise accuracy is keyed by (category, target) pairs:
    assert pytest.approx(by_class["C1", "B"]) == 1.0
    assert pytest.approx(by_class["C1", "C"]) == 0.5
    assert pytest.approx(by_class["C2", "A"]) == 1.0
    assert pytest.approx(by_class["C2", "B"]) == 0.0
def test_classification_stats_report():
    """write_stats() emits the expected plain-text accuracy and
    confusion report, compared byte for byte."""
    from io import StringIO
    from speechbrain.utils.metric_stats import ClassificationStats

    stats = ClassificationStats()
    stats.append(ids=["1", "2"], predictions=["B", "A"], targets=["B", "A"])
    stats.append(ids=["3", "4"], predictions=["A", "B"], targets=["B", "C"])
    report_file = StringIO()
    stats.write_stats(report_file)
    report_file.seek(0)
    report = report_file.read()
    ref_report = """Overall Accuracy: 50%
Class-Wise Accuracy
-------------------
A: 1 / 1 (100.00%)
B: 1 / 2 (50.00%)
C: 0 / 1 (0.00%)
Confusion
---------
Target: A
-> A: 1 / 1 (100.00%)
Target: B
-> A: 1 / 2 (50.00%)
-> B: 1 / 2 (50.00%)
Target: C
-> B: 1 / 1 (100.00%)
"""
    assert report == ref_report
| 6,686 | 32.268657 | 79 | py |
speechbrain | speechbrain-main/tests/unittests/test_CNN.py | import torch
import torch.nn
def test_SincConv(device):
    """SincConv output channel counts for mono and multichannel input."""
    from speechbrain.nnet.CNN import SincConv

    # Single-channel waveform batch:
    wav = torch.rand([4, 16000], device=device)
    conv = SincConv(
        input_shape=wav.shape, out_channels=8, kernel_size=65, padding="same"
    ).to(device)
    assert conv(wav).shape[-1] == 8
    assert torch.jit.trace(conv, wav)
    # Multichannel case
    wav = torch.rand([10, 16000, 8], device=device)
    conv = SincConv(
        input_shape=wav.shape, out_channels=16, kernel_size=11, padding="same"
    ).to(device)
    assert conv(wav).shape[-1] == 16
    assert torch.jit.trace(conv, wav)
def test_Conv1d(device):
    """A 1x1 Conv1d preserves shape; a hand-set -1 kernel negates input."""
    from speechbrain.nnet.CNN import Conv1d

    x = (
        torch.tensor([-1, -1, -1, -1], device=device)
        .unsqueeze(0)
        .unsqueeze(2)
        .float()
    )
    conv = Conv1d(
        out_channels=1, kernel_size=1, input_shape=x.shape, padding="same"
    ).to(device)
    # 'same' padding with a 1x1 kernel keeps the shape:
    assert x.shape == conv(x).shape
    # Weight -1 and bias 0: the output must be the negated (all-ones) input.
    conv.conv.weight = torch.nn.Parameter(
        torch.tensor([-1], device=device).float().unsqueeze(0).unsqueeze(1)
    )
    conv.conv.bias = torch.nn.Parameter(
        torch.tensor([0], device=device).float()
    )
    y = conv(x)
    assert torch.equal(torch.ones(x.shape, device=device), y)
    assert torch.jit.trace(conv, x)
def test_Conv2d(device):
    """A 1x1 Conv2d: zero weights yield zeros, unit weights are identity."""
    from speechbrain.nnet.CNN import Conv2d

    x = torch.rand([4, 11, 32, 1], device=device)
    conv = Conv2d(
        out_channels=1,
        input_shape=x.shape,
        kernel_size=(1, 1),
        padding="same",
    ).to(device)
    assert conv(x).shape[-1] == 1
    # All-zero weights and bias: the output must be all zeros.
    conv.conv.weight = torch.nn.Parameter(
        torch.zeros(conv.conv.weight.shape, device=device)
    )
    conv.conv.bias = torch.nn.Parameter(
        torch.tensor([0], device=device).float()
    )
    assert torch.equal(torch.zeros(x.shape, device=device), conv(x))
    # All-one 1x1 weights with zero bias: identity on a single channel.
    conv.conv.weight = torch.nn.Parameter(
        torch.ones(conv.conv.weight.shape, device=device)
    )
    conv.conv.bias = torch.nn.Parameter(
        torch.tensor([0], device=device).float()
    )
    assert torch.equal(x, conv(x))
    assert torch.jit.trace(conv, x)
def test_Leaf(device):
    """Leaf frontend produces the requested number of output channels."""
    from speechbrain.lobes.features import Leaf

    wav = torch.rand([4, 16000], device=device)
    frontend = Leaf(
        input_shape=wav.shape,
        window_len=25.0,
        window_stride=10.0,
        out_channels=8,
    ).to(device)
    assert frontend(wav).shape[-1] == 8
    assert torch.jit.trace(frontend, wav)
| 2,850 | 26.413462 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_pooling.py | import torch
import torch.nn
def test_pooling1d(device):
    """Pooling1d max/avg over a full window of three values."""
    from speechbrain.nnet.pooling import Pooling1d

    x = (
        torch.tensor([1, 3, 2], device=device)
        .unsqueeze(0)
        .unsqueeze(-1)
        .float()
    )
    # Max over the whole window picks the largest value:
    max_pool = Pooling1d("max", 3).to(device)
    assert max_pool(x) == 3
    # Average over the whole window:
    avg_pool = Pooling1d("avg", 3).to(device)
    assert avg_pool(x) == 2
    assert torch.jit.trace(avg_pool, x)
def test_pooling2d(device):
    """Pooling2d smoke test on a fixed 2x3 input: full-window and
    per-row pooling for both "max" and "avg", plus traceability."""
    from speechbrain.nnet.pooling import Pooling2d
    input = (
        torch.tensor([[1, 3, 2], [4, 6, 5]], device=device).float().unsqueeze(0)
    )
    # Full 2x3 window: global max is 6.
    pool = Pooling2d("max", (2, 3)).to(device)
    output = pool(input)
    assert output == 6
    input = (
        torch.tensor([[1, 3, 2], [4, 6, 5]], device=device).float().unsqueeze(0)
    )
    # 1x3 window: one max per row.
    pool = Pooling2d("max", (1, 3)).to(device)
    output = pool(input)
    assert output[0][0] == 3
    assert output[0][1] == 6
    input = (
        torch.tensor([[1, 3, 2], [4, 6, 5]], device=device).float().unsqueeze(0)
    )
    # Full 2x3 window: global mean is (1+3+2+4+6+5)/6 = 3.5.
    pool = Pooling2d("avg", (2, 3)).to(device)
    output = pool(input)
    assert output == 3.5
    input = (
        torch.tensor([[1, 3, 2], [4, 6, 5]], device=device).float().unsqueeze(0)
    )
    # 1x3 window: one mean per row.
    pool = Pooling2d("avg", (1, 3)).to(device)
    output = pool(input)
    assert output[0][0] == 2
    assert output[0][1] == 5
    assert torch.jit.trace(pool, input)
| 1,446 | 22.721311 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_tokenizer.py | import os
import torch
def test_tokenizer():
    """Train a BPE SentencePiece tokenizer on the sample annotations and
    check that encoding followed by decoding round-trips both word-level
    and char-level inputs back to the reference word sequences, for both
    padded-tensor decoding and list decoding."""
    from speechbrain.tokenizers.SentencePiece import SentencePiece
    # Expected decoded word sequences for the two utterances in the batch.
    gt = [
        ["HELLO", "MORNING", "MORNING", "HELLO"],
        ["HELLO", "MORNING", "HELLO"],
    ]
    # ---- Word-level input test ----
    dict_int2lab = {1: "HELLO", 2: "MORNING"}
    spm = SentencePiece(
        os.path.abspath("tests/tmp/tokenizer_data"),
        100,
        annotation_train=os.path.abspath(
            "tests/samples/annotation/tokenizer.csv"
        ),
        annotation_read="wrd",
        model_type="bpe",
    )
    encoded_seq_ids, encoded_seq_pieces = spm(
        torch.Tensor([[1, 2, 2, 1], [1, 2, 1, 0]]),
        torch.Tensor([1.0, 0.75]),
        dict_int2lab,
        task="encode",
    )
    # Convert relative lengths back to absolute token counts.
    lens = (encoded_seq_pieces * encoded_seq_ids.shape[1]).round().int()
    # Decode from torch tensors (batch, batch_lens).
    words_seq = spm(encoded_seq_ids, encoded_seq_pieces, task="decode")
    assert words_seq == gt, "output not the same"
    # Decode from a list of bpe sequences (without padding).
    hyps_list = [
        encoded_seq_ids[0].int().tolist(),
        encoded_seq_ids[1][: lens[1]].int().tolist(),
    ]
    words_seq = spm(hyps_list, task="decode_from_list")
    assert words_seq == gt, "output not the same"
    # ---- Char-level input test ----
    dict_int2lab = {
        1: "H",
        2: "E",
        3: "L",
        4: "O",
        5: "M",
        6: "R",
        7: "N",
        8: "I",
        9: "G",
        10: "_",
    }
    spm = SentencePiece(
        os.path.abspath("tests/tmp/tokenizer_data"),
        100,
        # BUGFIX: path was "tests/sample/annotation/tokenzer.csv" (both
        # directory and file name misspelled); use the same annotation
        # file as the word-level test above.
        annotation_train=os.path.abspath(
            "tests/samples/annotation/tokenizer.csv"
        ),
        annotation_read="char",
        char_format_input=True,
        model_type="bpe",
    )
    # "HELLO_MORNING_MORNING_HELLO" and "HELLO_MORNING_HELLO" spelled out
    # as per-character ids; the second row is zero-padded to match.
    chars_1 = [1, 2, 3, 3, 4, 10, 5, 4, 6, 7, 8, 7, 9, 10]
    chars_1 += [5, 4, 6, 7, 8, 7, 9, 10, 1, 2, 3, 3, 4]
    chars_2 = [1, 2, 3, 3, 4, 10, 5, 4, 6, 7, 8, 7, 9, 10, 1, 2, 3, 3, 4]
    chars_2 += [0] * (len(chars_1) - len(chars_2))
    encoded_seq_ids, encoded_seq_pieces = spm(
        torch.Tensor([chars_1, chars_2]),
        # Relative lengths: 27/27 and 19/27 characters are valid.
        torch.Tensor([1.0, 0.7037037037037037]),
        dict_int2lab,
        task="encode",
    )
    lens = (encoded_seq_pieces * encoded_seq_ids.shape[1]).round().int()
    # Decode from torch tensors (batch, batch_lens).
    words_seq = spm(encoded_seq_ids, encoded_seq_pieces, task="decode")
    assert words_seq == gt, "output not the same"
    # Decode from a list of bpe sequences (without padding).
    hyps_list = [
        encoded_seq_ids[0].int().tolist(),
        encoded_seq_ids[1][: lens[1]].int().tolist(),
    ]
    words_seq = spm(hyps_list, task="decode_from_list")
    assert words_seq == gt, "output not the same"
| 3,846 | 25.531034 | 72 | py |
speechbrain | speechbrain-main/tests/unittests/test_dataloader.py | import torch
import pytest
def test_saveable_dataloader(tmpdir, device):
    """A SaveableDataLoader checkpointed mid-epoch must resume from the
    same position when a fresh loader loads the checkpoint."""
    from speechbrain.dataio.dataloader import SaveableDataLoader
    save_file = tmpdir + "/dataloader.ckpt"
    dataset = torch.randn(10, 1, device=device)
    dataloader = SaveableDataLoader(dataset, collate_fn=None)
    data_iterator = iter(dataloader)
    first_item = next(data_iterator)
    assert first_item == dataset[0]
    # Save here (after one item has been consumed):
    dataloader._speechbrain_save(save_file)
    second_item = next(data_iterator)
    assert second_item == dataset[1]
    # Now make a new dataloader and recover:
    new_dataloader = SaveableDataLoader(dataset, collate_fn=None)
    new_dataloader._speechbrain_load(save_file, end_of_epoch=False, device=None)
    new_data_iterator = iter(new_dataloader)
    # The restored loader continues from item 1, not from the start.
    second_second_item = next(new_data_iterator)
    assert second_second_item == second_item
def test_saveable_dataloader_multiprocess(tmpdir):
    """Same mid-epoch save/resume check as test_saveable_dataloader, but
    repeated with 1-4 dataloader worker processes."""
    # Same test as above, but with multiprocess dataloading
    from speechbrain.dataio.dataloader import SaveableDataLoader
    save_file = tmpdir + "/dataloader.ckpt"
    dataset = torch.randn(10, 1)
    for num_parallel in [1, 2, 3, 4]:
        dataloader = SaveableDataLoader(
            dataset, num_workers=num_parallel, collate_fn=None
        )  # Note num_workers
        data_iterator = iter(dataloader)
        first_item = next(data_iterator)
        assert first_item == dataset[0]
        # Save here, note that this overwrites.
        dataloader._speechbrain_save(save_file)
        second_item = next(data_iterator)
        assert second_item == dataset[1]
        # Cleanup needed for MacOS (open file limit)
        del data_iterator
        del dataloader
        # Now make a new dataloader and recover:
        new_dataloader = SaveableDataLoader(
            dataset, num_workers=num_parallel, collate_fn=None
        )
        new_dataloader._speechbrain_load(
            save_file, end_of_epoch=False, device=None
        )
        new_data_iterator = iter(new_dataloader)
        # The restored loader must resume at item 1.
        second_second_item = next(new_data_iterator)
        assert second_second_item == second_item
        del new_data_iterator
        del new_dataloader
def test_looped_loader(tmpdir):
    """LoopedLoader must raise StopIteration at each epoch boundary
    (every `epoch_length` items), keep looping over the underlying data
    past its end, and resume its position from a saved checkpoint."""
    # Tests that LoopedLoader will raise StopIteration appropriately
    # And that it can recover and keep the place.
    from speechbrain.dataio.dataloader import LoopedLoader
    save_file = tmpdir + "/loopedloader.ckpt"
    data = range(3)
    dataloader = LoopedLoader(data, epoch_length=2)
    data_iterator = iter(dataloader)
    assert next(data_iterator) == 0
    # Save here, 1 to go:
    dataloader.save(save_file)
    assert next(data_iterator) == 1
    with pytest.raises(StopIteration):
        next(data_iterator)
    # And it can be continued past the range:
    assert next(data_iterator) == 2
    assert next(data_iterator) == 0
    # And again it raises:
    with pytest.raises(StopIteration):
        next(data_iterator)
    # Now make a new dataloader and recover:
    new_dataloader = LoopedLoader(data, epoch_length=2)
    new_dataloader.load(save_file, end_of_epoch=False, device=None)
    new_data_iterator = iter(new_dataloader)
    # One item left in the restored epoch, then the boundary again:
    next(new_data_iterator)
    with pytest.raises(StopIteration):
        next(new_data_iterator)
| 3,274 | 36.215909 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_attention.py | import torch
def test_rel_pos_MHA(device):
    """Smoke test: RelPosMHAXL runs a forward pass for every combination
    of key/query length, value-bias flag, and value head dimension."""
    from speechbrain.nnet.attention import RelPosMHAXL
    batch_size = 2
    emb_dim = 4
    # Enumerate all parameter combinations (same order as nested loops).
    combos = [
        (kl, ql, use_bias, vdim)
        for kl in [12, 10]
        for ql in [10, 12]
        for use_bias in [True, False]
        for vdim in [4, None]
    ]
    for kl, ql, use_bias, vdim in combos:
        attention = RelPosMHAXL(
            emb_dim, num_heads=2, vbias=use_bias, vdim=vdim
        ).to(device)
        queries = torch.rand((batch_size, ql, emb_dim), device=device)
        keys = torch.rand((batch_size, kl, emb_dim), device=device)
        # Relative positional embeddings cover 2*kl-1 offsets.
        pos_embs = torch.rand((1, 2 * kl - 1, emb_dim), device=device)
        attention(queries, keys, keys, pos_embs=pos_embs)
| 792 | 27.321429 | 69 | py |
speechbrain | speechbrain-main/tests/unittests/test_data_io.py | import torch
import os
def test_read_audio(tmpdir, device):
    """Write a random waveform to disk and check that read_audio returns
    the expected segment for start/stop annotations (each optional) and
    for a plain file path."""
    from speechbrain.dataio.dataio import read_audio, write_audio
    test_waveform = torch.rand(16000, device=device)
    wavfile = os.path.join(tmpdir, "wave.wav")
    write_audio(wavfile, test_waveform.cpu(), 16000)
    # Read back random segments via dict-style (annotation) inputs.
    for i in range(3):
        start = torch.randint(0, 8000, (1,), device=device).item()
        stop = start + torch.randint(500, 1000, (1,), device=device).item()
        loaded_range = read_audio(
            {"file": wavfile, "start": start, "stop": stop}
        ).to(device)
        assert loaded_range.allclose(test_waveform[start:stop], atol=1e-4)
        # Omitting "start" reads from the beginning of the file.
        loaded_omit_start = read_audio({"file": wavfile, "stop": stop}).to(
            device
        )
        assert loaded_omit_start.allclose(test_waveform[:stop], atol=1e-4)
        # Omitting "stop" reads until the end of the file.
        loaded_omit_stop = read_audio({"file": wavfile, "start": start}).to(
            device
        )
        assert loaded_omit_stop.allclose(test_waveform[start:], atol=1e-4)
    # A bare path string reads the whole file.
    loaded_simple = read_audio(wavfile).to(device)
    assert loaded_simple.allclose(test_waveform, atol=1e-4)
    # set to equal when switching to the sox_io backend
    # assert torch.all(torch.eq(loaded, test_waveform[0, start:stop]))
def test_read_audio_multichannel(tmpdir, device):
    """Check read_audio_multichannel both on a single stereo file and on
    channel-wise concatenation of two stereo files over the same range."""
    from speechbrain.dataio.dataio import read_audio_multichannel, write_audio
    test_waveform = torch.rand(16000, 2, device=device)
    wavfile = os.path.join(tmpdir, "wave.wav")
    # sf.write(wavfile, test_waveform, 16000, subtype="float")
    write_audio(wavfile, test_waveform.cpu(), 16000)
    # dummy annotation we save and load one multichannel file
    for i in range(2):
        start = torch.randint(0, 8000, (1,), device=device).item()
        stop = start + torch.randint(500, 1000, (1,), device=device).item()
        wav_obj = {"wav": {"files": [wavfile], "start": start, "stop": stop}}
        loaded = read_audio_multichannel(wav_obj["wav"]).to(device)
        assert loaded.allclose(test_waveform[start:stop, :], atol=1e-4)
        # set to equal when switching to the sox_io backend
        # assert torch.all(torch.eq(loaded, test_waveform[:,start:stop]))
    # we test now multiple files loading
    test_waveform_2 = torch.rand(16000, 2, device=device)
    wavfile_2 = os.path.join(tmpdir, "wave_2.wav")
    write_audio(wavfile_2, test_waveform_2.cpu(), 16000)
    # sf.write(wavfile_2, test_waveform_2, 16000, subtype="float")
    for i in range(2):
        start = torch.randint(0, 8000, (1,), device=device).item()
        stop = start + torch.randint(500, 1000, (1,), device=device).item()
        wav_obj = {
            "wav": {"files": [wavfile, wavfile_2], "start": start, "stop": stop}
        }
        loaded = read_audio_multichannel(wav_obj["wav"]).to(device)
        # Channels of the two files are stacked along the last dimension.
        test_waveform3 = torch.cat(
            (test_waveform[start:stop, :], test_waveform_2[start:stop, :]), 1
        )
        assert loaded.allclose(test_waveform3, atol=1e-4)
        # set to equal when switching to the sox_io backend
        # assert torch.all(
        #    torch.eq(
        #        loaded,
        #        torch.cat(
        #            (test_waveform[:,start:stop], test_waveform_2[:,start:stop]), 0
        #        ),
        #    )
        # )
| 3,324 | 37.218391 | 85 | py |
speechbrain | speechbrain-main/tests/unittests/test_g2p.py | import torch
from torch.nn import functional as F
def _fake_probs(idx, count):
result = torch.zeros(count)
result[idx] = 2.0
return F.softmax(result, dim=-1)
def _batch_fake_probs(indexes, count):
    """Expand a (batch, seq) tensor of target indexes into a
    (batch, seq, count) tensor of peaked probability distributions,
    one `_fake_probs` vector per position."""
    num_batches, seq_len = indexes.shape
    p_seq = torch.zeros(num_batches, seq_len, count)
    for b in range(num_batches):
        for t in range(seq_len):
            p_seq[b, t, :] = _fake_probs(indexes[b, t], count)
    return p_seq
def test_subsequence_loss():
    """SubsequenceLoss over the [start, end) word spans must equal the
    plain NLL loss computed directly on the extracted word sequences."""
    from speechbrain.nnet.losses import nll_loss
    from speechbrain.lobes.models.g2p.homograph import SubsequenceLoss
    phn_dim = 4
    # Reference phoneme sequences; 0 is the word separator / padding.
    phns = torch.tensor(
        [
            [1, 2, 3, 0, 3, 1, 2, 1, 0, 3, 1, 2, 0, 0],
            [1, 2, 3, 1, 0, 3, 2, 1, 0, 1, 3, 2, 0, 0],
            [1, 2, 3, 1, 2, 3, 0, 1, 3, 1, 3, 2, 0, 1],
        ]
    )
    phn_lens = torch.tensor([12, 12, 14])
    preds = torch.tensor(
        [
            [1, 3, 3, 0, 3, 1, 2, 1, 0, 3, 1, 2],
            [1, 1, 2, 1, 0, 3, 2, 1, 0, 1, 3, 2],
            [3, 2, 1, 1, 2, 3, 0, 1, 3, 2, 3, 3],
        ]
    )
    p_seq = _batch_fake_probs(preds, phn_dim)
    # Per-item [start, end) positions of the target word in the sequence.
    start = torch.tensor([0, 5, 7])
    end = torch.tensor([3, 8, 12])
    # Expected per-word predictions/references after span extraction.
    word_phns_pred = torch.tensor(
        [[1, 3, 3, 0, 0], [3, 2, 1, 0, 0], [1, 3, 2, 3, 3]]
    )
    word_phns_ref = torch.tensor(
        [[1, 2, 3, 0, 0], [3, 2, 1, 0, 0], [1, 3, 1, 3, 2]]
    )
    word_p_seq = _batch_fake_probs(word_phns_pred, phn_dim)
    word_lengths = torch.tensor([3, 3, 5]) / 5
    loss = SubsequenceLoss(seq_cost=nll_loss, word_separator=0)
    loss_value = loss.forward(phns, phn_lens, p_seq.log(), start, end)
    # Reference: direct NLL over the manually extracted word sequences.
    loss_value_ref = nll_loss(word_p_seq.log(), word_phns_ref, word_lengths)
    assert loss_value == loss_value_ref
def test_extract_hyps():
    """SubsequenceExtractor.extract_hyps must pull out of each hypothesis
    the word aligned with the span starting at subsequence_phn_start in
    the reference sequence (0 acts as the word separator)."""
    from speechbrain.lobes.models.g2p.homograph import SubsequenceExtractor
    phns = torch.tensor(
        [
            [1, 2, 3, 0, 3, 1, 2, 1, 0, 3, 1, 2, 0, 0],
            [1, 2, 3, 1, 0, 3, 2, 1, 0, 1, 3, 2, 0, 0],
            [1, 2, 3, 1, 2, 3, 0, 1, 3, 1, 3, 2, 0, 1],
        ]
    )
    # Hypotheses of varying length (unpadded lists).
    hyps = [
        [1, 2, 3, 2, 0, 3, 1, 2, 1, 0, 3, 1, 2],
        [1, 2, 3, 0, 3, 2, 1, 0, 1, 3, 2],
        [1, 2, 3, 1, 2, 3, 0, 1, 3, 1, 3, 2, 0, 1],
    ]
    subsequence_phn_start = torch.tensor([4, 0, 7])
    ref_hyps = [[3, 1, 2, 1], [1, 2, 3], [1, 3, 1, 3, 2]]
    extractor = SubsequenceExtractor(word_separator=0)
    subsequence_hyps = extractor.extract_hyps(
        ref_seq=phns, hyps=hyps, subsequence_phn_start=subsequence_phn_start
    )
    assert subsequence_hyps == ref_hyps
| 2,629 | 29.229885 | 76 | py |
speechbrain | speechbrain-main/tests/integration/ASR_alignment_viterbi/example_asr_alignment_viterbi_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains an HMM-based aligner with the Viterbi algorithm.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phoneme states.
Given the tiny dataset, the expected behavior is to overfit the training data
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class AlignBrain(sb.Brain):
    """Brain subclass that trains the HMM aligner: the model predicts
    phoneme-state posteriors and is supervised with the previous epoch's
    alignments; Viterbi alignments are recomputed at evaluation time."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the output probabilities."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x = self.modules.model(feats)
        x = self.modules.lin(x)
        outputs = self.hparams.softmax(x)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the forward loss."
        predictions, lens = predictions
        phns, phn_lens = batch.phn_encoded
        # Supervise with the alignments stored on the previous pass.
        prev_alignments = self.hparams.aligner.get_prev_alignments(
            batch.id, predictions, lens, phns, phn_lens
        )
        loss = self.hparams.compute_cost(predictions, prev_alignments)
        if stage != sb.Stage.TRAIN:
            # Recompute and store Viterbi alignments for the next epoch.
            viterbi_scores, alignments = self.hparams.aligner(
                predictions, lens, phns, phn_lens, "viterbi"
            )
            self.hparams.aligner.store_alignments(batch.id, alignments)
        return loss
    def on_stage_end(self, stage, stage_loss, epoch=None):
        "Gets called when a stage (either training, validation, test) ends."
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
            print("Valid loss: %.2f" % stage_loss)
            print("Recalculating and recording alignments...")
            # Re-run evaluation over the training data to refresh the
            # stored alignments used by the next training epoch.
            self.evaluate(self.hparams.train_data)
def data_prep(data_folder, hparams):
    """Creates the datasets and their data processing pipelines.
    Returns the (train_data, valid_data) DynamicItemDatasets, and also
    stores train_data in hparams so the brain can re-align over it.
    """
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    # The evaluate method of the brain class, needs to align over training data
    hparams["train_data"] = train_data
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 3. Fit encoder:
    # NOTE: In this minimal example, also update from valid data
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    label_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data
def main(device="cpu"):
    """Run the alignment example end-to-end: load hyperparameters,
    prepare data, train, evaluate, and assert the model overfits."""
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    data_folder = "../../samples/ASR/"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    ali_brain = AlignBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    ali_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    ali_brain.evaluate(valid_data)
    # Check if model overfits for integration test
    assert ali_brain.train_loss < 2.0
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()
def test_error(device):
    """Pytest entry point: run the full example on the given device."""
    main(device)
| 5,017 | 32.677852 | 79 | py |
speechbrain | speechbrain-main/tests/integration/separation/example_conv_tasnet.py | #!/usr/bin/env/python3
"""This minimal example trains a speech separation system with on a tiny dataset.
The architecture is based on ConvTasnet and expects in input mixtures of two
speakers.
"""
import torch
import pathlib
import speechbrain as sb
import torch.nn.functional as F
from hyperpyyaml import load_hyperpyyaml
from speechbrain.nnet.losses import get_si_snr_with_pitwrapper
class SepBrain(sb.Brain):
    """Brain subclass for two-speaker separation (ConvTasnet-style):
    encoder -> mask network -> masked decoding, trained with the
    permutation-invariant SI-SNR loss."""
    def compute_forward(self, mixture, stage):
        "Given an input batch it computes the two estimated sources."
        mixture = mixture.to(self.device)
        mix_w = self.hparams.encoder(mixture)
        est_mask = self.hparams.mask_net(mix_w)
        # Duplicate the encoded mixture, one copy per estimated source.
        mix_w = torch.stack([mix_w] * 2)
        sep_h = mix_w * est_mask
        # Decoding
        est_source = torch.cat(
            [self.hparams.decoder(sep_h[i]).unsqueeze(-1) for i in range(2)],
            dim=-1,
        )
        # T changed after conv1d in encoder, fix it here
        T_origin = mixture.size(1)
        T_conv = est_source.size(1)
        if T_origin > T_conv:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_conv))
        else:
            est_source = est_source[:, :T_origin, :]
        return est_source
    def compute_objectives(self, predictions, targets):
        "Given the network predictions and targets computed the PIT loss."
        loss = get_si_snr_with_pitwrapper(targets, predictions)
        return loss
    def fit_batch(self, batch):
        """Fits a training batch."""
        inputs = batch.mix_sig.data.to(self.device)
        # Stack both reference sources along the last dimension.
        targets = torch.cat(
            [
                batch.source1.data.unsqueeze(-1),
                batch.source2.data.unsqueeze(-1),
            ],
            dim=-1,
        ).to(self.device)
        predictions = self.compute_forward(inputs, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, targets)
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Evaluates a batch"""
        inputs = batch.mix_sig.data.to(self.device)
        targets = torch.cat(
            [
                batch.source1.data.unsqueeze(-1),
                batch.source2.data.unsqueeze(-1),
            ],
            dim=-1,
        ).to(self.device)
        predictions = self.compute_forward(inputs, stage)
        loss = self.compute_objectives(predictions, targets)
        return loss.detach()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        elif stage == sb.Stage.VALID:
            # Losses are negative SI-SNR, hence the sign flip for display.
            print("Completed epoch %d" % epoch)
            print("Train SI-SNR: %.3f" % -self.train_loss)
            print("Valid SI-SNR: %.3f" % -stage_loss)
        elif stage == sb.Stage.TEST:
            print("Test SI-SNR: %.3f" % -stage_loss)
def data_prep(data_folder, hparams):
    """Creates the separation datasets and their data processing
    pipelines; returns (train_data, valid_data)."""
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=data_folder / "../annotation/separation_train.csv",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=data_folder / "../annotation/separation_dev.csv",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    # 2. Define audio pipeline (mixture plus both reference sources):
    @sb.utils.data_pipeline.takes("mix_wav", "s1_wav", "s2_wav")
    @sb.utils.data_pipeline.provides("mix_sig", "source1", "source2")
    def audio_pipeline(mix_wav, s1_wav, s2_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        yield mix_sig
        source1 = sb.dataio.dataio.read_audio(s1_wav)
        yield source1
        source2 = sb.dataio.dataio.read_audio(s2_wav)
        yield source2
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "mix_sig", "source1", "source2"]
    )
    return train_data, valid_data
def main(device="cpu"):
    """Run the separation example end-to-end: load hyperparameters,
    prepare data, train, evaluate, and assert the model overfits."""
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    data_folder = "../../samples/separation"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    sep_brain = SepBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    sep_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    sep_brain.evaluate(valid_data)
    # Check if model overfits for integration test
    assert sep_brain.train_loss < 5.0
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()
def test_error(device):
    """Pytest entry point: run the full example on the given device."""
    main(device)
| 5,334 | 30.755952 | 81 | py |
speechbrain | speechbrain-main/tests/integration/G2P/example_g2p.py | #!/usr/bin/env/python3
"""This minimal example trains a grapheme-to-phoneme (G2P) converter
that turns a sequence of characters into a sequence of phonemes. The system uses
a standard attention-based encoder-decoder pipeline. The encoder is based on an
LSTM, while the decoder is based on a GRU. Greedy search applied on the top of
the output probabilities to detect the final sequence of phonemes. Given the
tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class seq2seqBrain(sb.Brain):
    """Brain subclass for attention-based G2P: an LSTM encoder over
    characters, a GRU decoder over phonemes, and greedy search plus PER
    tracking at evaluation time."""
    def compute_forward(self, batch, stage):
        "Given input chars it computes the phoneme's probabilities"
        batch = batch.to(self.device)
        chars, char_lens = batch.char_encoded
        phns, phn_lens = batch.phn_encoded_bos
        emb_char = self.modules.encoder_emb(chars)
        x, _ = self.modules.enc(emb_char)
        # Teacher forcing: feed the BOS-prefixed phoneme targets.
        e_in = self.modules.emb(phns)
        h, w = self.modules.dec(e_in, x, char_lens)
        logits = self.modules.lin(h)
        outputs = self.hparams.softmax(logits)
        if stage != sb.Stage.TRAIN:
            # Also decode with greedy search for PER scoring.
            seq, _ = self.hparams.searcher(x, char_lens)
            return outputs, seq
        return outputs
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the NLL loss."
        if stage == sb.Stage.TRAIN:
            outputs = predictions
        else:
            outputs, seq = predictions
        phns, phn_lens = batch.phn_encoded_eos
        loss = self.hparams.compute_cost(outputs, phns, length=phn_lens)
        if stage != sb.Stage.TRAIN:
            self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        "Gets called when a stage (either training, validation, test) ends."
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID and epoch is not None:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
            print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
    """Creates the G2P datasets and their data processing pipelines;
    returns (train_data, valid_data)."""
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    char_encoder = sb.dataio.encoder.TextEncoder()
    phn_encoder = sb.dataio.encoder.TextEncoder()
    # 2. Define char pipeline:
    @sb.utils.data_pipeline.takes("char")
    @sb.utils.data_pipeline.provides("char_list", "char_encoded")
    def char_pipeline(char):
        char_list = char.strip().split()
        yield char_list
        char_encoded = char_encoder.encode_sequence_torch(char_list)
        yield char_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, char_pipeline)
    # 3. Fit char encoder:
    # NOTE: In this minimal example, also update from valid data
    char_encoder.insert_bos_eos(bos_index=hparams["bos_index"])
    char_encoder.update_from_didataset(train_data, output_key="char_list")
    char_encoder.update_from_didataset(valid_data, output_key="char_list")
    # 4. Define phoneme pipeline (BOS-prefixed and EOS-suffixed variants):
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list", "phn_encoded_bos", "phn_encoded_eos"
    )
    def phn_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = phn_encoder.encode_sequence_torch(phn_list)
        phn_encoded_bos = phn_encoder.prepend_bos_index(phn_encoded).long()
        yield phn_encoded_bos
        phn_encoded_eos = phn_encoder.append_eos_index(phn_encoded).long()
        yield phn_encoded_eos
    sb.dataio.dataset.add_dynamic_item(datasets, phn_pipeline)
    # 5. Fit phoneme encoder:
    # NOTE: In this minimal example, also update from valid data
    phn_encoder.insert_bos_eos(bos_index=hparams["bos_index"])
    phn_encoder.update_from_didataset(train_data, output_key="phn_list")
    phn_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 6. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "char_encoded", "phn_encoded_eos", "phn_encoded_bos"]
    )
    return train_data, valid_data
def main(device="cpu"):
    """Run the G2P example end-to-end: load hyperparameters, prepare
    data, train, evaluate, and assert the model overfits."""
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    data_folder = "../../samples/ASR"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    seq2seq_brain = seq2seqBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    seq2seq_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    seq2seq_brain.evaluate(valid_data)
    # Check that model overfits for integration test
    assert seq2seq_brain.train_loss < 1.0
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()
def test_error(device):
    """Pytest entry point: run the full example on the given device."""
    main(device)
| 6,166 | 34.854651 | 80 | py |
speechbrain | speechbrain-main/tests/integration/ASR_CTC/example_asr_ctc_experiment_complex_net.py | #!/usr/bin/env/python3
"""This minimal example trains a CTC-based speech recognizer on a tiny dataset.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phonemes. A greedy search is used on
top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class CTCBrain(sb.Brain):
    """Brain subclass for CTC phoneme recognition: a CRDNN acoustic model
    trained with CTC loss, with greedy CTC decoding and PER tracking at
    evaluation time."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the output probabilities."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x = self.modules.model(feats)
        x = self.modules.lin(x)
        outputs = self.hparams.softmax(x)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the CTC loss."
        predictions, lens = predictions
        phns, phn_lens = batch.phn_encoded
        loss = self.hparams.compute_cost(predictions, phns, lens, phn_lens)
        if stage != sb.Stage.TRAIN:
            # Greedy CTC decoding for PER scoring.
            seq = sb.decoders.ctc_greedy_decode(
                predictions, lens, blank_id=self.hparams.blank_index
            )
            self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID and epoch is not None:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
            print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
    """Creates the CTC datasets and their data processing pipelines;
    returns (train_data, valid_data)."""
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 3. Fit encoder (reserving the CTC blank index):
    # NOTE: In this minimal example, also update from valid data
    label_encoder.insert_blank(index=hparams["blank_index"])
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    label_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data
def main(device="cpu"):
    """Run the CTC example end-to-end: load hyperparameters, prepare
    data, train, evaluate, and assert the model overfits."""
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams_complex_net.yaml"
    data_folder = "../../samples/ASR"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    ctc_brain = CTCBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    ctc_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    ctc_brain.evaluate(valid_data)
    # Check if model overfits for integration test
    assert ctc_brain.train_loss < 1.0
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()
def test_error(device):
    """Pytest entry point: run the full example on the given device."""
    main(device)
| 5,107 | 33.053333 | 80 | py |
speechbrain | speechbrain-main/tests/integration/ASR_CTC/example_asr_ctc_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a CTC-based speech recognizer on a tiny dataset.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phonemes. A greedy search is used on
top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class CTCBrain(sb.Brain):
    """Brain subclass for CTC phoneme recognition: a CRDNN acoustic model
    trained with CTC loss, with greedy CTC decoding and PER tracking at
    evaluation time."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the output probabilities."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x = self.modules.model(feats)
        x = self.modules.lin(x)
        outputs = self.hparams.softmax(x)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the CTC loss."
        predictions, lens = predictions
        phns, phn_lens = batch.phn_encoded
        loss = self.hparams.compute_cost(predictions, phns, lens, phn_lens)
        if stage != sb.Stage.TRAIN:
            # Greedy CTC decoding for PER scoring.
            seq = sb.decoders.ctc_greedy_decode(
                predictions, lens, blank_id=self.hparams.blank_index
            )
            self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID and epoch is not None:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
            print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
    """Creates the train/valid datasets and their data processing pipelines.

    Returns the two ``DynamicItemDataset`` objects with ``sig`` and
    ``phn_encoded`` outputs attached.
    """
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # NOTE: In this minimal example, also update from valid data
    label_encoder.insert_blank(index=hparams["blank_index"])
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    label_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data
def main(device="cpu"):
    """Runs the full integration recipe — data prep, training, and
    evaluation — for the CTC example.

    Arguments
    ---------
    device : str
        Torch device string passed to the Brain via ``run_opts``.
    """
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    data_folder = "../../samples/ASR/"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    ctc_brain = CTCBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    ctc_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    ctc_brain.evaluate(valid_data)
    # Check if model overfits for integration test
    assert ctc_brain.train_loss < 1.0
if __name__ == "__main__":
main()
def test_error(device):
    """Pytest entry point: runs the whole recipe on the given device."""
    main(device)
| 5,096 | 32.98 | 80 | py |
speechbrain | speechbrain-main/tests/integration/ASR_CTC/example_asr_ctc_experiment_quaternion_net.py | #!/usr/bin/env/python3
"""This minimal example trains a CTC-based speech recognizer on a tiny dataset.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phonemes. A greedy search is used on
top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class CTCBrain(sb.Brain):
def compute_forward(self, batch, stage):
"Given an input batch it computes the output probabilities."
batch = batch.to(self.device)
wavs, lens = batch.sig
feats = self.modules.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
x = self.modules.model(feats)
x = self.modules.lin(x)
outputs = self.hparams.softmax(x)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the CTC loss."
predictions, lens = predictions
phns, phn_lens = batch.phn_encoded
loss = self.hparams.compute_cost(predictions, phns, lens, phn_lens)
if stage != sb.Stage.TRAIN:
seq = sb.decoders.ctc_greedy_decode(
predictions, lens, blank_id=self.hparams.blank_index
)
self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
return loss
def on_stage_start(self, stage, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
if stage != sb.Stage.TRAIN:
self.per_metrics = self.hparams.per_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
if stage == sb.Stage.VALID and epoch is not None:
print("Epoch %d complete" % epoch)
print("Train loss: %.2f" % self.train_loss)
if stage != sb.Stage.TRAIN:
print(stage, "loss: %.2f" % stage_loss)
print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("phn")
@sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
def text_pipeline(phn):
phn_list = phn.strip().split()
yield phn_list
phn_encoded = label_encoder.encode_sequence_torch(phn_list)
yield phn_encoded
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 3. Fit encoder:
# NOTE: In this minimal example, also update from valid data
label_encoder.insert_blank(index=hparams["blank_index"])
label_encoder.update_from_didataset(train_data, output_key="phn_list")
label_encoder.update_from_didataset(valid_data, output_key="phn_list")
# 4. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
return train_data, valid_data
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams_quaternion_net.yaml"
data_folder = "../../samples/ASR"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
ctc_brain = CTCBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
ctc_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
ctc_brain.evaluate(valid_data)
# Check if model overfits for integration test
assert ctc_brain.train_loss < 1.0
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 5,110 | 33.073333 | 80 | py |
speechbrain | speechbrain-main/tests/integration/ASR_Transducer/example_asr_transducer_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a RNNT-based speech recognizer on a tiny dataset.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phonemes. A beamsearch is used on
top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class TransducerBrain(sb.Brain):
def compute_forward(self, batch, stage):
"Given an input batch it computes the output probabilities."
batch = batch.to(self.device)
wavs, lens = batch.sig
feats = self.modules.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
# Transcription network: input-output dependency
TN_output = self.modules.enc(feats)
TN_output = self.modules.enc_lin(TN_output)
# Prediction network: output-output dependency
targets, target_lens = batch.phn_encoded_bos
PN_output = self.modules.emb(targets)
PN_output, _ = self.modules.dec(PN_output)
PN_output = self.modules.dec_lin(PN_output)
# Joint the networks
joint = self.modules.Tjoint(
TN_output.unsqueeze(2), PN_output.unsqueeze(1),
)
outputs = self.modules.output(joint)
outputs = self.hparams.log_softmax(outputs)
if stage == sb.Stage.TRAIN:
return outputs, lens
else:
hyps, scores, _, _ = self.hparams.searcher(TN_output)
return outputs, lens, hyps
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the CTC loss."
phns, phn_lens = batch.phn_encoded
if stage == sb.Stage.TRAIN:
predictions, lens = predictions
else:
predictions, lens, seq = predictions
self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
loss = self.hparams.compute_cost(
predictions,
phns.to(self.device).long(),
lens,
phn_lens.to(self.device),
)
return loss
def on_stage_start(self, stage, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
if stage != sb.Stage.TRAIN:
self.per_metrics = self.hparams.per_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
if stage == sb.Stage.VALID and epoch is not None:
print("Epoch %d complete" % epoch)
print("Train loss: %.2f" % self.train_loss)
if stage != sb.Stage.TRAIN:
print(stage, "loss: %.2f" % stage_loss)
print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("phn")
@sb.utils.data_pipeline.provides(
"phn_list", "phn_encoded", "phn_encoded_bos"
)
def text_pipeline(phn):
phn_list = phn.strip().split()
yield phn_list
phn_encoded = label_encoder.encode_sequence_torch(phn_list)
yield phn_encoded
phn_encoded_bos = label_encoder.prepend_bos_index(phn_encoded).long()
yield phn_encoded_bos
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 3. Fit encoder:
# NOTE: In this minimal example, also update from valid data
label_encoder.insert_blank(index=hparams["blank_index"])
label_encoder.insert_bos_eos(
bos_index=hparams["bos_index"], eos_label="<bos>"
)
label_encoder.update_from_didataset(train_data, output_key="phn_list")
label_encoder.update_from_didataset(valid_data, output_key="phn_list")
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "phn_encoded", "phn_encoded_bos"]
)
return train_data, valid_data, label_encoder
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams.yaml"
data_folder = "../../samples/ASR"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data, label_encoder = data_prep(data_folder, hparams)
# Trainer initialization
transducer_brain = TransducerBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
transducer_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
transducer_brain.evaluate(valid_data)
# Check that model overfits for integration test
assert transducer_brain.train_loss < 90.0
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 6,161 | 34.011364 | 80 | py |
speechbrain | speechbrain-main/tests/integration/LM_RNN/example_lm_rnn_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a character-level language model that predicts
the next characters given the previous ones. The system uses a standard
attention-based encoder-decoder pipeline. The encoder is based on a simple LSTM.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import math
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class LMBrain(sb.Brain):
    """Character-level language model trained with NLL; reports loss and
    perplexity at the end of validation/test stages."""

    def compute_forward(self, batch, stage):
        """Given BOS-prefixed input chars, computes next-char log-probs.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``char_encoded_bos`` (chars, relative lengths).
        stage : sb.Stage
            Current stage (forward pass is identical for all stages).

        Returns
        -------
        torch.Tensor
            Log-posterior probabilities over the character vocabulary.
        """
        batch = batch.to(self.device)
        chars, char_lens = batch.char_encoded_bos
        logits = self.modules.model(chars)
        pout = self.hparams.log_softmax(logits)
        return pout

    def compute_objectives(self, predictions, batch, stage):
        """Computes the NLL loss against the EOS-suffixed targets."""
        chars, char_lens = batch.char_encoded_eos
        loss = self.hparams.compute_cost(predictions, chars, length=char_lens)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage; logs loss and perplexity."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Guard epoch against None so "%d" cannot raise when a VALID stage
        # runs outside fit() — matches the other minimal examples.
        if stage == sb.Stage.VALID and epoch is not None:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
            # math.exp(x) is the idiomatic, more accurate form of e ** x.
            perplexity = math.exp(stage_loss)
            print(stage, "perplexity: %.2f" % perplexity)
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
char_encoder = sb.dataio.encoder.TextEncoder()
# 2. Define char pipeline:
@sb.utils.data_pipeline.takes("char")
@sb.utils.data_pipeline.provides(
"char_list", "char_encoded_bos", "char_encoded_eos"
)
def char_pipeline(char):
char_list = char.strip().split()
yield char_list
char_encoded = char_encoder.encode_sequence_torch(char_list)
char_encoded_bos = char_encoder.prepend_bos_index(char_encoded).long()
yield char_encoded_bos
char_encoded_eos = char_encoder.append_eos_index(char_encoded).long()
yield char_encoded_eos
sb.dataio.dataset.add_dynamic_item(datasets, char_pipeline)
# 3. Fit encoder:
# NOTE: In this minimal example, also update from valid data
char_encoder.insert_bos_eos(bos_index=hparams["bos_index"])
char_encoder.update_from_didataset(train_data, output_key="char_list")
char_encoder.update_from_didataset(valid_data, output_key="char_list")
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "char_encoded_bos", "char_encoded_eos"]
)
return train_data, valid_data
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams.yaml"
data_folder = "../../samples/ASR"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
lm_brain = LMBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
lm_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
lm_brain.evaluate(valid_data)
# Check that model overfits for integration test
assert lm_brain.train_loss < 0.15
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 4,471 | 33.666667 | 80 | py |
speechbrain | speechbrain-main/tests/integration/VAD/example_vad.py | """This minimal example trains a Voice Activity Detector (VAD) on a tiny dataset.
The network is based on a LSTM with a linear transformation on the top of that.
The system is trained with the binary cross-entropy metric.
"""
import os
import torch
import numpy as np
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class VADBrain(sb.Brain):
    """Minimal LSTM-based voice activity detector trained with binary
    cross-entropy on frame-level speech/non-speech targets."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the binary probability."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x, _ = self.modules.rnn(feats)
        outputs = self.modules.lin(x)
        return outputs, lens
    # NOTE(review): stage defaults to True rather than an sb.Stage value;
    # callers in Brain always pass stage explicitly, but confirm before
    # relying on the default.
    def compute_objectives(self, predictions, batch, stage=True):
        "Given the network predictions and targets computed the binary CE"
        predictions, lens = predictions
        # NOTE(review): this rebinding replaces the prediction lengths with
        # the target lengths; presumably both are identical per utterance.
        targets, lens = batch.target
        targets = targets.to(predictions.device)
        # Trim predictions to the target length and drop the channel dim.
        predictions = predictions[:, : targets.shape[-1], 0]
        loss = self.hparams.compute_BCE_cost(
            torch.nn.BCEWithLogitsLoss(reduction="none"),
            predictions,
            targets,
            lens,
        )
        # compute metrics
        self.binary_metrics.append(
            batch.id, torch.sigmoid(predictions), targets
        )
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        # Fresh precision/recall tracker for every stage.
        self.binary_metrics = sb.utils.metric_stats.BinaryMetricStats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            train_summary = self.binary_metrics.summarize(threshold=0.5)
            print("Epoch %d completed" % epoch)
            print("Train loss: %.4f" % stage_loss)
            print("Train Precision: %.2f" % train_summary["precision"])
            print("Train Recall: %.2f" % train_summary["recall"])
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=os.path.join(data_folder, "../annotation/VAD_train.json"),
replacements={"data_folder": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=os.path.join(data_folder, "../annotation/VAD_dev.json"),
replacements={"data_folder": data_folder},
)
datasets = [train_data, valid_data]
# 1. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
# 2. vad targets creation from annotated speech boundaries
@sb.utils.data_pipeline.takes("speech")
@sb.utils.data_pipeline.provides("target")
def vad_targets(string, hparams=hparams):
if len(string) > 0:
boundaries = string.split(" ")
# we group by two
# 0.01 is 10 ms hop size ...
boundaries = [int(float(x) / 0.01) for x in boundaries]
boundaries = list(zip(boundaries[::2], boundaries[1::2]))
else:
boundaries = []
gt = torch.zeros(int(np.ceil(hparams["example_length"] * (1 / 0.01))))
for indxs in boundaries:
start, stop = indxs
gt[start:stop] = 1
return gt
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
sb.dataio.dataset.add_dynamic_item(datasets, vad_targets)
# 3. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "target"])
return train_data, valid_data
def main(device="cpu"):
experiment_dir = os.path.dirname(os.path.abspath(__file__))
hparams_file = os.path.join(experiment_dir, "hyperparams.yaml")
data_folder = "/../../samples/VAD"
data_folder = os.path.abspath(experiment_dir + data_folder)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Data IO creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
ctc_brain = VADBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
ctc_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
ctc_brain.evaluate(valid_data)
# Check if model overfits for integration test
assert ctc_brain.train_loss < 1.0
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 4,976 | 31.109677 | 81 | py |
speechbrain | speechbrain-main/tests/integration/ASR_alignment_forward/example_asr_alignment_forward_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains an HMM-based aligner with the forward algorithm.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phoneme states.
Given the tiny dataset, the expected behavior is to overfit the training data
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class AlignBrain(sb.Brain):
def compute_forward(self, batch, stage):
"Given an input batch it computes the output probabilities."
batch = batch.to(self.device)
wavs, lens = batch.sig
feats = self.hparams.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
x = self.modules.model(feats)
x = self.modules.lin(x)
outputs = self.hparams.softmax(x)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the forward loss."
predictions, lens = predictions
phns, phn_lens = batch.phn_encoded
sum_alpha_T = self.hparams.aligner(
predictions, lens, phns, phn_lens, "forward"
)
loss = -sum_alpha_T.sum()
if stage != sb.Stage.TRAIN:
viterbi_scores, alignments = self.hparams.aligner(
predictions, lens, phns, phn_lens, "viterbi"
)
return loss
def on_stage_end(self, stage, stage_loss, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
if stage == sb.Stage.VALID:
print("Epoch %d complete" % epoch)
print("Train loss: %.2f" % self.train_loss)
if stage != sb.Stage.TRAIN:
print(stage, "loss: %.2f" % stage_loss)
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("phn")
@sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
def text_pipeline(phn):
phn_list = phn.strip().split()
yield phn_list
phn_encoded = label_encoder.encode_sequence_torch(phn_list)
yield phn_encoded
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 3. Fit encoder:
# NOTE: In this minimal example, also update from valid data
label_encoder.update_from_didataset(train_data, output_key="phn_list")
label_encoder.update_from_didataset(valid_data, output_key="phn_list")
# 4. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
return train_data, valid_data
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams.yaml"
data_folder = "../../samples/ASR/"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
ali_brain = AlignBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
ali_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
ali_brain.evaluate(valid_data)
# Check if model overfits for integration test
assert ali_brain.train_loss < 350
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 4,687 | 31.783217 | 79 | py |
speechbrain | speechbrain-main/tests/integration/ASR_seq2seq/example_asr_seq2seq_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a seq2seq attention-based model for speech
recognition on a tiny dataset. The encoder is based on a combination of
convolutional, recurrent, and feed-forward networks (CRDNN). The decoder is
based on a GRU. A greedy search is used on top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class seq2seqBrain(sb.Brain):
def compute_forward(self, batch, stage):
"Given an input batch it computes the output probabilities."
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
phns_bos, _ = batch.phn_encoded_bos
feats = self.hparams.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, wav_lens)
x = self.modules.enc(feats)
# Prepend bos token at the beginning
e_in = self.modules.emb(phns_bos)
h, w = self.modules.dec(e_in, x, wav_lens)
logits = self.modules.lin(h)
outputs = self.hparams.softmax(logits)
if stage != sb.Stage.TRAIN:
seq, _ = self.hparams.searcher(x, wav_lens)
return outputs, seq
return outputs
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the NLL loss."
if stage == sb.Stage.TRAIN:
outputs = predictions
else:
outputs, seq = predictions
ids = batch.id
phns, phn_lens = batch.phn_encoded_eos
loss = self.hparams.compute_cost(outputs, phns, length=phn_lens)
if stage != sb.Stage.TRAIN:
self.per_metrics.append(ids, seq, phns, target_len=phn_lens)
return loss
def fit_batch(self, batch):
"""Fits train batches"""
preds = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(preds, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage=sb.Stage.TEST):
"""Evaluates test batches"""
out = self.compute_forward(batch, stage)
loss = self.compute_objectives(out, batch, stage)
return loss.detach()
def on_stage_start(self, stage, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
if stage != sb.Stage.TRAIN:
self.per_metrics = self.hparams.per_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"Gets called when a stage (either training, validation, test) ends."
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
if stage == sb.Stage.VALID and epoch is not None:
print("Epoch %d complete" % epoch)
print("Train loss: %.2f" % self.train_loss)
if stage != sb.Stage.TRAIN:
print(stage, "loss: %.2f" % stage_loss)
print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.TextEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("phn")
@sb.utils.data_pipeline.provides(
"phn_list", "phn_encoded_bos", "phn_encoded_eos"
)
def text_pipeline(phn):
phn_list = phn.strip().split()
yield phn_list
phn_encoded = label_encoder.encode_sequence_torch(phn_list)
phn_encoded_bos = label_encoder.prepend_bos_index(phn_encoded).long()
yield phn_encoded_bos
phn_encoded_eos = label_encoder.append_eos_index(phn_encoded).long()
yield phn_encoded_eos
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 3. Fit encoder:
# NOTE: In this minimal example, also update from valid data
label_encoder.insert_bos_eos(bos_index=hparams["bos_index"])
label_encoder.update_from_didataset(train_data, output_key="phn_list")
label_encoder.update_from_didataset(valid_data, output_key="phn_list")
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "phn_encoded_eos", "phn_encoded_bos"]
)
return train_data, valid_data
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams.yaml"
data_folder = "../../samples/ASR"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
seq2seq_brain = seq2seqBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
seq2seq_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
seq2seq_brain.evaluate(valid_data)
# Check that model overfits for integration test
assert seq2seq_brain.train_loss < 1.0
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 6,322 | 33.933702 | 80 | py |
speechbrain | speechbrain-main/tests/integration/enhance_GAN/example_enhance_gan_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a GAN speech enhancement system on a tiny dataset.
The generator and the discriminator are based on convolutional networks.
"""
import torch
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class EnhanceGanBrain(sb.Brain):
    """Brain for adversarial (GAN) speech enhancement.

    ``modules.generator`` maps noisy waveforms to enhanced ones and
    ``modules.discriminator`` scores waveforms (1 = clean/real, 0 = fake).
    Each ``fit_batch`` performs one generator update followed by one
    discriminator update, each with its own optimizer
    (see ``init_optimizers``).
    """
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the enhanced signal"
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        # Corrupt the clean signal, then add a trailing channel dimension
        # before feeding the convolutional generator.
        noisy = self.hparams.add_noise(wavs, lens).unsqueeze(-1)
        enhanced = self.modules.generator(noisy)
        return enhanced
    def compute_objectives(self, predictions, batch, stage, optim_name=""):
        "Given the network predictions and targets computed the total loss"
        clean_wavs, lens = batch.sig
        batch_size = clean_wavs.size(0)
        # Average the predictions of each time step
        clean_wavs = clean_wavs.unsqueeze(-1)
        real_result = self.modules.discriminator(clean_wavs).mean(dim=1)
        simu_result = self.modules.discriminator(predictions).mean(dim=1)
        real_cost = 0
        simu_cost = 0
        # Mapping loss: pulls the enhanced signal toward the clean target.
        map_cost = self.hparams.compute_cost(predictions, clean_wavs, lens)
        # One is real, zero is fake
        if optim_name == "generator":
            # Generator tries to make the discriminator label fakes as real.
            simu_target = torch.ones(batch_size, 1, device=self.device)
            simu_cost = self.hparams.compute_cost(simu_result, simu_target)
            real_cost = 0.0
            self.metrics["G"].append(simu_cost.detach())
        elif optim_name == "discriminator":
            # Discriminator learns to separate clean (1) from enhanced (0).
            real_target = torch.ones(batch_size, 1, device=self.device)
            simu_target = torch.zeros(batch_size, 1, device=self.device)
            real_cost = self.hparams.compute_cost(real_result, real_target)
            simu_cost = self.hparams.compute_cost(simu_result, simu_target)
            self.metrics["D"].append((real_cost + simu_cost).detach())
        return real_cost + simu_cost + map_cost
    def fit_batch(self, batch):
        "Trains the GAN with a batch"
        # Generator step first (only g_optimizer is stepped here).
        self.g_optimizer.zero_grad()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        g_loss = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN, "generator"
        )
        g_loss.backward()
        self.g_optimizer.step()
        # Discriminator step on a fresh forward pass of the same batch.
        self.d_optimizer.zero_grad()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        d_loss = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN, "discriminator"
        )
        d_loss.backward()
        self.d_optimizer.step()
        return g_loss.detach() + d_loss.detach()
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage == sb.Stage.TRAIN:
            # Per-epoch G/D loss histories, summarized in on_stage_end.
            self.metrics = {"G": [], "D": []}
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            g_loss = torch.tensor(self.metrics["G"])
            d_loss = torch.tensor(self.metrics["D"])
            print("Avg G loss: %.2f" % torch.mean(g_loss))
            print("Avg D loss: %.2f" % torch.mean(d_loss))
            print("train loss: ", stage_loss)
        elif stage == sb.Stage.VALID:
            print("Completed epoch %d" % epoch)
            print("Valid loss: %.3f" % stage_loss)
        else:
            # TEST stage: stash the loss for the integration-test assertion.
            self.test_loss = stage_loss
    def init_optimizers(self):
        """Initializes the generator and discriminator optimizers"""
        self.g_optimizer = self.hparams.g_opt_class(
            self.modules.generator.parameters()
        )
        self.d_optimizer = self.hparams.d_opt_class(
            self.modules.discriminator.parameters()
        )
    def zero_grad(self, set_to_none=False):
        """Sets the gradients of all optimized `torch.Tensor`s to zero."""
        self.g_optimizer.zero_grad(set_to_none)
        self.d_optimizer.zero_grad(set_to_none)
def data_prep(data_folder):
    """Build the train/valid DynamicItemDatasets and their audio pipeline.

    Arguments
    ---------
    data_folder : pathlib.Path
        Folder containing the sample ASR data.

    Returns
    -------
    tuple
        ``(train_data, valid_data)`` datasets exposing ``id`` and ``sig``.
    """

    def _load_split(json_name):
        # Load one annotation file as a DynamicItemDataset.
        return sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_folder / f"../annotation/{json_name}",
            replacements={"data_root": data_folder},
        )

    train_data = _load_split("ASR_train.json")
    valid_data = _load_split("ASR_dev.json")
    datasets = [train_data, valid_data]

    # Dynamic item: read the waveform at the "wav" path into "sig".
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # Expose only the keys the training loop consumes.
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"])
    return train_data, valid_data
def main(device="cpu"):
    """Train the minimal GAN enhancer and check the final test loss.

    Arguments
    ---------
    device : str
        Torch device string passed to the Brain via ``run_opts``.
    """
    exp_dir = pathlib.Path(__file__).resolve().parent
    yaml_path = exp_dir / "hyperparams.yaml"
    samples_dir = (exp_dir / "../../samples/ASR").resolve()
    # Load the experiment hyperparameters.
    with open(yaml_path) as stream:
        hparams = load_hyperpyyaml(stream)
    # Build the datasets.
    train_data, valid_data = data_prep(samples_dir)
    # No opt_class here: the GAN creates its two optimizers itself
    # in EnhanceGanBrain.init_optimizers.
    gan_brain = EnhanceGanBrain(
        modules=hparams["modules"], hparams=hparams, run_opts={"device": device}
    )
    # Training/validation loop.
    gan_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation stores the MSE mapping loss in gan_brain.test_loss.
    gan_brain.evaluate(valid_data)
    # Check test loss (mse); the train loss is the adversarial GAN loss.
    assert gan_brain.test_loss < 0.002
if __name__ == "__main__":
    main()
def test_loss(device):
    # pytest entry point: train the GAN and check the final test loss.
    main(device)
| 6,058 | 33.821839 | 81 | py |
speechbrain | speechbrain-main/tests/integration/sampling/example_sorting.py | """This minimal example checks on sampling with ascending/descending sorting and random shuffling; w/ & w/o DDP.
"""
import os
import torch
import pickle
import pathlib
import itertools
import speechbrain as sb
import torch.multiprocessing as mp
from hyperpyyaml import load_hyperpyyaml
class SamplingBrain(sb.Brain):
    """Brain that verifies data-loader sampling order instead of modeling.

    During training, ``compute_forward`` records the sampled batch IDs and
    asserts that batch durations respect the configured ``hparams.sorting``
    (``ascending``, ``descending``, or ``random``). ``on_stage_end`` checks
    ID uniqueness and, under DDP, pickles the visited IDs per rank so the
    spawning test can verify full coverage.
    """
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the binary probability."
        batch = batch.to(self.device)
        lens = batch.duration
        if stage == sb.Stage.TRAIN:
            # Remember which IDs were sampled, to verify coverage later.
            self.ids_list.append(batch.id)
        if self.hparams.sorting == "ascending":
            # ignore last; non-evenly divisible data; 99 items -> last batch: 19 -> 20 items (thus, nearby sort)
            if not all(
                [x == y for x, y in zip(lens[:-1], sorted(lens[:-1]))]
            ):  # ":-1" is specific to dummy data
                print(lens)
                assert False
        elif self.hparams.sorting == "descending":
            if not all(
                [
                    x == y
                    for x, y in zip(
                        lens[:-1], sorted(lens[:-1], reverse=True)
                    )
                ]
            ):
                print(lens)
                assert False
        elif self.hparams.sorting == "random":
            # A random batch should (with high probability) not be sorted.
            assert not all(
                [x == y for x, y in zip(lens[:-1], sorted(lens[:-1]))]
            )
        else:
            raise NotImplementedError(
                "sorting must be random, ascending or descending"
            )
        return lens
    def compute_objectives(self, predictions, batch, stage=True):
        "Given the network predictions and targets computed the binary CE"
        # Constant dummy loss so Brain.fit has something to backprop.
        inputs = torch.tensor([10.0, -6.0], requires_grad=True)
        targets = torch.tensor([1, 0])
        loss = self.hparams.compute_loss(inputs, targets)
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage == sb.Stage.TRAIN:
            self.ids_list = []
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            # check that all IDs are unique; no duplicate IDs
            batched_ids = sorted(list(itertools.chain(*self.ids_list)))
            assert batched_ids == sorted(list(set(batched_ids)))
            # write out to check later all IDs were visited
            if self.distributed_launch:
                with open(
                    f"tests/tmp/ddp_sorting_ids_{self.hparams.sorting}_{self.hparams.rank}",
                    "wb",
                ) as f:
                    pickle.dump(batched_ids, f)
def data_prep(data_folder, hparams):
    """Create the train/valid datasets, sorting the training set as configured.

    Arguments
    ---------
    data_folder : pathlib.Path
        Folder containing the sample annotation CSV.
    hparams : dict
        Loaded hyperparameters; ``hparams["sorting"]`` selects the order and
        ``hparams["dataloader_options"]["shuffle"]`` is disabled when sorting.

    Returns
    -------
    tuple
        ``(train_data, valid_data)`` datasets exposing ``id`` and ``duration``.
    """
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=data_folder / "annotation/dev-clean.csv",
        replacements={"data_root": data_folder},
    )
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sort by duration; shuffling would defeat the purpose of sorting.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending"),
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif sorting == "random":
        pass  # keep the default loader behavior
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=data_folder / "annotation/dev-clean.csv",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]

    # Identity pipeline: expose "duration" as a dynamic item.
    @sb.utils.data_pipeline.takes("duration")
    @sb.utils.data_pipeline.provides("duration")
    def audio_pipeline(duration):
        return duration

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    sb.dataio.dataset.set_output_keys(datasets, ["id", "duration"])
    return train_data, valid_data
def recipe(device="cpu", yaml_file="hyperparams.yaml", run_opts=None):
    """Run one sorting-check training pass for the given hyperparameter file.

    Arguments
    ---------
    device : str
        Torch device string, stored into ``run_opts["device"]``.
    yaml_file : str
        Name of the hyperparameter YAML (random/asc/dsc variants).
    run_opts : dict or None
        Optional DDP run options; when given, must contain ``local_rank``.
    """
    exp_dir = pathlib.Path(__file__).resolve().parent
    yaml_path = os.path.join(exp_dir, yaml_file)
    samples_dir = (exp_dir / "../../samples/").resolve()
    with open(yaml_path) as stream:
        hparams = load_hyperpyyaml(stream)
    # usually here: sb.utils.distributed.ddp_init_group(run_opts)
    # Data IO creation.
    train_data, valid_data = data_prep(samples_dir, hparams)
    if run_opts is None:
        run_opts = {}
    else:
        # DDP path: remember this process's rank for result bookkeeping.
        hparams["rank"] = run_opts["local_rank"]
    run_opts["device"] = device
    ctc_brain = SamplingBrain(
        hparams["modules"], hparams["opt_class"], hparams, run_opts=run_opts,
    )
    # Training/validation loop (the Brain only checks sampling order).
    ctc_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
if __name__ == "__main__":
    recipe(yaml_file="random.yaml")
    recipe(yaml_file="asc.yaml")
    recipe(yaml_file="dsc.yaml")
def test_error(device):
    # pytest entry point: exercise all three sorting modes on the device.
    recipe(device=device, yaml_file="random.yaml")
    recipe(device=device, yaml_file="asc.yaml")
    recipe(device=device, yaml_file="dsc.yaml")
def ddp_recipes(rank, size, backend="gloo"):
    """Configure the distributed environment for this worker and run recipes.

    Arguments
    ---------
    rank : int
        Rank of this worker process.
    size : int
        Total number of worker processes (world size).
    backend : str
        torch.distributed backend name (default "gloo").
    """
    # Rendezvous settings read by torch.distributed via ddp_init_group.
    os.environ.update(
        {
            "WORLD_SIZE": f"{size}",
            "RANK": f"{rank}",
            "MASTER_ADDR": "127.0.0.1",
            "MASTER_PORT": "29500",
        }
    )
    run_opts = {
        "distributed_launch": True,
        "distributed_backend": backend,
        "local_rank": rank,
    }
    sb.utils.distributed.ddp_init_group(run_opts)
    # Run every sorting variant under DDP.
    for cfg in ("random.yaml", "asc.yaml", "dsc.yaml"):
        recipe(device="cpu", yaml_file=cfg, run_opts=run_opts)
def test_ddp():
    """Spawn two DDP workers, run all sorting recipes, then verify that,
    combined, the workers visited every utterance ID.

    Each rank pickles its visited IDs (see ``SamplingBrain.on_stage_end``);
    this test merges the two files per sorting mode and checks coverage.
    """
    size = 2
    processes = []
    mp.set_start_method("spawn", force=True)
    os.makedirs("tests/tmp", exist_ok=True)
    for rank in range(size):
        p = mp.Process(target=ddp_recipes, args=(rank, size))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
        assert p.exitcode == 0
    # check all
    for sorting in ["random", "ascending", "descending"]:
        ids = []
        for rank in range(2):
            idf = f"tests/tmp/ddp_sorting_ids_{sorting}_{rank}"
            with open(idf, "rb") as f:
                ids += pickle.load(f)
        # The random sampler pads 99 items to an even 100 (one duplicate);
        # sorted data stays within the 99.
        # BUGFIX: the previous expression
        #     len(ids) == 100 if sorting == "random" else 99
        # parsed as a conditional expression whose else-branch was the
        # truthy constant 99, so the check never ran for sorted modes.
        assert len(ids) == (100 if sorting == "random" else 99)
        assert len(set(ids)) == 99
| 7,867 | 33.358079 | 116 | py |
speechbrain | speechbrain-main/tests/integration/speaker_id/example_xvector_experiment.py | #!/usr/bin/env/python3
"""This minimal example trains a speaker identification system based on
x-vectors. The encoder is based on TDNNs. The classifier is a MLP.
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
# Trains xvector model
class XvectorBrain(sb.Brain):
    """Brain for speaker identification: features -> x-vector -> classifier."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the speaker probabilities."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        # Feature extraction, length-aware normalization, embedding, MLP.
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x_vect = self.modules.xvector_model(feats)
        outputs = self.modules.classifier(x_vect)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the CE loss."
        predictions, lens = predictions
        spkid, spkid_lens = batch.spk_id_encoded
        loss = self.hparams.compute_cost(predictions, spkid, lens)
        if stage != sb.Stage.TRAIN:
            # Track classification errors for validation/test reporting.
            self.error_metrics.append(batch.id, predictions, spkid, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            # Kept for the overfitting assertion in main().
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
            print(
                stage, "error: %.2f" % self.error_metrics.summarize("average")
            )
def data_prep(data_folder, hparams):
    """Create train/valid datasets with audio and speaker-label pipelines.

    Arguments
    ---------
    data_folder : pathlib.Path
        Folder containing the sample ASR data.
    hparams : dict
        Accepted for signature parity with other recipes; unused here.

    Returns
    -------
    tuple
        ``(train_data, valid_data)`` exposing ``id``, ``sig`` and
        ``spk_id_encoded``.
    """

    def _load_split(json_name):
        # Load one annotation file as a DynamicItemDataset.
        return sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_folder / f"../annotation/{json_name}",
            replacements={"data_root": data_folder},
        )

    train_data = _load_split("ASR_train.json")
    valid_data = _load_split("ASR_dev.json")
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()

    # Audio pipeline: "wav" path -> waveform "sig".
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # Label pipeline: raw speaker id, then its encoded index.
    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        yield spk_id
        yield label_encoder.encode_sequence_torch([spk_id])

    sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
    # Fit the encoder on both splits (acceptable for this minimal example).
    label_encoder.update_from_didataset(train_data, output_key="spk_id")
    label_encoder.update_from_didataset(valid_data, output_key="spk_id")
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])
    return train_data, valid_data
def main(device="cpu"):
    """Train the x-vector speaker-ID example and check it overfits.

    Arguments
    ---------
    device : str
        Torch device string passed to the Brain via ``run_opts``.
    """
    exp_dir = pathlib.Path(__file__).resolve().parent
    yaml_path = exp_dir / "hyperparams.yaml"
    samples_dir = (exp_dir / "../../samples/ASR").resolve()
    # Load the experiment hyperparameters.
    with open(yaml_path) as stream:
        hparams = load_hyperpyyaml(stream)
    # Build the datasets.
    train_data, valid_data = data_prep(samples_dir, hparams)
    # Instantiate the Brain.
    xvect_brain = XvectorBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop.
    xvect_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (here simply re-using the valid data).
    xvect_brain.evaluate(valid_data)
    # The integration test expects the model to overfit this tiny dataset.
    assert xvect_brain.train_loss < 0.2
if __name__ == "__main__":
    main()
def test_error(device):
    # pytest entry point: run the speaker-ID recipe on the given device.
    main(device)
| 4,655 | 32.021277 | 80 | py |
speechbrain | speechbrain-main/tests/utils/refactoring_checks.py | #!/usr/bin/env/python3
"""This is a test script for creating a list of expected outcomes (before refactoring);
then, manual editing might change YAMLs and/or code; another test runs to compare results
(after refactoring to before). The target is a list of known HF repos.
The goal is to identify to which extent changes break existing functionality.
Then, larger changes to code base can be rolled out more assured.
Authors
* Andreas Nautsch, 2022, 2023
"""
import os
import sys
from tqdm import tqdm
import yaml
import torch # noqa
import importlib # noqa
import subprocess
import speechbrain # noqa
from glob import glob
from copy import deepcopy
from torch.utils.data import DataLoader
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main # noqa
from speechbrain.utils.train_logger import FileTrainLogger
from speechbrain.pretrained.interfaces import foreign_class # noqa
from speechbrain.dataio.dataloader import LoopedLoader, make_dataloader
def init(
    new_interfaces_git="https://github.com/speechbrain/speechbrain",
    new_interfaces_branch="hf-interface-testing",
    new_interfaces_local_dir="tests/tmp/hf_interfaces",
):
    """Clone and check out the branch carrying updated pretrained interfaces.

    If ``new_interfaces_local_dir`` already exists (e.g. prepared manually),
    the clone/checkout is skipped entirely.

    Parameters
    ----------
    new_interfaces_git: str
        Git repo to clone (default: the upstream speechbrain repository);
        can be specified in tests/utils/overrides.yaml
    new_interfaces_branch: str
        Branch containing ``updates_pretrained_models`` (default:
        `hf-interface-testing`); can be specified in tests/utils/overrides.yaml
    new_interfaces_local_dir: str
        Local checkout target (default: `tests/tmp/hf_interfaces`);
        can be specified in tests/utils/overrides.yaml

    Returns
    -------
    str
        Local path of `updates_pretrained_models` with the updated
        HF yaml/interface files.
    """
    if not os.path.exists(new_interfaces_local_dir):
        # Clone the repo holding the updated yaml/interface files.
        clone_log = subprocess.run(
            ["git", "clone", new_interfaces_git, new_interfaces_local_dir],
            capture_output=True,
        )
        print(f"\tgit clone log: {clone_log}")
        # Switch the fresh clone to the testing branch, then restore cwd.
        previous_cwd = os.getcwd()
        os.chdir(new_interfaces_local_dir)
        checkout_log = subprocess.run(
            ["git", "checkout", new_interfaces_branch], capture_output=True
        )
        print(f"\tgit checkout log: {checkout_log}")
        os.chdir(previous_cwd)
    # Valid local path containing updates_pretrained_models.
    return f"{new_interfaces_local_dir}/updates_pretrained_models"
def get_model(repo, values, updates_dir=None, run_opts=None):
    """Fetches a pretrained model with the option the re-specify its hyperparameters & interface.
    Parameters
    ----------
    repo: str
        Source of pretrained model (assuming its within the HF speechbrain collection).
    values: dict
        Interface specification.
        Example: speechbrain:hf-interface-testing/updates_pretrained_models/ssl-wav2vec2-base-librispeech/test.yaml
    updates_dir: str
        Local folder with yaml:interface updates; None (default) = take original yaml/interface specification.
    run_opts: dict
        Run options, such as device
    Returns
    -------
    A pretrained model with a speechbrain.pretrained.interface or a custom interface.
    """
    # get the pretrained class; model & predictions
    kwargs = {
        "source": f"speechbrain/{repo}",
        "savedir": f"pretrained_models/{repo}",
    }
    # adjust symlinks
    hparams = f"pretrained_models/{repo}/hyperparams.yaml"
    if (
        "foreign" in values.keys()
    ):  # it's a custom model which has its own Python filename
        custom = f'pretrained_models/{repo}/{values["foreign"]}'
    # prepare model loading: is it the old -or- the new yaml/interface?
    if updates_dir is not None:
        # testing the refactoring; assuming all model data has been loaded already
        kwargs["source"] = f"{updates_dir}/{repo}"
        # Re-point the cached hyperparams (and custom interface file, if
        # any) at the updated copies instead of the originals.
        os.unlink(hparams)
        os.symlink(f"{updates_dir}/{repo}/hyperparams.yaml", hparams)
        if "foreign" in values.keys():
            os.unlink(custom)
            os.symlink(
                f'{updates_dir}/{repo}/{values["foreign"]}', custom,
            )
    else:
        # re:testing on develop? => simply unlink anything before and re:link from cached HF hub
        if os.path.exists(hparams):
            os.unlink(hparams)
        if "foreign" in values.keys():
            if os.path.exists(custom):
                os.unlink(custom)
    if run_opts is not None:
        kwargs["run_opts"] = run_opts
    print(f"\trepo: {repo}")
    # load pretrained model either via specified pretrained class or custom interface
    if "foreign" not in values.keys():
        print(f'\tspeechbrain.pretrained.{values["cls"]}')
        print(f"\tobj.from_hparams({kwargs})")
        # NOTE(review): eval builds the class from a class name taken from a
        # trusted test.yaml fixture — never feed untrusted specs through here.
        obj = eval(f'speechbrain.pretrained.{values["cls"]}')
        model = obj.from_hparams(**kwargs)
    else:
        kwargs["pymodule_file"] = values["foreign"]
        kwargs["classname"] = values["cls"]
        model = foreign_class(**kwargs)
    return model
def get_prediction(repo, values, updates_dir=None):
    """Gets the prediction for one predefined audio example, pattern: {repo}/{values["sample"]} (see HF model card).
    Parameters
    ----------
    repo: str
        Source of pretrained model (assuming its within the HF speechbrain collection).
    values: dict
        Interface specification.
        Examples: speechbrain:hf-interface-testing/updates_pretrained_models/ssl-wav2vec2-base-librispeech/test.yaml
                  speechbrain:hf-interface-testing/updates_pretrained_models/asr-wav2vec2-librispeech/test.yaml
    updates_dir: str
        Controls whether/not we are in the refactored results (None: expected results; before refactoring).
    Returns
    -------
    Cleaned-up prediction results for yaml output (result logging & comparison through yaml de/serialization).
    """
    def sanitize(data):
        # cleanup data for yaml output (w/o this, yaml will make attempts to save torch/numpy arrays in their format)
        # NOTE(review): non-tensor entries are assumed to expose .ndim
        # (i.e. numpy arrays) — confirm for interfaces returning plain
        # Python values.
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()
        if data.ndim:
            data = list(data)
        return data
    # get the pretrained model (before/after yaml/interface update)
    model = get_model(repo=repo, values=values, updates_dir=updates_dir)  # noqa
    try:
        # simulate batch from single file
        prediction = eval(
            f'model.{values["fnx"]}(model.load_audio("{repo}/{values["sample"]}", savedir="pretrained_models/{repo}").unsqueeze(0), torch.tensor([1.0]))'
        )
    except Exception:
        # use an example audio if no audio can be loaded
        print(f'\tWARNING - no audio found on HF: {repo}/{values["sample"]}')
        prediction = eval(
            f'model.{values["fnx"]}(model.load_audio("tests/samples/single-mic/example1.wav", savedir="pretrained_models/{repo}").unsqueeze(0), torch.tensor([1.0]))'
        )
    finally:
        # Free the (potentially large) model before post-processing.
        del model
    # Each prediction entry is batched; keep element 0 and make it yaml-safe.
    return [sanitize(x[0]) for x in prediction]
def gather_expected_results(
    glob_filter="*",
    new_interfaces_git="https://github.com/speechbrain/speechbrain",
    new_interfaces_branch="hf-interface-testing",
    new_interfaces_local_dir="tests/tmp/hf_interfaces",
    yaml_path="tests/tmp/refactoring_results.yaml",
):
    """Collect the "before refactoring" predictions for each HF repo.

    Parameters
    ----------
    glob_filter: str
        Filter for a repo subset or a specific repo.
    new_interfaces_git: str
        Git repo to clone (default: the upstream speechbrain repository);
        can be specified in tests/utils/overrides.yaml
    new_interfaces_branch: str
        Default is `hf-interface-testing` (a git branch); can be specified
        in tests/utils/overrides.yaml
    new_interfaces_local_dir: str
        Default is `tests/tmp/hf_interfaces` (a local path); can be
        specified in tests/utils/overrides.yaml
    yaml_path : str
        Path where to store/load refactoring testing results for later
        comparison.
    """
    # Resume from a previous run when the results file already exists.
    if os.path.exists(yaml_path):
        with open(yaml_path) as yaml_in:
            results = yaml.safe_load(yaml_in)
    else:
        results = {}
    updates_dir = init(
        new_interfaces_git, new_interfaces_branch, new_interfaces_local_dir
    )
    for repo_path in glob(f"{updates_dir}/{glob_filter}"):
        repo = os.path.basename(repo_path)
        if repo in results:
            continue  # already collected for this repo
        with open(f"{updates_dir}/{repo}/test.yaml") as yaml_test:
            values = load_hyperpyyaml(yaml_test)
        print(f"Collecting results for: {repo} w/ values={values}")
        results[repo] = {"before": get_prediction(repo, values)}
        # Checkpoint after every repo so partial progress survives crashes.
        with open(yaml_path, "w") as yaml_out:
            yaml.dump(results, yaml_out, default_flow_style=None)
def gather_refactoring_results(
    glob_filter="*",
    new_interfaces_git="https://github.com/speechbrain/speechbrain",
    new_interfaces_branch="hf-interface-testing",
    new_interfaces_local_dir="tests/tmp/hf_interfaces",
    yaml_path="tests/tmp/refactoring_results.yaml",
):
    """After refactoring HF YAMLs and/or code, gather prediction results.

    Parameters
    ----------
    glob_filter: str
        Filter for a repo subset or a specific repo.
    new_interfaces_git: str
        Your git repo (or default: `https://github.com/speechbrain/speechbrain`);
        can be specified in tests/utils/overrides.yaml
    new_interfaces_branch: str
        Default is `hf-interface-testing` (a git branch); can be specified in tests/utils/overrides.yaml
    new_interfaces_local_dir: str
        Default is `tests/tmp/hf_interfaces` (a local path); can be specified in tests/utils/overrides.yaml
    yaml_path: str
        Path where to store/load refactoring testing results for later comparison.

    Raises
    ------
    FileNotFoundError
        If ``yaml_path`` does not exist; run ``gather_expected_results``
        first to produce the "before" predictions.
    """
    # Expected ("before") results must exist. Previously a missing file made
    # the whole function a silent no-op; fail loudly instead.
    if not os.path.exists(yaml_path):
        raise FileNotFoundError(
            f"Expected results not found at {yaml_path}; "
            "run gather_expected_results() first."
        )
    with open(yaml_path) as yaml_in:
        results = yaml.safe_load(yaml_in)
    # go through each repo
    updates_dir = init(
        new_interfaces_git, new_interfaces_branch, new_interfaces_local_dir
    )
    repos = map(os.path.basename, glob(f"{updates_dir}/{glob_filter}"))
    for repo in repos:
        # skip if results are there
        if "after" not in results[repo].keys():
            # get values
            with open(f"{updates_dir}/{repo}/test.yaml") as yaml_test:
                values = load_hyperpyyaml(yaml_test)
            print(
                f"Collecting refactoring results for: {repo} w/ values={values}"
            )
            # extend the results and compare against the "before" run
            results[repo]["after"] = get_prediction(repo, values, updates_dir)
            results[repo]["same"] = (
                results[repo]["before"] == results[repo]["after"]
            )
            # checkpoint after each repo
            with open(yaml_path, "w") as yaml_out:
                yaml.dump(results, yaml_out, default_flow_style=None)
            print(f"\tsame: {results[repo]['same']}")
def test_performance(
    repo, values, run_opts, updates_dir=None, recipe_overrides=None
):
    """Runs the evaluation partition of a recipe dataset for a pretrained model.
    Parameters
    ----------
    repo: str
        Source of pretrained model (assuming its within the HF speechbrain collection).
    values: dict
        Interface specification.
        Examples: speechbrain:hf-interface-testing/updates_pretrained_models/ssl-wav2vec2-base-librispeech/test.yaml
                  speechbrain:hf-interface-testing/updates_pretrained_models/asr-wav2vec2-librispeech/test.yaml
    run_opts: dict
        Run options, such as device
    updates_dir: str
        Controls whether/not we are in the refactored results (None: expected results; before refactoring).
    recipe_overrides: dict or None
        Recipe YAMLs contain placeholders and flags which need to be overwritten
        (e.g. data_folder & skip_prep); None means no extra overrides.
        See: overrides.yaml
    Returns
    -------
    Dict for export to yaml with performance statistics, as specified in the test.yaml files.
    """
    # Avoid the shared-mutable-default pitfall (was: recipe_overrides={}).
    if recipe_overrides is None:
        recipe_overrides = {}
    # Dataset depending file structure
    tmp_dir = f'tests/tmp/{values["dataset"]}'
    speechbrain.create_experiment_directory(experiment_directory=tmp_dir)
    stats_meta = {
        f'[{values["dataset"]}] - {"BEFORE" if updates_dir is None else "AFTER"}': repo
    }
    # Load pretrained
    model = get_model(
        repo=repo, values=values, updates_dir=updates_dir, run_opts=run_opts
    )  # noqa
    # Dataio preparation; we need the test sets only
    with open(values["recipe_yaml"]) as fin:
        recipe_hparams = load_hyperpyyaml(
            fin, values["overrides"] | recipe_overrides
        )
    # Dataset preparation is assumed to be done through recipes; before running this.
    # NOTE(review): "dataio"/"test_datasets"/"predicted"/"targeted"/"to_stats"
    # are executable strings from test.yaml — trusted test fixtures only.
    exec(values["dataio"])
    test_datasets = deepcopy(eval(values["test_datasets"]))
    # harmonise: always end up with a dict of test sets
    if type(test_datasets) is not dict:
        tmp = {}
        if type(test_datasets) is list:
            for i, x in enumerate(test_datasets):
                tmp[i] = x
        else:
            tmp[0] = test_datasets
        test_datasets = tmp
    # prepare testing: one tracker instance per reported metric
    logger = FileTrainLogger(save_file=f"{tmp_dir}/{repo}.log")
    reporting = deepcopy(values["performance"])
    for metric, specs in reporting.items():
        reporting[metric]["tracker"] = deepcopy(
            recipe_hparams[specs["handler"]]()
        )
    test_loader_kwargs = deepcopy(recipe_hparams[values["test_loader"]])
    del recipe_hparams
    stats = {}
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        test_set = test_datasets[k]
        if not (
            isinstance(test_set, DataLoader)
            or isinstance(test_set, LoopedLoader)
        ):
            test_set = make_dataloader(test_set, **test_loader_kwargs)
        with torch.no_grad():
            for batch in tqdm(test_set, dynamic_ncols=True, disable=False):
                batch = batch.to(model.device)
                wavs, wav_lens = batch.sig
                wavs, wav_lens = (  # noqa
                    wavs.to(model.device),
                    wav_lens.to(model.device),
                )
                predictions = eval(  # noqa
                    f'model.{values["fnx"]}(wavs, wav_lens)'
                )
                predicted = eval(values["predicted"])  # noqa
                targeted = eval(values["targeted"])  # noqa
                ids = batch.id  # noqa
                for metric in reporting.keys():
                    reporting[metric]["tracker"].append(
                        *eval(values["to_stats"])
                    )
        # Summarize each metric for this test set and log it.
        stats[k] = {}
        for metric, specs in reporting.items():
            stats[k][metric] = specs["tracker"].summarize(specs["field"])
        logger.log_stats(
            stats_meta=stats_meta | {"set": k}, test_stats=stats[k],
        )
    return stats
# run first w/ "--after=False" on latest develop, then checkout the refactoring branch and run w/ "--after=True"
# PYTHONPATH=`realpath .` python tests/utils/refactoring_checks.py tests/utils/overrides.yaml --LibriSpeech_data="" --CommonVoice_EN_data="" --CommonVoice_FR_data="" --IEMOCAP_data="" --after=False
if __name__ == "__main__":
    # CLI: hyperparams file + overrides parsed the SpeechBrain way.
    hparams_file, run_opts, overrides = speechbrain.parse_arguments(
        sys.argv[1:]
    )
    with open(hparams_file) as fin:
        dataset_overrides = load_hyperpyyaml(fin, overrides)
    # go through each repo
    updates_dir = init(
        dataset_overrides["new_interfaces_git"],
        dataset_overrides["new_interfaces_branch"],
        dataset_overrides["new_interfaces_local_dir"],
    )
    # load results, if existing -or- new from scratch
    yaml_path = f'{dataset_overrides["new_interfaces_local_dir"]}.yaml'
    if os.path.exists(yaml_path):
        with open(yaml_path) as yaml_in:
            results = yaml.safe_load(yaml_in)
    else:
        results = {}
    repos = map(
        os.path.basename,
        glob(f'{updates_dir}/{dataset_overrides["glob_filter"]}'),
    )
    for repo in repos:
        # get values
        with open(f"{updates_dir}/{repo}/test.yaml") as yaml_test:
            values = load_hyperpyyaml(yaml_test)
        # for this testing, some fields need to exist; skip otherwise
        if any(
            [
                entry not in values
                for entry in [
                    "dataset",
                    "overrides",
                    "dataio",
                    "test_datasets",
                    "test_loader",
                    "performance",
                    "predicted",
                ]
            ]
        ):
            continue
        # skip if datasets is not given
        if not dataset_overrides[f'{values["dataset"]}_data']:
            continue
        print(f"Run tests on: {repo}")
        if repo not in results.keys():
            results[repo] = {}
        # Before refactoring
        if "before" not in results[repo].keys():
            results[repo]["before"] = test_performance(
                repo,
                values,
                updates_dir=None,
                run_opts=run_opts,
                recipe_overrides=dataset_overrides[values["dataset"]],
            )
            # update (checkpoint results after each repo)
            with open(yaml_path, "w") as yaml_out:
                yaml.dump(results, yaml_out, default_flow_style=None)
        # After refactoring
        if (
            "after" not in results[repo].keys()
            and dataset_overrides["after"] is True
        ):
            results[repo]["after"] = test_performance(
                repo,
                values,
                run_opts=run_opts,
                updates_dir=updates_dir,
                recipe_overrides=dataset_overrides[values["dataset"]],
            )
            # Flag whether the refactoring changed the measured performance.
            results[repo]["same"] = (
                results[repo]["before"] == results[repo]["after"]
            )
            print(f'\tbefore: {results[repo]["before"]}')
            print(f'\t after: {results[repo]["after"]}')
            print(f'\t same: {results[repo]["same"]}')
            # update
            with open(yaml_path, "w") as yaml_out:
                yaml.dump(results, yaml_out, default_flow_style=None)
    # update (final write-out)
    with open(yaml_path, "w") as yaml_out:
        yaml.dump(results, yaml_out, default_flow_style=None)
| 19,156 | 36.489237 | 197 | py |
speechbrain | speechbrain-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import hyperpyyaml
sys.path.insert(-1, os.path.abspath("../"))
# -- Project information -----------------------------------------------------

project = "SpeechBrain"
copyright = "2021, SpeechBrain"
author = "SpeechBrain"

# The full version, including alpha/beta/rc tags
release = "0.5.0"


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.autosummary",
    "sphinx.ext.napoleon",
    "recommonmark",
]

# Napoleon settings (NumPy-style docstrings)
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True

# Intersphinx mapping:
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    "numpy": ("http://docs.scipy.org/doc/numpy/", None),
    "torch": ("https://pytorch.org/docs/master/", None),
}

# AUTODOC:

# Autodoc mock extra dependencies:
autodoc_mock_imports = []

# Order of API items:
autodoc_member_order = "bysource"
# Single effective definition; this variable was previously assigned twice
# (first as an empty dict, which was immediately overwritten).
autodoc_default_options = {"member-order": "bysource"}

# Don't show inherited docstrings:
autodoc_inherit_docstrings = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_apidoc_templates"]
# -- Better apidoc -----------------------------------------------------------
def run_apidoc(app):
    """Generate the API documentation with better-apidoc.

    Runs better-apidoc once per documented package (hyperpyyaml and
    speechbrain), writing the generated .rst stubs into the ``API`` folder.

    Arguments
    ---------
    app : sphinx.application.Sphinx
        The Sphinx application (supplied by the ``builder-inited`` event).
    """
    import better_apidoc

    better_apidoc.APP = app
    # Both packages are documented with identical options; only the source
    # directory differs, so loop instead of duplicating the argument list.
    package_dirs = [
        os.path.dirname(hyperpyyaml.__file__),
        os.path.join("../", "speechbrain"),
    ]
    for package_dir in package_dirs:
        better_apidoc.main(
            [
                "better-apidoc",
                "-t",
                "_apidoc_templates",
                "--force",
                "--no-toc",
                "--separate",
                "-o",
                "API",
                package_dir,
            ]
        )
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# Read-the-Docs theme configuration; available options are listed at
# https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
html_theme_options = {
    # Toc options
    "collapse_navigation": False,
    "sticky_navigation": True,
    "navigation_depth": 4,
    "includehidden": True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Map each source-file extension onto the parser Sphinx should use for it.
source_suffix = {
    ".rst": "restructuredtext",
    ".txt": "markdown",
    ".md": "markdown",
}
def setup(app):
    """Register the API-doc generation hook so stubs regenerate each build."""
    event = "builder-inited"
    app.connect(event, run_apidoc)
| 4,241 | 26.192308 | 79 | py |
Dink-Net | Dink-Net-main/main.py | import os
import wandb
import argparse
from utils import *
from tqdm import tqdm
from model import DinkNet, DinkNet_dgl
def train(args=None):
    """Fine-tune a pre-trained DinkNet model and evaluate its clustering.

    Loads the dataset selected by ``args.dataset``, restores the pre-trained
    checkpoint from ``./models/DinkNet_<dataset>.pt``, optimizes the joint
    clustering/discrimination loss, keeps the checkpoint with the best
    accuracy, and reports ACC/NMI/ARI/F1 of the best model.

    args:
        args: parsed command-line arguments (see the ``__main__`` block)
    """
    # setup random seed
    setup_seed(args.seed)

    # load graph data
    if args.dataset in ["cora", "citeseer"]:
        x, adj, y, n, k, d = load_data(args.dataset)
    elif args.dataset in ["amazon_photo"]:
        x, adj, y, n, k, d = load_amazon_photo()
    else:
        # fail fast: otherwise x/adj/... would be undefined below
        raise ValueError("unsupported dataset: {}".format(args.dataset))

    # label of discriminative task: first n samples are real, last n are fake
    disc_y = torch.cat((torch.ones(n), torch.zeros(n)), 0)

    # model
    if args.dataset in ["cora", "citeseer"]:
        model = DinkNet(n_in=d, n_h=args.hid_units, n_cluster=k, tradeoff=args.tradeoff, activation=args.activate)
    elif args.dataset in ["amazon_photo"]:
        model = DinkNet_dgl(g=adj, n_in=d, n_h=args.hid_units, n_cluster=k,
                            tradeoff=args.tradeoff, n_layers=1, activation=args.activate)

    # to device
    x, adj, disc_y, model = map(lambda tmp: tmp.to(args.device), [x, adj, disc_y, model])

    # load pre-trained model parameter; map_location lets a checkpoint saved
    # on GPU be restored when training on CPU (and vice versa)
    model.load_state_dict(torch.load("./models/DinkNet_{}.pt".format(args.dataset),
                                     map_location=args.device))

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    best_acc = 0

    # training
    if args.wandb:
        if not os.path.exists("./wandb/"):
            os.makedirs("./wandb")
        wandb.init(config=args,
                   project="ICML23_DinkNet",
                   name="baseline_{}".format(args.dataset),
                   dir="./wandb/",
                   job_type="training",
                   reinit=True)

    for epoch in tqdm(range(args.epochs)):
        model.train()
        optimizer.zero_grad()
        loss, sample_center_distance = model.cal_loss(x, adj, disc_y)
        loss.backward()
        optimizer.step()

        # evaluation every `eval_inter` epochs
        if (epoch + 1) % args.eval_inter == 0:
            model.eval()
            y_hat = model.clustering(x, adj)
            acc, nmi, ari, f1 = evaluation(y, y_hat)
            # checkpoint the best-accuracy model seen so far
            if best_acc < acc:
                best_acc = acc
                torch.save(model.state_dict(), "./models/DinkNet_" + args.dataset + "_final.pt")
            # logging
            tqdm.write("epoch {:03d} | acc:{:.2f} | nmi:{:.2f} | ari:{:.2f} | f1:{:.2f}".format(epoch, acc, nmi, ari, f1))
            if args.wandb:
                wandb.log({"epoch": epoch, "loss": loss, "acc": acc, "nmi": nmi, "ari": ari, "f1": f1})
        else:
            if args.wandb:
                wandb.log({"epoch": epoch, "loss": loss})

    # testing: reload the best checkpoint and report final metrics
    model.load_state_dict(torch.load("./models/DinkNet_" + args.dataset + "_final.pt",
                                     map_location=args.device))
    model.eval()
    y_hat = model.clustering(x, adj)
    acc, nmi, ari, f1 = evaluation(y, y_hat)

    # logging
    tqdm.write("test | acc:{:.2f} | nmi:{:.2f} | ari:{:.2f} | f1:{:.2f}".format(acc, nmi, ari, f1))
if __name__ == '__main__':
    # hyper-parameter settings
    arg_parser = argparse.ArgumentParser("DinkNet")

    # data
    arg_parser.add_argument("--seed", type=int, default=2023, help="random seed")
    arg_parser.add_argument("--device", type=str, default="cpu", help="training device")
    arg_parser.add_argument("--dataset", type=str, default="citeseer", help="dataset name")

    # model
    arg_parser.add_argument("--tradeoff", type=float, default=1e-10, help="tradeoff parameter")
    arg_parser.add_argument("--activate", type=str, default="prelu", help="activation function")
    arg_parser.add_argument("--hid_units", type=int, default=1536, help="number of hidden units")

    # training
    arg_parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
    arg_parser.add_argument("--wandb", action='store_true', default=False, help="enable wandb")
    arg_parser.add_argument("--epochs", type=int, default=200, help="number of epochs")
    arg_parser.add_argument("--eval_inter", type=int, default=10, help="interval of evaluation")

    train(args=arg_parser.parse_args())
| 3,933 | 32.338983 | 122 | py |
Dink-Net | Dink-Net-main/utils.py | import dgl
import sys
import copy
import torch
import random
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from munkres import Munkres
from collections import Counter
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import adjusted_rand_score as ari_score
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
def evaluation(y_true, y_pred):
    """
    evaluate the clustering performance.

    Cluster ids in ``y_pred`` are aligned to ``y_true`` with the Hungarian
    algorithm (Munkres) before accuracy/F1 are computed; NMI and ARI are
    permutation-invariant and computed directly. All four metrics are
    returned as percentages rounded to two decimals.

    args:
        y_true: ground truth
        y_pred: prediction
    returns:
        acc: accuracy
        nmi: normalized mutual information
        ari: adjust rand index
        f1: f1 score
    """
    # NMI / ARI do not depend on the label permutation
    nmi = nmi_score(y_true, y_pred, average_method='arithmetic')
    ari = ari_score(y_true, y_pred)
    # shift ground-truth labels so they start at 0
    y_true = y_true - np.min(y_true)
    l1 = list(set(y_true))
    num_class1 = len(l1)
    l2 = list(set(y_pred))
    num_class2 = len(l2)
    ind = 0
    # If the prediction uses fewer distinct ids than the ground truth, inject
    # the missing ids so the assignment cost matrix below is square.
    # NOTE(review): this overwrites predictions at indices 0, 1, ... — a
    # quirk inherited from earlier clustering codebases; confirm intended.
    if num_class1 != num_class2:
        for i in l1:
            if i in l2:
                pass
            else:
                y_pred[ind] = i
                ind += 1
    l2 = list(set(y_pred))
    num_class2 = len(l2)
    # still mismatched => the assignment problem cannot be built
    if num_class1 != num_class2:
        print('error')
        return
    # cost[i][j] = #samples of true class l1[i] that were predicted as l2[j]
    cost = np.zeros((num_class1, num_class2), dtype=int)
    for i, c1 in enumerate(l1):
        mps = [i1 for i1, e1 in enumerate(y_true) if e1 == c1]
        for j, c2 in enumerate(l2):
            mps_d = [i1 for i1 in mps if y_pred[i1] == c2]
            cost[i][j] = len(mps_d)
    # Munkres minimizes cost, so negate to maximize the class overlap
    m = Munkres()
    cost = cost.__neg__().tolist()
    indexes = m.compute(cost)
    # relabel predictions with the matched ground-truth ids
    new_predict = np.zeros(len(y_pred))
    for i, c in enumerate(l1):
        c2 = l2[indexes[i][1]]
        ai = [ind for ind, elm in enumerate(y_pred) if elm == c2]
        new_predict[ai] = c
    acc = accuracy_score(y_true, new_predict)
    f1 = f1_score(y_true, new_predict, average='macro')
    # report all metrics as percentages with two decimals
    acc, nmi, ari, f1 = map(lambda x: round(x * 100, 2), [acc, nmi, ari, f1])
    return acc, nmi, ari, f1
def setup_seed(seed):
    """
    fix the random seed of every RNG used in the project.

    Seeds torch (CPU + CUDA), numpy and the stdlib ``random`` module, and
    configures cuDNN for deterministic execution.

    args:
        seed: the random seed
    returns:
        none
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # (a second, redundant torch.manual_seed call was removed here)
    # make cuDNN deterministic so repeated runs are reproducible
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    return None
def aug_feature_dropout(input_feat, drop_rate=0.2):
    """
    dropout features for augmentation.

    Zeroes out a random subset of feature columns on a copy of the input;
    the caller's tensor is left untouched.

    args:
        input_feat: input features, shape (1, n_nodes, n_features)
        drop_rate: fraction of feature columns to zero out
    returns:
        augmented features with the same shape as the input
    """
    # deep-copy first so the original tensor is never modified
    augmented = copy.deepcopy(input_feat).squeeze(0)
    n_feat = augmented.shape[1]
    n_drop = int(n_feat * drop_rate)
    # pick the columns to silence
    drop_cols = random.sample([i for i in range(n_feat)], n_drop)
    augmented[:, drop_cols] = 0
    return augmented.unsqueeze(0)
def aug_feature_shuffle(input_feat):
    """
    shuffle the features for fake samples.

    Permutes the node axis so each node receives another node's attribute
    vector, producing negative samples for the discriminative task.

    args:
        input_feat: input features, shape (1, n_nodes, n_features)
    returns:
        row-shuffled features with the same shape
    """
    perm = np.random.permutation(input_feat.shape[1])
    return input_feat[:, perm, :]
def load_data(dataset_name):
    """
    Load data for cora and citeseer datasets (Planetoid split files).

    Reads the pickled ``ind.<dataset>.{x,y,tx,ty,allx,ally,graph}`` pieces
    plus the test-index file from ``data/``, reassembles features/labels in
    node order, row-normalizes the features, and returns a symmetrically
    normalized adjacency as a torch sparse tensor.

    args:
        dataset_name: dataset name
    returns:
        features: node attributes
        sp_adj: sparse normalized adjacency matrix
        labels: node labels
        n: number of nodes
        k: number of clusters
        d: dimension number of node attributes
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    # each piece is a Python-2-era pickle, hence the latin1 encoding on py3
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_name, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    # test nodes are stored out of order; read their true positions
    filename = "data/ind.{}.test.index".format(dataset_name)
    test_idx_reorder = []
    for line in open(filename):
        test_idx_reorder.append(int(line.strip()))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_name == 'citeseer':
        # citeseer has isolated test nodes missing from tx/ty: pad the
        # feature/label blocks with zero rows so indices line up
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # stack train+test blocks, then restore the on-disk test-node ordering
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # row-normalize attributes; D^-1/2 (A+I) D^-1/2 for the adjacency
    features = preprocess_features(features)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))
    sp_adj = sparse_mx_to_torch_sparse_tensor(adj)
    features = torch.FloatTensor(features).unsqueeze(0)
    n = features.shape[1]
    k = labels.shape[-1]
    d = features.shape[-1]
    # convert one-hot labels to class ids
    labels = labels.argmax(1)
    return features, sp_adj, labels, n, k, d
def load_amazon_photo():
    """
    Load the amazon photo dataset as a DGL graph.

    Builds the graph from the normalized (densified) adjacency, attaches
    row-normalized features and one-hot labels as node data, and ensures
    exactly one self-loop per node.

    args:
    returns:
        features: node attributes
        g: dgl graph
        labels: node labels
        n: number of nodes
        k: number of clusters
        d: dimension number of node attributes
    """
    adj, features, labels, train_mask, val_mask, test_mask = load_pitfall_dataset("photo")
    # symmetric normalization with self-loops, then densify to extract edges
    adj = normalize_adj(adj + sp.eye(adj.shape[0])).todense()
    features = preprocess_features(features)
    # every nonzero entry of the normalized adjacency becomes an edge
    src, dst = np.nonzero(adj)
    g = dgl.graph((src, dst))
    g.ndata['feat'] = torch.FloatTensor(features)
    g.ndata['label'] = torch.LongTensor(labels)
    # normalize self-loops: drop any existing ones, then add exactly one each
    g = g.remove_self_loop().add_self_loop()
    n = g.ndata['feat'].shape[0]
    k = labels.shape[-1]
    d = features.shape[-1]
    # convert one-hot labels to class ids
    labels = labels.argmax(1)
    return g.ndata['feat'], g, labels, n, k, d
def sparse_to_tuple(sparse_mx, insert_batch=False):
    """
    convert sparse matrix to tuple representation.

    args:
        sparse_mx: a scipy sparse matrix, or a list of them
        insert_batch: set insert_batch=True if you want to insert a batch dimension.
    returns:
        sparse_mx: (coords, values, shape) tuple(s); lists are converted in place
    """
    def to_tuple(mx):
        # COO format exposes the row/col index arrays directly
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        if insert_batch:
            # prepend an all-zero batch coordinate to every entry
            batch_idx = np.zeros(mx.row.shape[0])
            coords = np.vstack((batch_idx, mx.row, mx.col)).transpose()
            shape = (1,) + mx.shape
        else:
            coords = np.vstack((mx.row, mx.col)).transpose()
            shape = mx.shape
        return coords, mx.data, shape

    if isinstance(sparse_mx, list):
        # convert in place so callers holding the list see the update
        for idx in range(len(sparse_mx)):
            sparse_mx[idx] = to_tuple(sparse_mx[idx])
        return sparse_mx
    return to_tuple(sparse_mx)
def normalize_adj(adj):
    """
    symmetrically normalize adjacency matrix: D^-1/2 * A * D^-1/2.

    args:
        adj: original adjacency matrix
    returns:
        norm_adj: normalized adjacency matrix (COO format)
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    # isolated nodes have degree 0 -> inf after the power; zero them out
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt_degree)
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
def preprocess_features(features):
    """
    row-normalize node attributes so each row sums to 1.

    args:
        features: input node attributes (scipy sparse matrix)
    returns:
        dense matrix of row-normalized attributes
    """
    row_sums = np.array(features.sum(1))
    # rows summing to zero get -inf so their inverse becomes -0.0, which
    # leaves all-zero rows untouched instead of dividing by zero
    row_sums[row_sums == 0] = -np.inf
    inv_row_sums = np.power(row_sums, -1).flatten()
    return sp.diags(inv_row_sums).dot(features).todense()
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """
    convert a scipy sparse matrix to a torch sparse tensor.

    args:
        sparse_mx: sparse matrix
    returns:
        sparse_tensor: torch sparse COO tensor (float32)
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
    # supported constructor and produces an identical tensor here
    sparse_tensor = torch.sparse_coo_tensor(indices, values, shape)
    return sparse_tensor
class SparseGraph:
    """Attributed, optionally labeled graph backed by SciPy sparse matrices.

    Container used by the "pitfalls of GNN evaluation" datasets. Holds the
    adjacency matrix (CSR, float32) plus optional node attributes, labels,
    and name/metadata fields; all dimensions are validated on construction.
    """

    def __init__(self, adj_matrix, attr_matrix=None, labels=None,
                 node_names=None, attr_names=None, class_names=None, metadata=None):
        # adjacency must be sparse; normalize its storage to float32 CSR
        if sp.isspmatrix(adj_matrix):
            adj_matrix = adj_matrix.tocsr().astype(np.float32)
        else:
            raise ValueError("Adjacency matrix must be in sparse format (got {0} instead)"
                             .format(type(adj_matrix)))

        if adj_matrix.shape[0] != adj_matrix.shape[1]:
            raise ValueError("Dimensions of the adjacency matrix don't agree")

        # attributes may be sparse or dense; either way coerce to float32
        if attr_matrix is not None:
            if sp.isspmatrix(attr_matrix):
                attr_matrix = attr_matrix.tocsr().astype(np.float32)
            elif isinstance(attr_matrix, np.ndarray):
                attr_matrix = attr_matrix.astype(np.float32)
            else:
                raise ValueError("Attribute matrix must be a sp.spmatrix or a np.ndarray (got {0} instead)"
                                 .format(type(attr_matrix)))

            if attr_matrix.shape[0] != adj_matrix.shape[0]:
                raise ValueError("Dimensions of the adjacency and attribute matrices don't agree")

        if labels is not None:
            if labels.shape[0] != adj_matrix.shape[0]:
                raise ValueError("Dimensions of the adjacency matrix and the label vector don't agree")

        if node_names is not None:
            if len(node_names) != adj_matrix.shape[0]:
                raise ValueError("Dimensions of the adjacency matrix and the node names don't agree")

        if attr_names is not None:
            if len(attr_names) != attr_matrix.shape[1]:
                raise ValueError("Dimensions of the attribute matrix and the attribute names don't agree")

        self.adj_matrix = adj_matrix
        self.attr_matrix = attr_matrix
        self.labels = labels
        self.node_names = node_names
        self.attr_names = attr_names
        self.class_names = class_names
        self.metadata = metadata

    def num_nodes(self):
        """Number of nodes in the graph."""
        return self.adj_matrix.shape[0]

    def num_edges(self):
        """Number of edges; undirected edges are counted once."""
        if self.is_directed():
            return int(self.adj_matrix.nnz)
        else:
            return int(self.adj_matrix.nnz / 2)

    def get_neighbors(self, idx):
        """Indices of the nodes adjacent to node `idx`."""
        return self.adj_matrix[idx].indices

    def is_directed(self):
        """True iff the adjacency matrix is not symmetric."""
        return (self.adj_matrix != self.adj_matrix.T).sum() != 0

    def to_undirected(self):
        """Symmetrize the (unweighted) adjacency matrix in place."""
        if self.is_weighted():
            raise ValueError("Convert to unweighted graph first.")
        else:
            self.adj_matrix = self.adj_matrix + self.adj_matrix.T
            # A + A.T can produce 2s on mutual edges; clamp back to 1
            self.adj_matrix[self.adj_matrix != 0] = 1
        return self

    def is_weighted(self):
        """True iff any stored edge weight differs from 1."""
        return np.any(np.unique(self.adj_matrix[self.adj_matrix != 0].A1) != 1)

    def to_unweighted(self):
        """Set every stored edge weight to 1 (in place)."""
        self.adj_matrix.data = np.ones_like(self.adj_matrix.data)
        return self

    def standardize(self):
        """Unweighted, undirected, self-loop-free largest connected component.

        Relies on the module-level helpers `eliminate_self_loops` and
        `largest_connected_components`.
        """
        G = self.to_unweighted().to_undirected()
        G = eliminate_self_loops(G)
        G = largest_connected_components(G, 1)
        return G

    def unpack(self):
        """Return the (adj_matrix, attr_matrix, labels) triple."""
        return self.adj_matrix, self.attr_matrix, self.labels
def load_npz_to_sparse_graph(data_path):
    """Load a SparseGraph from a numpy ``.npz`` archive.

    args:
        data_path: path to the archive (str or path-like); a missing
            ``.npz`` suffix is appended automatically
    returns:
        a SparseGraph with the adjacency matrix and any optional fields
        (attributes, labels, names, metadata) stored in the archive
    """
    data_path = str(data_path)
    if not data_path.endswith('.npz'):
        # bug fix: the previous `Path.joinpath('.npz')` appended '.npz' as an
        # extra path *component* (and raised AttributeError on plain strings)
        # instead of adding the file suffix
        data_path = data_path + '.npz'
    with np.load(data_path, allow_pickle=True) as loader:
        loader = dict(loader)
        # adjacency is always stored as its CSR components
        adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),
                                   shape=loader['adj_shape'])

        # node attributes: sparse CSR pieces, a dense array, or absent
        if 'attr_data' in loader:
            attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),
                                        shape=loader['attr_shape'])
        elif 'attr_matrix' in loader:
            attr_matrix = loader['attr_matrix']
        else:
            attr_matrix = None

        # labels: sparse one-hot pieces, a dense vector, or absent
        if 'labels_data' in loader:
            labels = sp.csr_matrix((loader['labels_data'], loader['labels_indices'], loader['labels_indptr']),
                                   shape=loader['labels_shape'])
        elif 'labels' in loader:
            labels = loader['labels']
        else:
            labels = None

        node_names = loader.get('node_names')
        attr_names = loader.get('attr_names')
        class_names = loader.get('class_names')
        metadata = loader.get('metadata')

    return SparseGraph(adj_matrix, attr_matrix, labels, node_names, attr_names, class_names, metadata)
def eliminate_self_loops(G):
    """Remove all self-loop edges from G's adjacency matrix (modifies G)."""
    # LIL supports cheap diagonal assignment; convert back to CSR afterwards
    adj = G.adj_matrix.tolil()
    adj.setdiag(0)
    adj = adj.tocsr()
    # drop the explicitly stored zeros left behind on the diagonal
    adj.eliminate_zeros()
    G.adj_matrix = adj
    return G
def remove_underrepresented_classes(g, train_examples_per_class, val_examples_per_class):
    """Drop every node whose class has too few examples to fill both splits."""
    threshold = train_examples_per_class + val_examples_per_class
    class_counts = Counter(g.labels)
    # a class survives only if it can supply strictly more than the threshold
    frequent_classes = set(c for c, count in class_counts.items() if count > threshold)
    keep = [idx for idx in range(len(g.labels)) if g.labels[idx] in frequent_classes]
    return create_subgraph(g, nodes_to_keep=keep)
def create_subgraph(sparse_graph, _sentinel=None, nodes_to_remove=None, nodes_to_keep=None):
    """Restrict a graph to a subset of its nodes (modifies the graph).

    Exactly one of `nodes_to_remove` / `nodes_to_keep` must be given, and
    both must be passed by keyword (`_sentinel` traps positional misuse).
    """
    if _sentinel is not None:
        raise ValueError("Only call `create_subgraph` with named arguments',"
                         " (nodes_to_remove=...) or (nodes_to_keep=...)")
    if nodes_to_remove is None and nodes_to_keep is None:
        raise ValueError("Either nodes_to_remove or nodes_to_keep must be provided.")
    if nodes_to_remove is not None and nodes_to_keep is not None:
        raise ValueError("Only one of nodes_to_remove or nodes_to_keep must be provided.")
    if nodes_to_keep is None:
        # complement of the removal set, in ascending node order
        nodes_to_keep = [i for i in range(sparse_graph.num_nodes()) if i not in nodes_to_remove]
    else:
        nodes_to_keep = sorted(nodes_to_keep)

    # slice the adjacency on both axes, then every per-node side array
    sparse_graph.adj_matrix = sparse_graph.adj_matrix[nodes_to_keep][:, nodes_to_keep]
    if sparse_graph.attr_matrix is not None:
        sparse_graph.attr_matrix = sparse_graph.attr_matrix[nodes_to_keep]
    if sparse_graph.labels is not None:
        sparse_graph.labels = sparse_graph.labels[nodes_to_keep]
    if sparse_graph.node_names is not None:
        sparse_graph.node_names = sparse_graph.node_names[nodes_to_keep]
    return sparse_graph
def binarize_labels(labels, sparse_output=False, return_classes=False):
    """One-hot (or multi-hot) encode labels with scikit-learn binarizers."""
    from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer

    # iterable elements => multilabel format; scalars => single-label
    if hasattr(labels[0], '__iter__'):
        binarizer = MultiLabelBinarizer(sparse_output=sparse_output)
    else:
        binarizer = LabelBinarizer(sparse_output=sparse_output)
    label_matrix = binarizer.fit_transform(labels).astype(np.float32)
    if return_classes:
        return label_matrix, binarizer.classes_
    return label_matrix
def is_binary_bag_of_words(features):
    """Return True iff every stored entry of the sparse matrix equals 1.0."""
    return bool(np.all(features.tocoo().data == 1.0))
def to_binary_bag_of_words(features):
    """Force every stored value of the matrix to 1 and return it as CSR.

    NOTE: `tocsr()` returns the same object when the input is already CSR,
    so in that case the input matrix is modified in place.
    """
    binary = features.tocsr()
    binary.data[:] = 1.0
    return binary
def sample_per_class(labels, num_examples_per_class, forbidden_indices=None):
    """Randomly draw a fixed number of sample indices from every class.

    Classes are processed in index order (one `np.random.choice` call per
    class) so RNG consumption stays deterministic under a fixed seed.
    """
    num_samples, num_classes = labels.shape
    picks = []
    for class_index in range(num_classes):
        # candidate indices of this class, in ascending order, minus any
        # already-used (forbidden) indices
        candidates = [
            sample_index
            for sample_index in range(num_samples)
            if labels[sample_index, class_index] > 0.0
            and (forbidden_indices is None or sample_index not in forbidden_indices)
        ]
        picks.append(np.random.choice(candidates, num_examples_per_class, replace=False))
    return np.concatenate(picks)
def largest_connected_components(sparse_graph, n_components=1):
    """Keep only the `n_components` largest connected components of the graph."""
    _, component_indices = sp.csgraph.connected_components(sparse_graph.adj_matrix)
    component_sizes = np.bincount(component_indices)
    # component ids ordered by size, largest first
    keep_components = np.argsort(component_sizes)[::-1][:n_components]
    keep_nodes = [
        node
        for node, component in enumerate(component_indices)
        if component in keep_components
    ]
    return create_subgraph(sparse_graph, nodes_to_keep=keep_nodes)
def load_pitfall_dataset(dataset_str,
                         standardize_graph=True,
                         train_ratio=0.1,
                         val_ratio=0.1):
    """Load one of the "pitfalls of GNN evaluation" .npz datasets.

    args:
        dataset_str: one of 'computer', 'photo', 'phy', 'cs'
        standardize_graph: if True, reduce the graph to its unweighted,
            undirected, self-loop-free largest connected component
        train_ratio, val_ratio: NOTE(review): currently unused — the split
            below always uses 30 examples per class; confirm intended
    returns:
        adj, features, labels (one-hot), and train/val/test index arrays
    """
    file_map = {'computer':'amazon_electronics_computers.npz',
                'photo':'amazon_electronics_photo.npz',
                'phy': 'ms_academic_phy.npz',
                'cs': 'ms_academic_cs.npz'}
    file_name = file_map[dataset_str]
    data_path = './data/' + file_name
    dataset_graph = load_npz_to_sparse_graph(data_path)

    if standardize_graph:
        dataset_graph = dataset_graph.standardize()
    else:
        dataset_graph = dataset_graph.to_undirected()
        dataset_graph = eliminate_self_loops(dataset_graph)

    adj, features, labels = dataset_graph.unpack()
    # one-hot encode labels; force features into a binary bag-of-words
    labels = binarize_labels(labels)
    if not is_binary_bag_of_words(features):
        features = to_binary_bag_of_words(features)

    # adjacency must be symmetric after the preprocessing above
    assert (adj != adj.T).nnz == 0
    assert is_binary_bag_of_words(features), f"Non-binary node_features entry!"

    # fixed per-class splits (30 train + 30 val per class; the rest is test)
    idx_train, idx_val, idx_test = get_train_val_test_split(labels,train_examples_per_class = 30, val_examples_per_class=30)
    return adj, features, labels, idx_train, idx_val, idx_test
def get_train_val_test_split(labels,
                             train_examples_per_class=30, val_examples_per_class=30,
                             test_examples_per_class=None,
                             train_size=None, val_size=None, test_size=None):
    """Split samples into disjoint train/val/test index sets.

    Each split is drawn either per class (``*_examples_per_class``) or as a
    flat uniform sample (``*_size``); the test set defaults to "everything
    left over". Sanity assertions verify the splits are duplicate-free,
    pairwise disjoint, and (for per-class splits) class-balanced.

    args:
        labels: one-hot label matrix of shape (num_samples, num_classes)
    returns:
        train_indices, val_indices, test_indices
    """
    num_samples, num_classes = labels.shape
    remaining_indices = list(range(num_samples))
    # training split: per-class balanced, or uniform random of train_size
    if train_examples_per_class is not None:
        train_indices = sample_per_class(labels, train_examples_per_class)
    else:
        train_indices = np.random.choice(remaining_indices, train_size, replace=False)
    # validation split: never reuses training indices
    if val_examples_per_class is not None:
        val_indices = sample_per_class(labels, val_examples_per_class, forbidden_indices=train_indices)
    else:
        remaining_indices = np.setdiff1d(remaining_indices, train_indices)
        val_indices = np.random.choice(remaining_indices, val_size, replace=False)
    forbidden_indices = np.concatenate((train_indices, val_indices))
    # test split: per-class, fixed size, or all remaining samples
    if test_examples_per_class is not None:
        test_indices = sample_per_class(labels, test_examples_per_class,
                                        forbidden_indices=forbidden_indices)
    elif test_size is not None:
        remaining_indices = np.setdiff1d(remaining_indices, forbidden_indices)
        test_indices = np.random.choice(remaining_indices, test_size, replace=False)
    else:
        test_indices = np.setdiff1d(remaining_indices, forbidden_indices)
    # no duplicates within any split
    assert len(set(train_indices)) == len(train_indices)
    assert len(set(val_indices)) == len(val_indices)
    assert len(set(test_indices)) == len(test_indices)
    # splits must be pairwise disjoint
    assert len(set(train_indices) - set(val_indices)) == len(set(train_indices))
    assert len(set(train_indices) - set(test_indices)) == len(set(train_indices))
    assert len(set(val_indices) - set(test_indices)) == len(set(val_indices))
    if test_size is None and test_examples_per_class is None:
        # with a "remainder" test set the three splits cover every sample
        assert len(np.concatenate((train_indices, val_indices, test_indices))) == num_samples
    # per-class splits must contain the same number of samples per class
    if train_examples_per_class is not None:
        train_labels = labels[train_indices, :]
        train_sum = np.sum(train_labels, axis=0)
        assert np.unique(train_sum).size == 1
    if val_examples_per_class is not None:
        val_labels = labels[val_indices, :]
        val_sum = np.sum(val_labels, axis=0)
        assert np.unique(val_sum).size == 1
    if test_examples_per_class is not None:
        test_labels = labels[test_indices, :]
        test_sum = np.sum(test_labels, axis=0)
        assert np.unique(test_sum).size == 1
    return train_indices, val_indices, test_indices
| 21,067 | 34.7691 | 124 | py |
Dink-Net | Dink-Net-main/model.py | from utils import *
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv
# ------------------------from scratch------------------------
class GCN(nn.Module):
    """Single graph-convolution layer: act(A @ (X W) + b)."""

    def __init__(self, in_ft, out_ft, act):
        super(GCN, self).__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == 'prelu' else act
        self.bias = nn.Parameter(torch.FloatTensor(out_ft))
        # init parameters
        torch.nn.init.xavier_uniform_(self.fc.weight.data)
        self.bias.data.fill_(0.0)

    def forward(self, feat, adj, sparse=False):
        """Propagate `feat` (1, n, in_ft) over `adj` and activate."""
        support = self.fc(feat)
        if sparse:
            # sparse adj carries no batch dim: drop it for spmm, restore after
            aggregated = torch.spmm(adj, torch.squeeze(support, 0)).unsqueeze(0)
        else:
            aggregated = torch.bmm(adj, support)
        aggregated = aggregated + self.bias
        return self.act(aggregated)
class DinkNet(nn.Module):
    """Dink-Net for tensor-based graphs (cora, citeseer).

    Encodes nodes with a GCN, trains a shuffle-based discriminative task,
    and jointly optimizes learnable cluster centers via a clustering loss.
    """

    def __init__(self, n_in, n_h, n_cluster, tradeoff=1e-10, activation="prelu"):
        super(DinkNet, self).__init__()
        # NOTE: torch.Tensor(...) allocates uninitialized values; the centers
        # are expected to be overwritten by the pre-trained checkpoint.
        self.cluster_center = torch.nn.Parameter(torch.Tensor(n_cluster, n_h))
        self.gcn = GCN(n_in, n_h, activation)
        self.lin = nn.Linear(n_h, n_h)
        self.discrimination_loss = nn.BCEWithLogitsLoss()
        # weight of the discrimination term inside cal_loss
        self.tradeoff = tradeoff

    def forward(self, x_1, x_2, adj, sparse):
        """Return discrimination logits for real (x_1) and fake (x_2) views."""
        h_1 = self.gcn(x_1, adj, sparse)
        h_2 = self.gcn(x_2, adj, sparse)
        # one scalar score per node: sum of the projected embedding
        z_1 = ((self.lin(h_1.squeeze(0))).sum(1))
        z_2 = ((self.lin(h_2.squeeze(0))).sum(1))
        logit = torch.cat((z_1, z_2), 0)
        return logit

    def embed(self, x, adj, power=5, sparse=True):
        """Detached, L2-normalized embeddings: local GCN output plus a
        `power`-step adjacency-propagated (smoothed) copy."""
        local_h = self.gcn(x, adj, sparse)
        global_h = local_h.clone().squeeze(0)
        for i in range(power):
            global_h = adj @ global_h
        global_h = global_h.unsqueeze(0)
        # stop gradients: embeddings are used for clustering, not training
        local_h, global_h = map(lambda tmp: tmp.detach(), [local_h, global_h])
        h = local_h + global_h
        h = h.squeeze(0)
        h = F.normalize(h, p=2, dim=-1)
        return h

    @staticmethod
    def dis_fun(x, c):
        """Pairwise squared Euclidean distances between rows of x and c."""
        xx = (x * x).sum(-1).reshape(-1, 1).repeat(1, c.shape[0])
        cc = (c * c).sum(-1).reshape(1, -1).repeat(x.shape[0], 1)
        xx_cc = xx + cc
        xc = x @ c.T
        distance = xx_cc - 2 * xc
        return distance

    @staticmethod
    def no_diag(x, n):
        """Build a view of x's off-diagonal elements.

        NOTE(review): the resulting view is discarded and `x` is NOT
        modified, so the (zero) diagonal still contributes to the mean
        taken in cal_loss — confirm whether this is intended.
        """
        x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

    def cal_loss(self, x, adj, disc_y):
        """Joint loss: pull samples to centers, push centers apart, plus a
        `tradeoff`-weighted discrimination term."""
        # augmentations
        x_aug = aug_feature_dropout(x)
        x_shuffle = aug_feature_shuffle(x_aug)
        # discrimination loss
        logit = self.forward(x_aug, x_shuffle, adj, sparse=True)
        loss_disc = self.discrimination_loss(logit, disc_y)
        # clustering loss
        h = self.embed(x, adj, power=5, sparse=True)
        sample_center_distance = self.dis_fun(h, self.cluster_center)
        center_distance = self.dis_fun(self.cluster_center, self.cluster_center)
        self.no_diag(center_distance, self.cluster_center.shape[0])
        clustering_loss = sample_center_distance.mean() - center_distance.mean()
        # tradeoff
        loss = clustering_loss + self.tradeoff * loss_disc
        return loss, sample_center_distance

    def clustering(self, x, adj):
        """Assign every node to its nearest cluster center (numpy array)."""
        h = self.embed(x, adj, sparse=True)
        sample_center_distance = self.dis_fun(h, self.cluster_center)
        cluster_results = torch.argmin(sample_center_distance, dim=-1)
        return cluster_results.cpu().detach().numpy()
# ------------------------from dgl------------------------
class Encoder(nn.Module):
    """Graph encoder wrapping either a multi-layer GCN or an SGC backbone.

    args:
        g: the dgl graph the encoder operates on
        in_feats / n_hidden: input and hidden feature sizes
        n_layers: number of GCN layers (gcn backbone only)
        activation: activation name ('prelu') or module
        gnn_encoder: 'gcn' or 'sgc'
        k: kept for interface compatibility (the sgc branch uses k=10)
    """

    def __init__(self, g, in_feats, n_hidden, n_layers, activation, gnn_encoder, k = 1):
        super(Encoder, self).__init__()
        self.g = g
        self.gnn_encoder = gnn_encoder
        activation = nn.PReLU(n_hidden) if activation == 'prelu' else activation
        if gnn_encoder == 'gcn':
            self.conv = GCN_dgl(g, in_feats, n_hidden, n_layers, activation)
        elif gnn_encoder == 'sgc':
            # bug fix: SGConv was never imported at module level, so this
            # branch previously failed with a NameError; import it lazily
            from dgl.nn.pytorch import SGConv
            self.conv = SGConv(in_feats, n_hidden, k=10, cached=True)

    def forward(self, features, corrupt=False):
        """Encode `features`; with corrupt=True, row-shuffle them first to
        produce negative (fake) samples for the discriminative loss."""
        if corrupt:
            perm = torch.randperm(self.g.number_of_nodes())
            features = features[perm]
        if self.gnn_encoder == 'gcn':
            features = self.conv(features)
        elif self.gnn_encoder == 'sgc':
            features = self.conv(self.g, features)
        return features
class GCN_dgl(nn.Module):
    """Stack of DGL GraphConv layers applied to a fixed graph."""

    def __init__(self, g, n_in, n_h, n_layers, activation, bias=True, weight=True):
        super(GCN_dgl, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        # first layer maps n_in -> n_h, every later layer maps n_h -> n_h
        layer_dims = [n_in] + [n_h] * n_layers
        for d_in, d_out in zip(layer_dims[:-1], layer_dims[1:]):
            self.layers.append(GraphConv(d_in, d_out, weight=weight, bias=bias, activation=activation))

    def forward(self, feat):
        """Run `feat` (optionally with a leading batch dim of 1) through all layers."""
        h = feat.squeeze(0)
        graph = self.g.to(h.device)
        for layer in self.layers:
            h = layer(graph, h)
        return h
class DinkNet_dgl(nn.Module):
    """Dink-Net for DGL graphs (amazon photo).

    Same objective as `DinkNet`, but the encoder and the embedding
    propagation run through DGL message passing.
    """

    def __init__(self, g, n_in, n_h, n_cluster, tradeoff, n_layers, activation, proj_layers=1, gnn_encoder='gcn', n_hop=10):
        super(DinkNet_dgl, self).__init__()
        # NOTE: torch.Tensor(...) allocates uninitialized values; the centers
        # are expected to be overwritten by the pre-trained checkpoint.
        self.cluster_center = torch.nn.Parameter(torch.Tensor(n_cluster, n_h))
        self.encoder = Encoder(g, n_in, n_h, n_layers, activation, gnn_encoder, n_hop)
        # projection head used only by the discriminative task
        self.mlp = torch.nn.ModuleList()
        for i in range(proj_layers):
            self.mlp.append(nn.Linear(n_h, n_h))
        self.discrimination_loss = nn.BCEWithLogitsLoss()
        # weight of the discrimination term inside cal_loss
        self.tradeoff = tradeoff

    def forward(self, x):
        """Return discrimination logits: real embeddings first, shuffled
        (fake) embeddings second, one scalar score per node."""
        z_1 = self.encoder(x, corrupt=False)
        z_2 = self.encoder(x, corrupt=True)
        for i, lin in enumerate(self.mlp):
            z_1 = lin(z_1)
            z_2 = lin(z_2)
        logit = torch.cat((z_1.sum(1), z_2.sum(1)), 0)
        return logit

    def embed(self, x, g, power=10):
        """Detached, L2-normalized embeddings: local encoder output plus a
        `power`-step symmetrically-normalized neighborhood propagation."""
        local_h = self.encoder(x, corrupt=False)
        feat = local_h.clone().squeeze(0)
        # deg^-0.5 factors for symmetric normalization (clamped for deg 0)
        norm = torch.pow(g.in_degrees().float().clamp(min=1), -0.5).unsqueeze(1).to(local_h.device)
        for i in range(power):
            # one step of D^-1/2 A D^-1/2 via message passing
            feat = feat * norm
            g.ndata['h2'] = feat
            g.update_all(fn.copy_u('h2', 'm'), fn.sum('m', 'h2'))
            feat = g.ndata.pop('h2')
            feat = feat * norm
        global_h = feat.unsqueeze(0)
        # stop gradients: embeddings are used for clustering, not training
        local_h, global_h = map(lambda tmp: tmp.detach(), [local_h, global_h])
        h = local_h + global_h
        h = h.squeeze(0)
        h = F.normalize(h, p=2, dim=-1)
        return h

    @staticmethod
    def dis_fun(x, c):
        """Pairwise squared Euclidean distances between rows of x and c."""
        xx = (x * x).sum(-1).reshape(-1, 1).repeat(1, c.shape[0])
        cc = (c * c).sum(-1).reshape(1, -1).repeat(x.shape[0], 1)
        xx_cc = xx + cc
        xc = x @ c.T
        distance = xx_cc - 2 * xc
        return distance

    @staticmethod
    def no_diag(x, n):
        """Build a view of x's off-diagonal elements.

        NOTE(review): the resulting view is discarded and `x` is NOT
        modified, so the (zero) diagonal still contributes to the mean
        taken in cal_loss — confirm whether this is intended.
        """
        x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

    def cal_loss(self, x, g, disc_y):
        """Joint loss: pull samples to centers, push centers apart, plus a
        `tradeoff`-weighted discrimination term."""
        # augmentations
        x_aug = aug_feature_dropout(x).squeeze(0)
        # discrimination loss
        logit = self.forward(x_aug)
        loss_disc = self.discrimination_loss(logit, disc_y)
        # clustering loss
        h = self.embed(x, g, power=10)
        sample_center_distance = self.dis_fun(h, self.cluster_center)
        center_distance = self.dis_fun(self.cluster_center, self.cluster_center)
        self.no_diag(center_distance, self.cluster_center.shape[0])
        clustering_loss = sample_center_distance.mean() - center_distance.mean()
        # tradeoff
        loss = clustering_loss + self.tradeoff * loss_disc
        return loss, sample_center_distance

    def clustering(self, x, adj):
        """Assign every node to its nearest cluster center (numpy array)."""
        h = self.embed(x, adj, power=10)
        sample_center_distance = self.dis_fun(h, self.cluster_center)
        cluster_results = torch.argmin(sample_center_distance, dim=-1)
        return cluster_results.cpu().detach().numpy()
| 8,030 | 34.852679 | 124 | py |
ice-ice | ice-ice/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a network pickle from file-like object ``f``.

    Returns a dict with keys 'G', 'D', 'G_ema', 'training_set_kwargs',
    'augment_pipe'.  Legacy TensorFlow pickles (a (G, D, Gs) tuple of
    _TFNetworkStub) are converted to native PyTorch modules on the fly.
    With ``force_fp16=True`` the networks are rebuilt with mixed-precision
    settings (num_fp16_res=4, conv_clamp=256) and the weights copied over.
    """
    data = _LegacyUnpickler(f).load()
    # Legacy TensorFlow pickle => convert.
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        G = convert_tf_generator(tf_G)
        D = convert_tf_discriminator(tf_D)
        G_ema = convert_tf_generator(tf_Gs)
        data = dict(G=G, D=D, G_ema=G_ema)
    # Add missing fields.
    if 'training_set_kwargs' not in data:
        data['training_set_kwargs'] = None
    if 'augment_pipe' not in data:
        data['augment_pipe'] = None
    # Validate contents.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            if key.startswith('G'):
                kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {}))
                kwargs.synthesis_kwargs.num_fp16_res = 4
                kwargs.synthesis_kwargs.conv_clamp = 256
            if key.startswith('D'):
                kwargs.num_fp16_res = 4
                kwargs.conv_clamp = 256
            # Only rebuild (and copy weights) when the kwargs actually changed.
            if kwargs != old.init_kwargs:
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    """Stand-in for ``dnnlib.tflib.network.Network`` when unpickling legacy
    TensorFlow pickles; simply holds the pickled state as an EasyDict."""
    pass
class _LegacyUnpickler(pickle.Unpickler):
    """Unpickler that redirects legacy TF network classes to _TFNetworkStub,
    so old pickles load without the TensorFlow codebase being present."""
    def find_class(self, module, name):
        # Substitute the stub for the one class we cannot (and need not) import.
        if module == 'dnnlib.tflib.network' and name == 'Network':
            return _TFNetworkStub
        return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into ``module``'s params/buffers by regex.

    ``patterns`` alternates (regex, value_fn): for each named param/buffer the
    first fully-matching regex wins and ``value_fn(*match.groups())`` supplies
    the value; a value_fn of None marks the name as intentionally left alone.
    Any unmatched name trips the assert; on failure the offending name and
    shape are printed before re-raising.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        # Even-indexed entries are regexes, odd-indexed entries their value fns.
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            # Print diagnostics (which tensor failed, and its shape), then
            # propagate the original error.
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a legacy TensorFlow generator (_TFNetworkStub) into a native
    PyTorch ``training.networks.Generator`` with all weights copied over.

    Raises ValueError if the pickle is too old or carries unrecognized kwargs.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        # Record the name as recognized; translate a stored None into `none`.
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        z_dim = kwarg('latent_size', 512),
        c_dim = kwarg('label_size', 0),
        w_dim = kwarg('dlatent_size', 512),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 8),
            embed_features = kwarg('label_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.01),
            w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
        ),
        synthesis_kwargs = dnnlib.EasyDict(
            channel_base = kwarg('fmap_base', 16384) * 2,
            channel_max = kwarg('fmap_max', 512),
            num_fp16_res = kwarg('num_fp16_res', 0),
            conv_clamp = kwarg('conv_clamp', None),
            architecture = kwarg('architecture', 'skip'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            use_noise = kwarg('use_noise', True),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            # Progressive-growing pickle: rename per-lod ToRGB params to
            # resolution-keyed names and force the 'orig' architecture.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Bug fix: was `kwargs.synthesis.kwargs.architecture`, which raises
            # AttributeError (the EasyDict has no `synthesis` key). The intended
            # target is the `synthesis_kwargs` sub-dict, mirroring the
            # discriminator conversion path.
            kwargs.synthesis_kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params.
    from training import networks
    G = networks.Generator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    _populate_module_params(G,
        r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter', None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a legacy TensorFlow discriminator (_TFNetworkStub) into a
    native PyTorch ``training.networks.Discriminator`` with weights copied.

    Raises ValueError if the pickle is too old or carries unrecognized kwargs.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Record the name as recognized, then look it up with a default.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs.
    kwarg('structure')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            # Progressive-growing pickle: rename per-lod FromRGB params to
            # resolution-keyed names and force the 'orig' architecture.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params.
    from training import networks
    D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,
    )
    return D
#----------------------------------------------------------------------------
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    # Load (and, if needed, convert) the source pickle; may be a URL.
    print(f'Loading "{source}"...')
    with dnnlib.util.open_url(source) as f:
        data = load_network_pkl(f, force_fp16=force_fp16)
    # Write the converted networks as a native PyTorch pickle.
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as f:
        pickle.dump(data, f)
    print('Done.')
#----------------------------------------------------------------------------
# Script entry point: click parses the command-line arguments.
if __name__ == "__main__":
    convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,502 | 50.411215 | 154 | py |
ice-ice | ice-ice/style_mixing.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate style mixing image matrix using pretrained network pickle."""
import os
import re
from typing import List
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
    bounds = re.match(r'^(\d+)-(\d+)$', s)
    if bounds:
        lo = int(bounds.group(1))
        hi = int(bounds.group(2))
        return list(range(lo, hi + 1))
    return [int(chunk) for chunk in s.split(',')]
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True)
@click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True)
@click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', type=str, required=True)
def generate_style_mix(
    network_pkl: str,
    row_seeds: List[int],
    col_seeds: List[int],
    col_styles: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    python style_mixing.py --outdir=out --rows=85,100,75,458,1500 --cols=55,821,1789,293 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    print('Generating W vectors...')
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])
    all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
    w_avg = G.mapping.w_avg
    # Truncation trick: interpolate each W toward the mapping network's mean.
    all_w = w_avg + (all_w - w_avg) * truncation_psi
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}
    print('Generating images...')
    all_images = G.synthesis(all_w, noise_mode=noise_mode)
    # Convert from [-1, 1] float NCHW to uint8 HWC images.
    all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
    image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            # Row seed supplies the base W; the selected style layers are
            # overwritten with the column seed's W.
            w = w_dict[row_seed].clone()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
            image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()
    print('Saving images...')
    os.makedirs(outdir, exist_ok=True)
    for (row_seed, col_seed), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png')
    print('Saving image grid...')
    W = G.img_resolution
    H = G.img_resolution
    canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
    for row_idx, row_seed in enumerate([0] + row_seeds):
        for col_idx, col_seed in enumerate([0] + col_seeds):
            if row_idx == 0 and col_idx == 0:
                # Top-left corner of the grid stays empty (black).
                continue
            key = (row_seed, col_seed)
            if row_idx == 0:
                key = (col_seed, col_seed)
            if col_idx == 0:
                key = (row_seed, row_seed)
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
    canvas.save(f'{outdir}/grid.png')
#----------------------------------------------------------------------------
# Script entry point: click parses the command-line arguments.
if __name__ == "__main__":
    generate_style_mix() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 4,891 | 40.109244 | 132 | py |
ice-ice | ice-ice/projector.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Project given image to the latent space of pretrained network pickle."""
import copy
import os
from time import perf_counter
import click
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import dnnlib
import legacy
def project(
    G,
    target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
    *,
    num_steps = 1000,
    w_avg_samples = 10000,
    initial_learning_rate = 0.1,
    initial_noise_factor = 0.05,
    lr_rampdown_length = 0.25,
    lr_rampup_length = 0.05,
    noise_ramp_length = 0.75,
    regularize_noise_weight = 1e5,
    verbose = False,
    device: torch.device
):
    """Project ``target`` into the W latent space of generator ``G`` by
    optimizing a single W vector (plus the noise buffers) against an
    LPIPS-style VGG16 feature distance.

    Returns the per-step W trajectory broadcast over all style layers,
    shape [num_steps, G.mapping.num_ws, w_dim].
    """
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)
    def logprint(*args):
        if verbose:
            print(*args)
    G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore
    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
    # Setup noise inputs.
    noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }
    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)
    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)
    # w_opt starts at the mean W and is the only latent being optimized.
    w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
    w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)
    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True
    for step in range(num_steps):
        # Learning rate schedule.
        t = step / num_steps
        # Exploration noise on W decays to zero over the first noise_ramp_length of training.
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
        synth_images = G.synthesis(ws, noise_mode='const')
        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255/2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()
        # Noise regularization: penalize spatial autocorrelation of the noise
        # maps at every scale so they stay noise-like.
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight
        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
        # Save projected W for each optimization step.
        w_out[step] = w_opt.detach()[0]
        # Normalize noise to zero mean / unit variance after every step.
        with torch.no_grad():
            for buf in noise_bufs.values():
                buf -= buf.mean()
                buf *= buf.square().mean().rsqrt()
    return w_out.repeat([1, G.mapping.num_ws, 1])
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')
@click.option('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
def run_projection(
    network_pkl: str,
    target_fname: str,
    outdir: str,
    save_video: bool,
    seed: int,
    num_steps: int
):
    """Project given image to the latent space of pretrained network pickle.
    Examples:
    \b
    python projector.py --outdir=out --target=~/mytargetimg.png \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Load networks.
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
    # Load target image: center-crop to square, resize to G's resolution.
    target_pil = PIL.Image.open(target_fname).convert('RGB')
    w, h = target_pil.size
    s = min(w, h)
    target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
    target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
    target_uint8 = np.array(target_pil, dtype=np.uint8)
    # Optimize projection.
    start_time = perf_counter()
    projected_w_steps = project(
        G,
        target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable
        num_steps=num_steps,
        device=device,
        verbose=True
    )
    print (f'Elapsed: {(perf_counter()-start_time):.1f} s')
    # Render debug output: optional video and projected image and W vector.
    os.makedirs(outdir, exist_ok=True)
    if save_video:
        video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
        print (f'Saving optimization progress video "{outdir}/proj.mp4"')
        for projected_w in projected_w_steps:
            # Each frame shows target (left) next to the current synthesis (right).
            synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
            synth_image = (synth_image + 1) * (255/2)
            synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
            video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
        video.close()
    # Save final projected frame and W vector.
    target_pil.save(f'{outdir}/target.png')
    projected_w = projected_w_steps[-1]
    synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
    synth_image = (synth_image + 1) * (255/2)
    synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
    PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')
    np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())
#----------------------------------------------------------------------------
# Script entry point: click parses the command-line arguments.
if __name__ == "__main__":
    run_projection() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 8,990 | 41.211268 | 136 | py |
ice-ice | ice-ice/generate.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
    bounds = re.match(r'^(\d+)-(\d+)$', s)
    if bounds:
        lo = int(bounds.group(1))
        hi = int(bounds.group(2))
        return list(range(lo, hi + 1))
    return [int(chunk) for chunk in s.split(',')]
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
    ctx: click.Context,
    network_pkl: str,
    seeds: Optional[List[int]],
    truncation_psi: float,
    noise_mode: str,
    outdir: str,
    class_idx: Optional[int],
    projected_w: Optional[str]
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    # Generate curated MetFaces images without truncation (Fig.10 left)
    python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate uncurated MetFaces images with truncation (Fig.12 upper left)
    python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate class conditional CIFAR-10 images (Fig.17 left, Car)
    python generate.py --outdir=out --seeds=0-35 --class=1 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
    \b
    # Render an image from projected W
    python generate.py --outdir=out --projected_w=projected_w.npz \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    # Synthesize the result of a W projection.
    if projected_w is not None:
        if seeds is not None:
            print ('warn: --seeds is ignored when using --projected-w')
        print(f'Generating images from projected W "{projected_w}"')
        ws = np.load(projected_w)['w']
        ws = torch.tensor(ws, device=device) # pylint: disable=not-callable
        assert ws.shape[1:] == (G.num_ws, G.w_dim)
        for idx, w in enumerate(ws):
            img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
            # Convert from [-1, 1] float NCHW to uint8 HWC.
            img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            # Fix: Image.save() returns None, so the original's `img = ...`
            # rebind here was dead; just save without the misleading assignment.
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/proj{idx:02d}.png')
        return
    if seeds is None:
        ctx.fail('--seeds option is required when not using --projected-w')
    # Labels.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            ctx.fail('Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    else:
        if class_idx is not None:
            print ('warn: --class=lbl ignored when running on an unconditional network')
    # Generate images.
    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
        img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
#----------------------------------------------------------------------------
# Script entry point: click parses the command-line arguments.
if __name__ == "__main__":
    generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 5,338 | 40.069231 | 132 | py |
ice-ice | ice-ice/train.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Training Generative Adversarial Networks with Limited Data"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
class UserError(Exception):
    """Signals invalid user-supplied options/arguments.

    NOTE(review): presumably caught at the CLI boundary and reported without a
    traceback — the handler is outside this view, confirm against main().
    """
    pass
#----------------------------------------------------------------------------
def setup_training_loop_kwargs(
# General options (not included in desc).
gpus = None, # Number of GPUs: <int>, default = 1 gpu
snap = None, # Snapshot interval: <int>, default = 50 ticks
metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...
seed = None, # Random seed: <int>, default = 0
# Dataset.
data = None, # Training dataset (required): <path>
cond = None, # Train conditional model based on dataset labels: <bool>, default = False
subset = None, # Train with only N images: <int>, default = all
mirror = None, # Augment dataset with x-flips: <bool>, default = False
# Base config.
cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'
gamma = None, # Override R1 gamma: <float>
kimg = None, # Override training duration: <int>
batch = None, # Override batch size: <int>
# Discriminator augmentation.
aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'
p = None, # Specify p for 'fixed' (required): <float>
target = None, # Override ADA target for 'ada': <float>, default = depends on aug
augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'
# Transfer learning.
resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
freezed = None, # Freeze-D: <int>, default = 0 discriminator layers
# Performance options (not included in desc).
fp32 = None, # Disable mixed-precision training: <bool>, default = False
nhwc = None, # Use NHWC memory format with FP16: <bool>, default = False
allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False
nobench = None, # Disable cuDNN benchmarking: <bool>, default = False
workers = None, # Override number of DataLoader workers: <int>, default = 3
):
args = dnnlib.EasyDict()
# ------------------------------------------
# General options: gpus, snap, metrics, seed
# ------------------------------------------
if gpus is None:
gpus = 1
assert isinstance(gpus, int)
if not (gpus >= 1 and gpus & (gpus - 1) == 0):
raise UserError('--gpus must be a power of two')
args.num_gpus = gpus
if snap is None:
snap = 50
assert isinstance(snap, int)
if snap < 1:
raise UserError('--snap must be at least 1')
args.image_snapshot_ticks = snap
args.network_snapshot_ticks = snap
if metrics is None:
metrics = ['fid50k_full']
assert isinstance(metrics, list)
if not all(metric_main.is_valid_metric(metric) for metric in metrics):
raise UserError('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
args.metrics = metrics
if seed is None:
seed = 0
assert isinstance(seed, int)
args.random_seed = seed
# -----------------------------------
# Dataset: data, cond, subset, mirror
# -----------------------------------
assert data is not None
assert isinstance(data, str)
args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
try:
training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
desc = training_set.name
del training_set # conserve memory
except IOError as err:
raise UserError(f'--data: {err}')
if cond is None:
cond = False
assert isinstance(cond, bool)
if cond:
if not args.training_set_kwargs.use_labels:
raise UserError('--cond=True requires labels specified in dataset.json')
desc += '-cond'
else:
args.training_set_kwargs.use_labels = False
if subset is not None:
assert isinstance(subset, int)
if not 1 <= subset <= args.training_set_kwargs.max_size:
raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')
desc += f'-subset{subset}'
if subset < args.training_set_kwargs.max_size:
args.training_set_kwargs.max_size = subset
args.training_set_kwargs.random_seed = args.random_seed
if mirror is None:
mirror = False
assert isinstance(mirror, bool)
if mirror:
desc += '-mirror'
args.training_set_kwargs.xflip = True
# ------------------------------------
# Base config: cfg, gamma, kimg, batch
# ------------------------------------
if cfg is None:
cfg = 'auto'
assert isinstance(cfg, str)
desc += f'-{cfg}'
cfg_specs = {
'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=-1, ema=-1, ramp=0.05, map=2), # Populated dynamically based on resolution and GPU count.
'stylegan2': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=10, ema=10, ramp=None, map=8), # Uses mixed-precision, unlike the original StyleGAN2.
'paper256': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=0.5, lrate=0.0025, gamma=1, ema=20, ramp=None, map=8),
'paper512': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=1, lrate=0.0025, gamma=0.5, ema=20, ramp=None, map=8),
'paper1024': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=2, ema=10, ramp=None, map=8),
'cifar': dict(ref_gpus=2, kimg=100000, mb=64, mbstd=32, fmaps=1, lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
}
assert cfg in cfg_specs
spec = dnnlib.EasyDict(cfg_specs[cfg])
if cfg == 'auto':
desc += f'{gpus:d}'
spec.ref_gpus = gpus
res = args.training_set_kwargs.resolution
spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
spec.fmaps = 1 if res >= 512 else 0.5
spec.lrate = 0.002 if res >= 1024 else 0.0025
spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
spec.ema = spec.mb * 10 / 32
args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())
args.D_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)
args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
args.G_kwargs.mapping_kwargs.num_layers = spec.map
args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd
args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)
args.total_kimg = spec.kimg
args.batch_size = spec.mb
args.batch_gpu = spec.mb // spec.ref_gpus
args.ema_kimg = spec.ema
args.ema_rampup = spec.ramp
if cfg == 'cifar':
args.loss_kwargs.pl_weight = 0 # disable path length regularization
args.loss_kwargs.style_mixing_prob = 0 # disable style mixing
args.D_kwargs.architecture = 'orig' # disable residual skip connections
if gamma is not None:
assert isinstance(gamma, float)
if not gamma >= 0:
raise UserError('--gamma must be non-negative')
desc += f'-gamma{gamma:g}'
args.loss_kwargs.r1_gamma = gamma
if kimg is not None:
assert isinstance(kimg, int)
if not kimg >= 1:
raise UserError('--kimg must be at least 1')
desc += f'-kimg{kimg:d}'
args.total_kimg = kimg
if batch is not None:
assert isinstance(batch, int)
if not (batch >= 1 and batch % gpus == 0):
raise UserError('--batch must be at least 1 and divisible by --gpus')
desc += f'-batch{batch}'
args.batch_size = batch
args.batch_gpu = batch // gpus
# ---------------------------------------------------
# Discriminator augmentation: aug, p, target, augpipe
# ---------------------------------------------------
if aug is None:
aug = 'ada'
else:
assert isinstance(aug, str)
desc += f'-{aug}'
if aug == 'ada':
args.ada_target = 0.6
elif aug == 'noaug':
pass
elif aug == 'fixed':
if p is None:
raise UserError(f'--aug={aug} requires specifying --p')
else:
raise UserError(f'--aug={aug} not supported')
if p is not None:
assert isinstance(p, float)
if aug != 'fixed':
raise UserError('--p can only be specified with --aug=fixed')
if not 0 <= p <= 1:
raise UserError('--p must be between 0 and 1')
desc += f'-p{p:g}'
args.augment_p = p
if target is not None:
assert isinstance(target, float)
if aug != 'ada':
raise UserError('--target can only be specified with --aug=ada')
if not 0 <= target <= 1:
raise UserError('--target must be between 0 and 1')
desc += f'-target{target:g}'
args.ada_target = target
assert augpipe is None or isinstance(augpipe, str)
if augpipe is None:
augpipe = 'bgc'
else:
if aug == 'noaug':
raise UserError('--augpipe cannot be specified with --aug=noaug')
desc += f'-{augpipe}'
augpipe_specs = {
'blit': dict(xflip=1, rotate90=1, xint=1),
'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),
'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
'filter': dict(imgfilter=1),
'noise': dict(noise=1),
'cutout': dict(cutout=1),
'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
}
assert augpipe in augpipe_specs
if aug != 'noaug':
args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])
# ----------------------------------
# Transfer learning: resume, freezed
# ----------------------------------
resume_specs = {
'ffhq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
}
assert resume is None or isinstance(resume, str)
if resume is None:
resume = 'noresume'
elif resume == 'noresume':
desc += '-noresume'
elif resume in resume_specs:
desc += f'-resume{resume}'
args.resume_pkl = resume_specs[resume] # predefined url
else:
desc += '-resumecustom'
args.resume_pkl = resume # custom path or url
if resume != 'noresume':
args.ada_kimg = 100 # make ADA react faster at the beginning
args.ema_rampup = None # disable EMA rampup
if freezed is not None:
assert isinstance(freezed, int)
if not freezed >= 0:
raise UserError('--freezed must be non-negative')
desc += f'-freezed{freezed:d}'
args.D_kwargs.block_kwargs.freeze_layers = freezed
# -------------------------------------------------
# Performance options: fp32, nhwc, nobench, workers
# -------------------------------------------------
if fp32 is None:
fp32 = False
assert isinstance(fp32, bool)
if fp32:
args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0
args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None
if nhwc is None:
nhwc = False
assert isinstance(nhwc, bool)
if nhwc:
args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True
if nobench is None:
nobench = False
assert isinstance(nobench, bool)
if nobench:
args.cudnn_benchmark = False
if allow_tf32 is None:
allow_tf32 = False
assert isinstance(allow_tf32, bool)
if allow_tf32:
args.allow_tf32 = True
if workers is not None:
assert isinstance(workers, int)
if not workers >= 1:
raise UserError('--workers must be at least 1')
args.data_loader_kwargs.num_workers = workers
return desc, args
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0:
custom_ops.verbosity = 'none'
# Execute training loop.
training_loop.training_loop(rank=rank, **args)
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
name = 'list'
def convert(self, value, param, ctx):
_ = param, ctx
if value is None or value.lower() == 'none' or value == '':
return []
return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
# General options.
@click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
@click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
@click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
@click.option('--metrics', help='Comma-separated list or "none" [default: fid50k_full]', type=CommaSeparatedList())
@click.option('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
# Dataset.
@click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
@click.option('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')
@click.option('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
@click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')
# Base config.
@click.option('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))
@click.option('--gamma', help='Override R1 gamma', type=float)
@click.option('--kimg', help='Override training duration', type=int, metavar='INT')
@click.option('--batch', help='Override batch size', type=int, metavar='INT')
# Discriminator augmentation.
@click.option('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
@click.option('--p', help='Augmentation probability for --aug=fixed', type=float)
@click.option('--target', help='ADA target value for --aug=ada', type=float)
@click.option('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))
# Transfer learning.
@click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
@click.option('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')
# Performance options.
@click.option('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
@click.option('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
@click.option('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
@click.option('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
@click.option('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')
def main(ctx, outdir, dry_run, **config_kwargs):
"""Train a GAN using the techniques described in the paper
"Training Generative Adversarial Networks with Limited Data".
Examples:
\b
# Train with custom dataset using 1 GPU.
python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1
\b
# Train class-conditional CIFAR-10 using 2 GPUs.
python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\
--gpus=2 --cfg=cifar --cond=1
\b
# Transfer learn MetFaces from FFHQ using 4 GPUs.
python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\
--gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10
\b
# Reproduce original StyleGAN2 config F.
python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\
--gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug
\b
Base configs (--cfg):
auto Automatically select reasonable defaults based on resolution
and GPU count. Good starting point for new datasets.
stylegan2 Reproduce results for StyleGAN2 config F at 1024x1024.
paper256 Reproduce results for FFHQ and LSUN Cat at 256x256.
paper512 Reproduce results for BreCaHAD and AFHQ at 512x512.
paper1024 Reproduce results for MetFaces at 1024x1024.
cifar Reproduce results for CIFAR-10 at 32x32.
\b
Transfer learning source networks (--resume):
ffhq256 FFHQ trained at 256x256 resolution.
ffhq512 FFHQ trained at 512x512 resolution.
ffhq1024 FFHQ trained at 1024x1024 resolution.
celebahq256 CelebA-HQ trained at 256x256 resolution.
lsundog256 LSUN Dog trained at 256x256 resolution.
<PATH or URL> Custom network pickle.
"""
dnnlib.util.Logger(should_flush=True)
# Setup training options.
try:
run_desc, args = setup_training_loop_kwargs(**config_kwargs)
except UserError as err:
ctx.fail(err)
# Pick output directory.
prev_run_dirs = []
if os.path.isdir(outdir):
prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
cur_run_id = max(prev_run_ids, default=-1) + 1
args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
assert not os.path.exists(args.run_dir)
# Print options.
print()
print('Training options:')
print(json.dumps(args, indent=2))
print()
print(f'Output directory: {args.run_dir}')
print(f'Training data: {args.training_set_kwargs.path}')
print(f'Training duration: {args.total_kimg} kimg')
print(f'Number of GPUs: {args.num_gpus}')
print(f'Number of images: {args.training_set_kwargs.max_size}')
print(f'Image resolution: {args.training_set_kwargs.resolution}')
print(f'Conditional model: {args.training_set_kwargs.use_labels}')
print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')
print()
# Dry run?
if dry_run:
print('Dry run; exiting.')
return
# Create output directory.
print('Creating output directory...')
os.makedirs(args.run_dir)
with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
json.dump(args, f, indent=2)
# Launch processes.
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 24,067 | 43.487985 | 192 | py |
ice-ice | ice-ice/calc_metrics.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0 or not args.verbose:
custom_ops.verbosity = 'none'
# Print network summary.
device = torch.device('cuda', rank)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, [z, c])
# Calculate each metric.
for metric in args.metrics:
if rank == 0 and args.verbose:
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if rank == 0 and args.verbose:
print()
# Done.
if rank == 0 and args.verbose:
print('Exiting...')
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
name = 'list'
def convert(self, value, param, ctx):
_ = param, ctx
if value is None or value.lower() == 'none' or value == '':
return []
return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True)
@click.option('--metrics', help='Comma-separated list or "none"', type=CommaSeparatedList(), default='fid50k_full', show_default=True)
@click.option('--data', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
@click.option('--mirror', help='Whether the dataset was augmented with x-flips during training [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=pr50k3_full \\
--network=~/training-runs/00000-ffhq10k-res64-auto1/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq.zip --mirror=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
Available metrics:
\b
ADA paper:
fid50k_full Frechet inception distance against the full dataset.
kid50k_full Kernel inception distance against the full dataset.
pr50k3_full Precision and recall againt the full dataset.
is50k Inception score for CIFAR-10.
\b
StyleGAN and StyleGAN2 papers:
fid50k Frechet inception distance against 50k real images.
kid50k Kernel inception distance against 50k real images.
pr50k3 Precision and recall against 50k real images.
ppl2_wend Perceptual path length in W at path endpoints against full image.
ppl_zfull Perceptual path length in Z for full paths against cropped image.
ppl_wfull Perceptual path length in W for full paths against cropped image.
ppl_zend Perceptual path length in Z at path endpoints against cropped image.
ppl_wend Perceptual path length in W at path endpoints against cropped image.
"""
dnnlib.util.Logger(should_flush=True)
# Validate arguments.
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
if not args.num_gpus >= 1:
ctx.fail('--gpus must be at least 1')
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
if args.verbose:
print(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
network_dict = legacy.load_network_pkl(f)
args.G = network_dict['G_ema'] # subclass of torch.nn.Module
# Initialize dataset options.
if data is not None:
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
elif network_dict['training_set_kwargs'] is not None:
args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
args.dataset_kwargs.resolution = args.G.img_resolution
args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
if mirror is not None:
args.dataset_kwargs.xflip = mirror
# Print dataset options.
if args.verbose:
print('Dataset options:')
print(json.dumps(args.dataset_kwargs, indent=2))
# Locate run dir.
args.run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
args.run_dir = pkl_dir
# Launch processes.
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 8,336 | 42.649215 | 142 | py |
ice-ice | ice-ice/training/loss.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain): # to be overridden by subclass
raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
    """StyleGAN2 training loss.

    Implements the non-saturating logistic GAN loss together with lazy R1
    gradient regularization on real images and path-length regularization
    on the generator. The training loop drives it one phase at a time via
    accumulate_gradients().
    """
    def __init__(self, device, G_mapping, G_synthesis, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2):
        """
        Args:
            device: Device on which the running path-length mean buffer lives.
            G_mapping: Generator mapping network, (z, c) -> ws.
            G_synthesis: Generator synthesis network, ws -> image.
            D: Discriminator network, (image, c) -> logits.
            augment_pipe: Optional augmentation applied to every image fed
                to D (None = disabled).
            style_mixing_prob: Probability of style mixing in run_G.
            r1_gamma: Weight of the R1 penalty; 0 disables the Dreg work.
            pl_batch_shrink: Factor by which the batch is shrunk when
                computing the path-length penalty.
            pl_decay: EMA decay for the running path-length mean.
            pl_weight: Weight of the path-length penalty; 0 disables Greg work.
        """
        super().__init__()
        self.device = device
        self.G_mapping = G_mapping
        self.G_synthesis = G_synthesis
        self.D = D
        self.augment_pipe = augment_pipe
        self.style_mixing_prob = style_mixing_prob
        self.r1_gamma = r1_gamma
        self.pl_batch_shrink = pl_batch_shrink
        self.pl_decay = pl_decay
        self.pl_weight = pl_weight
        # Running exponential moving average of observed path lengths.
        self.pl_mean = torch.zeros([], device=device)
    def run_G(self, z, c, sync):
        """Map latents to images, with optional style mixing; returns (img, ws)."""
        with misc.ddp_sync(self.G_mapping, sync):
            ws = self.G_mapping(z, c)
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    # With probability style_mixing_prob, replace ws beyond a
                    # random cutoff with styles from a second latent.
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    ws[:, cutoff:] = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)[:, cutoff:]
        with misc.ddp_sync(self.G_synthesis, sync):
            img = self.G_synthesis(ws)
        return img, ws
    def run_D(self, img, c, sync):
        """Run the discriminator, applying the augmentation pipeline first if present."""
        if self.augment_pipe is not None:
            img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, c)
        return logits
    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain):
        """Accumulate gradients for one training phase.

        `phase` selects which loss terms are computed ('Gmain'/'Greg'/'Gboth'
        for the generator, 'Dmain'/'Dreg'/'Dboth' for the discriminator).
        Each term is scaled by `gain` before backward(); `sync` controls DDP
        gradient synchronization of the last backward in the phase.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
        do_Gmain = (phase in ['Gmain', 'Gboth'])
        do_Dmain = (phase in ['Dmain', 'Dboth'])
        do_Gpl   = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
        do_Dr1   = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
        # Gmain: Maximize logits for generated images.
        if do_Gmain:
            with torch.autograd.profiler.record_function('Gmain_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl)) # May get synced by Gpl.
                gen_logits = self.run_D(gen_img, gen_c, sync=False)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                loss_Gmain.mean().mul(gain).backward()
        # Gpl: Apply path length regularization.
        if do_Gpl:
            with torch.autograd.profiler.record_function('Gpl_forward'):
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], sync=sync)
                pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
                self.pl_mean.copy_(pl_mean.detach())
                pl_penalty = (pl_lengths - pl_mean).square()
                training_stats.report('Loss/pl_penalty', pl_penalty)
                loss_Gpl = pl_penalty * self.pl_weight
                training_stats.report('Loss/G/reg', loss_Gpl)
            with torch.autograd.profiler.record_function('Gpl_backward'):
                # The 0-weighted gen_img term keeps the graph alive through G.
                (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if do_Dmain:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=False)
                gen_logits = self.run_D(gen_img, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()
        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if do_Dmain or do_Dr1:
            name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
                real_logits = self.run_D(real_img_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())
                loss_Dreal = 0
                if do_Dmain:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
                loss_Dr1 = 0
                if do_Dr1:
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)
            with torch.autograd.profiler.record_function(name + '_backward'):
                # The 0-weighted real_logits term keeps the graph alive through D.
                (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
#----------------------------------------------------------------------------
| 7,297 | 53.462687 | 160 | py |
ice-ice | ice-ice/training/augment.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
# Keys use the standard wavelet family names (Haar, Daubechies 'dbN',
# symlets 'symN'); values are the raw filter taps, consumed below to build
# the orthogonal lowpass filter for geometric augmentations ('sym6') and
# the image-space filter bank ('sym2').
wavelets = {
    'haar': [0.7071067811865476, 0.7071067811865476],
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
    """Build a (possibly batched) transformation matrix from explicit rows.

    Each row is a sequence of scalars and/or tensors. If no entry is a
    tensor, a constant matrix is returned; otherwise scalar entries are
    broadcast to the shape of the first tensor entry and the result has
    shape ref.shape + (num_rows, num_cols).
    """
    width = len(rows[0])
    assert all(len(row) == width for row in rows)
    flat = [entry for row in rows for entry in row]
    tensors = [entry for entry in flat if isinstance(entry, torch.Tensor)]
    if not tensors:
        return misc.constant(np.asarray(rows), device=device)
    template = tensors[0]
    assert device is None or device == template.device
    flat = [entry if isinstance(entry, torch.Tensor)
            else misc.constant(entry, shape=template.shape, device=template.device)
            for entry in flat]
    return torch.stack(flat, dim=-1).reshape(template.shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
    """Homogeneous 2D translation matrix for the offset (tx, ty)."""
    rows = [
        [1, 0, tx],
        [0, 1, ty],
        [0, 0, 1],
    ]
    return matrix(*rows, **kwargs)
def translate3d(tx, ty, tz, **kwargs):
    """Homogeneous 3D translation matrix for the offset (tx, ty, tz)."""
    rows = [
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1],
    ]
    return matrix(*rows, **kwargs)
def scale2d(sx, sy, **kwargs):
    """Homogeneous 2D scaling matrix with factors (sx, sy)."""
    rows = [
        [sx, 0, 0],
        [0, sy, 0],
        [0, 0, 1],
    ]
    return matrix(*rows, **kwargs)
def scale3d(sx, sy, sz, **kwargs):
    """Homogeneous 3D scaling matrix with factors (sx, sy, sz)."""
    rows = [
        [sx, 0, 0, 0],
        [0, sy, 0, 0],
        [0, 0, sz, 0],
        [0, 0, 0, 1],
    ]
    return matrix(*rows, **kwargs)
def rotate2d(theta, **kwargs):
    """Homogeneous 2D rotation matrix for angle theta (a tensor, in radians)."""
    c = torch.cos(theta)
    s = torch.sin(theta)
    return matrix(
        [c, torch.sin(-theta), 0],
        [s, c, 0],
        [0, 0, 1],
        **kwargs)
def rotate3d(v, theta, **kwargs):
    """Homogeneous 3D rotation matrix around axis v by angle theta.

    Uses the axis-angle (Rodrigues) form; v is indexed as (..., 3).
    """
    ax = v[..., 0]
    ay = v[..., 1]
    az = v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [ax*ax*cc+c, ax*ay*cc-az*s, ax*az*cc+ay*s, 0],
        [ay*ax*cc+az*s, ay*ay*cc+c, ay*az*cc-ax*s, 0],
        [az*ax*cc-ay*s, az*ay*cc+ax*s, az*az*cc+c, 0],
        [0, 0, 0, 1],
        **kwargs)
def translate2d_inv(tx, ty, **kwargs):
    """Inverse of translate2d: translate by (-tx, -ty)."""
    return translate2d(tx=-tx, ty=-ty, **kwargs)
def scale2d_inv(sx, sy, **kwargs):
    """Inverse of scale2d: scale by the reciprocal factors."""
    return scale2d(sx=1 / sx, sy=1 / sy, **kwargs)
def rotate2d_inv(theta, **kwargs):
    """Inverse of rotate2d: rotate by -theta."""
    return rotate2d(theta=-theta, **kwargs)
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
    """Adaptive discriminator augmentation (ADA) pipeline.

    Applies pixel blitting, general geometric transforms, color transforms,
    image-space filtering, and image-space corruptions to a batch of images.
    Every individual augmentation is applied with probability
    (its multiplier * self.p), where the buffer `p` is the overall strength
    adjusted externally by the training loop.
    """
    def __init__(self,
        xflip=0, rotate90=0, xint=0, xint_max=0.125,
        scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
        brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
        imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
        noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
    ):
        super().__init__()
        self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability.
        # Pixel blitting.
        self.xflip = float(xflip) # Probability multiplier for x-flip.
        self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
        self.xint = float(xint) # Probability multiplier for integer translation.
        self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
        # General geometric transformations.
        self.scale = float(scale) # Probability multiplier for isotropic scaling.
        self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
        self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
        self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
        self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
        self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
        self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
        self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions.
        # Color transformations.
        self.brightness = float(brightness) # Probability multiplier for brightness.
        self.contrast = float(contrast) # Probability multiplier for contrast.
        self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
        self.hue = float(hue) # Probability multiplier for hue rotation.
        self.saturation = float(saturation) # Probability multiplier for saturation.
        self.brightness_std = float(brightness_std) # Standard deviation of brightness.
        self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
        self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
        self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
        # Image-space filtering.
        self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
        self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
        self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
        # Image-space corruptions.
        self.noise = float(noise) # Probability multiplier for additive RGB noise.
        self.cutout = float(cutout) # Probability multiplier for cutout.
        self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
        self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
        # Setup orthogonal lowpass filter for geometric augmentations.
        self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
        # Construct filter bank for image-space filtering.
        Hz_lo = np.asarray(wavelets['sym2']) # H(z)
        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
        Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
        for i in range(1, Hz_fbank.shape[0]):
            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
        self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
    def forward(self, images, debug_percentile=None):
        """Augment a batch of images.

        Args:
            images: Tensor of shape [batch, channels, height, width].
            debug_percentile: Optional scalar; when given, every random draw
                is replaced by the corresponding deterministic percentile,
                making the pipeline reproducible for debugging.

        Returns:
            The augmented images, same shape as the input.
        """
        assert isinstance(images, torch.Tensor) and images.ndim == 4
        batch_size, num_channels, height, width = images.shape
        device = images.device
        if debug_percentile is not None:
            debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)
        # -------------------------------------
        # Select parameters for pixel blitting.
        # -------------------------------------
        # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
        I_3 = torch.eye(3, device=device)
        G_inv = I_3
        # Apply x-flip with probability (xflip * strength).
        if self.xflip > 0:
            i = torch.floor(torch.rand([batch_size], device=device) * 2)
            i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 2))
            G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)
        # Apply 90 degree rotations with probability (rotate90 * strength).
        if self.rotate90 > 0:
            i = torch.floor(torch.rand([batch_size], device=device) * 4)
            i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 4))
            G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)
        # Apply integer translation with probability (xint * strength).
        if self.xint > 0:
            t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
            t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
            if debug_percentile is not None:
                t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
            G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))
        # --------------------------------------------------------
        # Select parameters for general geometric transformations.
        # --------------------------------------------------------
        # Apply isotropic scaling with probability (scale * strength).
        if self.scale > 0:
            s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
            s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
            G_inv = G_inv @ scale2d_inv(s, s)
        # Apply pre-rotation with probability p_rot.
        p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
        if self.rotate > 0:
            theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
            theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
            G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.
        # Apply anisotropic scaling with probability (aniso * strength).
        if self.aniso > 0:
            s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
            s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
            G_inv = G_inv @ scale2d_inv(s, 1 / s)
        # Apply post-rotation with probability p_rot.
        if self.rotate > 0:
            theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
            theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.zeros_like(theta)
            G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.
        # Apply fractional translation with probability (xfrac * strength).
        if self.xfrac > 0:
            t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
            t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
            if debug_percentile is not None:
                t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
            G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)
        # ----------------------------------
        # Execute geometric transformations.
        # ----------------------------------
        # Execute if the transform is not identity.
        if G_inv is not I_3:
            # Calculate padding.
            cx = (width - 1) / 2
            cy = (height - 1) / 2
            cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
            cp = G_inv @ cp.t() # [batch, xyz, idx]
            Hz_pad = self.Hz_geom.shape[0] // 4
            margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
            margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
            margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
            margin = margin.max(misc.constant([0, 0] * 2, device=device))
            margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
            mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)
            # Pad image and adjust origin.
            images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
            G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv
            # Upsample.
            images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
            G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
            G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)
            # Execute transformation.
            shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
            G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
            grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
            images = grid_sample_gradfix.grid_sample(images, grid)
            # Downsample and crop.
            images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)
        # --------------------------------------------
        # Select parameters for color transformations.
        # --------------------------------------------
        # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
        I_4 = torch.eye(4, device=device)
        C = I_4
        # Apply brightness with probability (brightness * strength).
        if self.brightness > 0:
            b = torch.randn([batch_size], device=device) * self.brightness_std
            b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
            if debug_percentile is not None:
                b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
            C = translate3d(b, b, b) @ C
        # Apply contrast with probability (contrast * strength).
        if self.contrast > 0:
            c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
            c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
            if debug_percentile is not None:
                c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
            C = scale3d(c, c, c) @ C
        # Apply luma flip with probability (lumaflip * strength).
        v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
        if self.lumaflip > 0:
            i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
            i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 2))
            C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.
        # Apply hue rotation with probability (hue * strength).
        if self.hue > 0 and num_channels > 1:
            theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
            theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
            C = rotate3d(v, theta) @ C # Rotate around v.
        # Apply saturation with probability (saturation * strength).
        if self.saturation > 0 and num_channels > 1:
            s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
            s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
            C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C
        # ------------------------------
        # Execute color transformations.
        # ------------------------------
        # Execute if the transform is not identity.
        if C is not I_4:
            images = images.reshape([batch_size, num_channels, height * width])
            if num_channels == 3:
                images = C[:, :3, :3] @ images + C[:, :3, 3:]
            elif num_channels == 1:
                C = C[:, :3, :].mean(dim=1, keepdims=True)
                images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
            else:
                raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
            images = images.reshape([batch_size, num_channels, height, width])
        # ----------------------
        # Image-space filtering.
        # ----------------------
        if self.imgfilter > 0:
            num_bands = self.Hz_fbank.shape[0]
            assert len(self.imgfilter_bands) == num_bands
            expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).
            # Apply amplification for each band with probability (imgfilter * strength * band_strength).
            g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
            for i, band_strength in enumerate(self.imgfilter_bands):
                t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
                t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
                if debug_percentile is not None:
                    t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
                t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
                t[:, i] = t_i # Replace i'th element.
                t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
                g = g * t # Accumulate into global gain.
            # Construct combined amplification filter.
            Hz_prime = g @ self.Hz_fbank # [batch, tap]
            Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
            Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]
            # Apply filter.
            p = self.Hz_fbank.shape[1] // 2
            images = images.reshape([1, batch_size * num_channels, height, width])
            images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
            images = images.reshape([batch_size, num_channels, height, width])
        # ------------------------
        # Image-space corruptions.
        # ------------------------
        # Apply additive RGB noise with probability (noise * strength).
        if self.noise > 0:
            sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
            sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
            if debug_percentile is not None:
                sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
            images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma
        # Apply cutout with probability (cutout * strength).
        if self.cutout > 0:
            size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
            size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
            center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
            if debug_percentile is not None:
                size = torch.full_like(size, self.cutout_size)
                center = torch.full_like(center, debug_percentile)
            coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
            coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
            mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
            mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
            mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
            images = images * mask
        return images
#----------------------------------------------------------------------------
| 26,373 | 60.050926 | 366 | py |
ice-ice | ice-ice/training/dataset.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
    """Base class for image datasets.

    Handles optional size limiting, optional x-flip based dataset doubling,
    and lazily loaded conditioning labels. Subclasses supply the raw image
    and label loading via _load_raw_image() / _load_raw_labels().
    """
    def __init__(self,
        name, # Name of the dataset.
        raw_shape, # Shape of the raw image data (NCHW).
        max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
        use_labels = False, # Enable conditioning labels? False = label dimension is zero.
        xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size.
        random_seed = 0, # Random seed to use when applying max_size.
    ):
        self._name = name
        self._raw_shape = list(raw_shape)
        self._use_labels = use_labels
        self._raw_labels = None
        self._label_shape = None
        # Apply max_size.
        self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
        if (max_size is not None) and (self._raw_idx.size > max_size):
            np.random.RandomState(random_seed).shuffle(self._raw_idx)
            self._raw_idx = np.sort(self._raw_idx[:max_size])
        # Apply xflip.
        self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
        if xflip:
            self._raw_idx = np.tile(self._raw_idx, 2)
            self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])
    def _get_raw_labels(self):
        """Load the raw labels on first use, validate them, and cache them."""
        if self._raw_labels is None:
            self._raw_labels = self._load_raw_labels() if self._use_labels else None
            if self._raw_labels is None:
                # No labels available: use an empty float32 label per image.
                self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
            assert isinstance(self._raw_labels, np.ndarray)
            assert self._raw_labels.shape[0] == self._raw_shape[0]
            assert self._raw_labels.dtype in [np.float32, np.int64]
            if self._raw_labels.dtype == np.int64:
                # int64 labels are class indices; must be 1D and non-negative.
                assert self._raw_labels.ndim == 1
                assert np.all(self._raw_labels >= 0)
        return self._raw_labels
    def close(self): # to be overridden by subclass
        pass
    def _load_raw_image(self, raw_idx): # to be overridden by subclass
        raise NotImplementedError
    def _load_raw_labels(self): # to be overridden by subclass
        raise NotImplementedError
    def __getstate__(self):
        # Drop the cached labels when pickling; they are re-loaded lazily.
        return dict(self.__dict__, _raw_labels=None)
    def __del__(self):
        try:
            self.close()
        except:
            pass
    def __len__(self):
        return self._raw_idx.size
    def __getitem__(self, idx):
        """Return (uint8 CHW image copy, label) for the given index."""
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1]
        return image.copy(), self.get_label(idx)
    def get_label(self, idx):
        """Return the label for idx; int64 class indices are expanded to one-hot float32."""
        label = self._get_raw_labels()[self._raw_idx[idx]]
        if label.dtype == np.int64:
            onehot = np.zeros(self.label_shape, dtype=np.float32)
            onehot[label] = 1
            label = onehot
        return label.copy()
    def get_details(self, idx):
        """Return raw index, xflip flag, and raw label for inspection/debugging."""
        d = dnnlib.EasyDict()
        d.raw_idx = int(self._raw_idx[idx])
        d.xflip = (int(self._xflip[idx]) != 0)
        d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
        return d
    @property
    def name(self):
        return self._name
    @property
    def image_shape(self):
        # [channels, height, width]
        return list(self._raw_shape[1:])
    @property
    def num_channels(self):
        assert len(self.image_shape) == 3 # CHW
        return self.image_shape[0]
    @property
    def resolution(self):
        # Images must be square; resolution is the shared side length.
        assert len(self.image_shape) == 3 # CHW
        assert self.image_shape[1] == self.image_shape[2]
        return self.image_shape[1]
    @property
    def label_shape(self):
        # One-hot width for int64 class labels, raw trailing shape otherwise.
        if self._label_shape is None:
            raw_labels = self._get_raw_labels()
            if raw_labels.dtype == np.int64:
                self._label_shape = [int(np.max(raw_labels)) + 1]
            else:
                self._label_shape = raw_labels.shape[1:]
        return list(self._label_shape)
    @property
    def label_dim(self):
        assert len(self.label_shape) == 1
        return self.label_shape[0]
    @property
    def has_labels(self):
        return any(x != 0 for x in self.label_shape)
    @property
    def has_onehot_labels(self):
        return self._get_raw_labels().dtype == np.int64
#----------------------------------------------------------------------------
class ImageFolderDataset(Dataset):
    """Dataset of image files loaded from a directory tree or a zip archive.

    Labels, if present, are read from 'dataset.json' inside the same
    directory/zip. PNGs are decoded with pyspng when available.
    """
    def __init__(self,
        path, # Path to directory or zip.
        resolution = None, # Ensure specific resolution, None = highest available.
        **super_kwargs, # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._zipfile = None
        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')
        PIL.Image.init()
        self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')
        name = os.path.splitext(os.path.basename(self._path))[0]
        # Probe the first image to determine the common shape.
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)
    @staticmethod
    def _file_ext(fname):
        # Lower-case file extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()
    def _get_zipfile(self):
        # Open the zip archive lazily and cache the handle.
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile
    def _open_file(self, fname):
        # Return a binary file object for fname, regardless of backing store.
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None
    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
        finally:
            self._zipfile = None
    def __getstate__(self):
        # Zip handles are not picklable; the archive is re-opened lazily.
        return dict(super().__getstate__(), _zipfile=None)
    def _load_raw_image(self, raw_idx):
        """Decode image number raw_idx as a uint8 CHW ndarray."""
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                image = pyspng.load(f.read())
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image
    def _load_raw_labels(self):
        """Read labels from dataset.json; returns None when unavailable."""
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels
#----------------------------------------------------------------------------
| 8,551 | 35.084388 | 158 | py |
ice-ice | ice-ice/training/networks.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import itertools
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Scale `x` so that its mean square along `dim` equals 1 (up to `eps`)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * torch.rsqrt(second_moment + eps)
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
    ):
    """StyleGAN2 modulated convolution: per-sample style modulation, conv2d
    with optional up/downsampling, optional demodulation, and optional noise.

    Returns a tensor of shape [batch_size, out_channels, out_height, out_width].
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add: x * dcoefs + noise in one kernel.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    # Fold the batch into the channel dimension and use one conv group per sample.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully-connected layer with equalized learning rate and optional activation."""
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.activation = activation
        # Weights are stored scaled down and re-scaled at runtime (equalized LR).
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier
    def forward(self, x):
        w = self.weight.to(x.dtype) * self.weight_gain
        b = self.bias
        if b is not None:
            b = b.to(x.dtype)
            if self.bias_gain != 1:
                b = b * self.bias_gain
        if self.activation == 'linear' and b is not None:
            # Fast path: fused bias + matmul.
            x = torch.addmm(b.unsqueeze(0), x, w.t())
        else:
            x = x.matmul(w.t())
            x = bias_act.bias_act(x, b, act=self.activation)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """2D convolution with equalized LR, optional up/downsampling, bias and activation."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            # Frozen layer (Freeze-D): register as buffers so weights still
            # travel with state_dict but receive no gradients.
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None
    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1) # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """Maps latent z (and optional label c) to intermediate latents W,
    optionally broadcasting to num_ws layers and applying truncation."""
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        # Feature widths for each FC layer: input -> hidden x (num_layers-1) -> w_dim.
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W.
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation: interpolate towards the tracked average W.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    # Truncate only the first `truncation_cutoff` layers.
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single synthesis conv layer: modulated conv + optional noise + activation.

    NOTE(review): in this fork, forward() takes precomputed style vectors
    directly instead of W latents (the `self.affine(w)` call is bypassed);
    the affine module is still constructed so it can be applied externally.
    """
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, styles, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
        # styles = self.affine(w)
        noise = None
        if self.use_noise and noise_mode == 'random':
            noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1) # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """1x1 modulated conv projecting features to image channels (no demodulation).

    NOTE(review): like SynthesisLayer in this fork, forward() takes precomputed
    style vectors; weight_gain is applied to the styles here instead of the weights.
    """
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
    def forward(self, x, styles, fused_modconv=True):
        styles = styles * self.weight_gain
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the synthesis network: up to two SynthesisLayers,
    an optional ToRGB layer, and optional resnet skip connection.

    forward() consumes a list of precomputed per-layer style vectors rather
    than W latents (see SynthesisLayer).
    """
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        out_channels,                       # Number of output channels.
        w_dim,                              # Intermediate latent (W) dimensionality.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of output color channels.
        is_last,                            # Is this the last block?
        architecture        = 'skip',       # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        **layer_kwargs,                     # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0   # Number of conv layers (and style vectors) in this block.
        self.num_torgb = 0  # Number of ToRGB layers in this block.

        if in_channels == 0:
            # First block: learned constant input instead of upstream features.
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))

        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1

        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1

        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1

        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, styles, force_fp32=False, fused_modconv=None, **layer_kwargs):
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            with misc.suppress_tracer_warnings(): # this value will be treated as a constant
                fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)
        s_iter = iter(styles)  # One style vector per conv/torgb layer.

        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([styles[0].shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(s_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(s_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(s_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(s_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(s_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB: accumulate the skip image at full FP32 precision.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(s_iter), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """Stack of SynthesisBlocks from 4x4 up to img_resolution.

    forward() takes a list of per-block style lists (see Generator.compute_styles)
    instead of broadcast W latents.
    """
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 0,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0  # power of two
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0  # Total number of W latents consumed by all blocks.
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)

    # def forward(self, ws, **block_kwargs):
    def forward(self, styles, **block_kwargs):
        x = img = None
        for res, cur_styles in zip(self.block_resolutions, styles):
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_styles, **block_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """StyleGAN2 generator exposing per-layer style vectors (style space).

    Combines a MappingNetwork (z, c -> broadcast W latents) with a
    SynthesisNetwork that consumes per-layer affine styles. Helpers convert
    W latents to styles (`compute_styles`) and flatten/unflatten them into a
    single style vector (`pack_styles` / `unpack_styles`).
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        synthesis_kwargs    = {},   # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def compute_styles(self, ws):
        """Apply each layer's affine transform to its W latent.

        Args:
            ws: Broadcast latents of shape [batch, num_ws, w_dim].
        Returns:
            List (one entry per block) of lists of style tensors, ordered
            conv0, conv1, torgb as present in each block.
        """
        syn = self.synthesis
        misc.assert_shape(ws, [None, syn.num_ws, syn.w_dim])
        ws = ws.to(torch.float32)

        # Slice the broadcast latents into per-block groups. ToRGB latents of
        # non-final blocks are shared with the next block's first conv, hence
        # the slice length num_conv + num_torgb but the advance by num_conv.
        w_idx = 0
        block_ws = []
        for res in syn.block_resolutions:
            block = getattr(syn, f'b{res}')
            block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
            w_idx += block.num_conv

        styles = []
        for res, cur_ws in zip(syn.block_resolutions, block_ws):
            block = getattr(syn, f'b{res}')
            cur_style = []
            w_iter = iter(cur_ws.unbind(dim=1))
            if block.in_channels == 0:
                # First block has only conv1.
                cur_style.append(block.conv1.affine(next(w_iter)))
            else:
                cur_style.append(block.conv0.affine(next(w_iter)))
                cur_style.append(block.conv1.affine(next(w_iter)))
            if block.is_last or block.architecture == 'skip':
                cur_style.append(block.torgb.affine(next(w_iter)))
            styles.append(cur_style)
        return styles

    def pack_styles(self, styles):
        """Concatenate the nested per-layer styles into one flat [batch, S] vector."""
        return torch.cat(tuple(itertools.chain(*styles)), dim=1)

    def unpack_styles(self, svec):
        """Inverse of pack_styles: split a flat style vector back into the
        nested per-block structure expected by the synthesis network."""
        syn = self.synthesis
        offset = 0
        styles = []
        for res in syn.block_resolutions:
            block = getattr(syn, f'b{res}')
            cur_style = []
            if block.in_channels == 0:
                # affine.weight is [out_features, in_features]; out_features is the style length.
                length = block.conv1.affine.weight.shape[0]
                cur_style.append(svec[:, offset: offset + length])
                offset += length
            else:
                length = block.conv0.affine.weight.shape[0]
                cur_style.append(svec[:, offset: offset + length])
                offset += length
                length = block.conv1.affine.weight.shape[0]
                cur_style.append(svec[:, offset: offset + length])
                offset += length
            if block.is_last or block.architecture == 'skip':
                length = block.torgb.affine.weight.shape[0]
                cur_style.append(svec[:, offset: offset + length])
                offset += length
            styles.append(cur_style)
        assert offset == svec.shape[1]  # Entire vector must be consumed.
        return styles

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
        """Generate images; caches `self.ws` and `self.styles` for inspection."""
        self.ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
        self.styles = self.compute_styles(self.ws)
        img = self.synthesis(self.styles, **synthesis_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """One resolution level of the discriminator: optional FromRGB, two convs
    (second one downsampling by 2), and optional resnet skip connection."""
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        tmp_channels,                       # Number of intermediate channels.
        out_channels,                       # Number of output channels.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of input color channels.
        first_layer_idx,                    # Index of the first layer.
        architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
        activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))

        self.num_layers = 0
        def trainable_gen():
            # Yields True for each successive layer whose global index is past
            # the Freeze-D cutoff; also counts layers as a side effect.
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = (layer_idx >= freeze_layers)
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()

        if in_channels == 0 or architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
                trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)

        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)

        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)

        if architecture == 'resnet':
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, force_fp32=False):
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format

        # Input.
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # FromRGB.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            # 'skip' keeps a downsampled image for the next block; others drop it.
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None

        # Main layers.
        if self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)

        assert x.dtype == dtype
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends per-group minibatch standard-deviation statistics as extra
    feature channels, as used in the discriminator epilogue."""
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size    # None = use the entire minibatch as one group.
        self.num_channels = num_channels
    def forward(self, x):
        N, C, H, W = x.shape
        with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
            G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N
        F = self.num_channels
        c = C // F

        y = x.reshape(G, -1, F, c, H, W)    # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
        y = y - y.mean(dim=0)               # [GnFcHW] Subtract mean over group.
        y = y.square().mean(dim=0)          # [nFcHW]  Calc variance over group.
        y = (y + 1e-8).sqrt()               # [nFcHW]  Calc stddev over group.
        y = y.mean(dim=[2,3,4])             # [nF]     Take average over channels and pixels.
        y = y.reshape(-1, F, 1, 1)          # [nF11]   Add missing dimensions.
        y = y.repeat(G, 1, H, W)            # [NFHW]   Replicate over group and pixels.
        x = torch.cat([x, y], dim=1)        # [NCHW]   Append to input as new channels.
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 discriminator stage: minibatch-stddev, conv, FC, and the
    scalar (or label-projected) output."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture

        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, x, img, cmap, force_fp32=False):
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32 # unused
        # Epilogue always runs in FP32 regardless of earlier blocks.
        dtype = torch.float32
        memory_format = torch.contiguous_format

        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)

        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)

        # Conditioning: projection discriminator - dot product with label embedding.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        assert x.dtype == dtype
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """Full discriminator: a stack of DiscriminatorBlocks from img_resolution
    down to 8x8, an optional label-mapping network, and the 4x4 epilogue."""
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 0,        # Use FP16 for the N highest resolutions.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Resolutions from highest down to 8 (the 4x4 stage is the epilogue).
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0  # Global layer counter for Freeze-D.
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            # Label embedding for the projection discriminator (no Z input).
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)

    def forward(self, img, c, **block_kwargs):
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x
#----------------------------------------------------------------------------
| 39,286 | 49.23913 | 164 | py |
ice-ice | ice-ice/training/training_loop.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
    """Select samples for the periodic snapshot image grid.

    Returns ((gw, gh), images, labels) where images/labels are stacked numpy
    arrays of gw*gh samples. For labeled datasets, each grid row shows one
    label class; otherwise a random subset of the training data is shown.
    """
    rng = np.random.RandomState(random_seed)
    # Grid dimensions target roughly an 8K canvas, clamped to sane bounds.
    gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
    gh = np.clip(4320 // training_set.image_shape[1], 4, 32)

    if not training_set.has_labels:
        # No labels => show random subset of training samples.
        order = list(range(len(training_set)))
        rng.shuffle(order)
        grid_indices = [order[i % len(order)] for i in range(gw * gh)]
    else:
        # Group training samples by label.
        label_groups = dict()  # label tuple => [idx, ...]
        for idx in range(len(training_set)):
            key = tuple(training_set.get_details(idx).raw_label.flat[::-1])
            label_groups.setdefault(key, []).append(idx)
        # Shuffle within each group, visiting groups in a deterministic order.
        label_order = sorted(label_groups.keys())
        for key in label_order:
            rng.shuffle(label_groups[key])
        # Fill the grid row by row, rotating each group by gw after use so
        # repeated rows of the same label show different samples.
        grid_indices = []
        for y in range(gh):
            key = label_order[y % len(label_order)]
            indices = label_groups[key]
            grid_indices += [indices[x % len(indices)] for x in range(gw)]
            label_groups[key] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]

    # Load the selected samples.
    images, labels = zip(*[training_set[i] for i in grid_indices])
    return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Tile a batch of images [N, C, H, W] into a gw*gh grid and save to `fname`.

    drange = (lo, hi) is the input value range; values are rescaled to [0, 255].
    Supports grayscale (C=1) and RGB (C=3) images.
    """
    lo, hi = drange
    # Rescale to 8-bit with rounding and clamping.
    arr = np.asarray(img, dtype=np.float32)
    arr = np.rint((arr - lo) * (255 / (hi - lo))).clip(0, 255).astype(np.uint8)

    gw, gh = grid_size
    _N, C, H, W = arr.shape
    # [N, C, H, W] -> [gh*H, gw*W, C]: lay the samples out row-major on the grid.
    arr = arr.reshape(gh, gw, C, H, W)
    arr = arr.transpose(0, 3, 1, 4, 2)
    arr = arr.reshape(gh * H, gw * W, C)

    assert C in [1, 3]
    if C == 1:
        PIL.Image.fromarray(arr[:, :, 0], 'L').save(fname)
    if C == 3:
        PIL.Image.fromarray(arr, 'RGB').save(fname)
#----------------------------------------------------------------------------
def training_loop(
    run_dir = '.', # Output directory.
    training_set_kwargs = {}, # Options for training set.
    data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
    G_kwargs = {}, # Options for generator network.
    D_kwargs = {}, # Options for discriminator network.
    G_opt_kwargs = {}, # Options for generator optimizer.
    D_opt_kwargs = {}, # Options for discriminator optimizer.
    augment_kwargs = None, # Options for augmentation pipeline. None = disable.
    loss_kwargs = {}, # Options for loss function.
    metrics = [], # Metrics to evaluate during training.
    random_seed = 0, # Global random seed.
    num_gpus = 1, # Number of GPUs participating in the training.
    rank = 0, # Rank of the current process in [0, num_gpus[.
    batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu = 4, # Number of samples processed at a time by one GPU.
    ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup = None, # EMA ramp-up coefficient.
    G_reg_interval = 4, # How often to perform regularization for G? None = disable lazy regularization.
    D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
    augment_p = 0, # Initial value of augmentation probability.
    ada_target = None, # ADA target value. None = fixed p.
    ada_interval = 4, # How often to perform ADA adjustment?
    ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
    total_kimg = 25000, # Total length of the training, measured in thousands of real images.
    kimg_per_tick = 4, # Progress snapshot interval.
    image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
    network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
    resume_pkl = None, # Network pickle to resume training from.
    cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
    allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
    abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn = None, # Callback function for updating training progress. Called for all ranks.
):
    """Main GAN training loop for one process (one GPU, identified by `rank`).

    Builds G/D plus an EMA copy of G, optionally resumes from a pickle, then
    repeats fetch-data -> run training phases -> update G_ema until total_kimg
    thousand real images have been shown (or abort_fn fires). Once per tick it
    prints a status line; rank 0 additionally saves image/network snapshots,
    evaluates metrics, and appends to stats.jsonl / tensorboard.
    """
    # Initialize.
    start_time = time.time()
    device = torch.device('cuda', rank)
    np.random.seed(random_seed * num_gpus + rank)
    torch.manual_seed(random_seed * num_gpus + rank)
    torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for matmul
    torch.backends.cudnn.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for convolutions
    conv2d_gradfix.enabled = True # Improves training speed.
    grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.
    # Load training set.
    if rank == 0:
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
    if rank == 0:
        print()
        print('Num images: ', len(training_set))
        print('Image shape:', training_set.image_shape)
        print('Label shape:', training_set.label_shape)
        print()
    # Construct networks.
    if rank == 0:
        print('Constructing networks...')
    common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    G_ema = copy.deepcopy(G).eval()
    # Resume from existing pickle.
    if (resume_pkl is not None) and (rank == 0):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = legacy.load_network_pkl(f)
        for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
            misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
    # Print network summary tables.
    if rank == 0:
        z = torch.empty([batch_gpu, G.z_dim], device=device)
        c = torch.empty([batch_gpu, G.c_dim], device=device)
        img = misc.print_module_summary(G, [z, c])
        misc.print_module_summary(D, [img, c])
    # Setup augmentation.
    if rank == 0:
        print('Setting up augmentation...')
    augment_pipe = None
    ada_stats = None
    if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
        augment_pipe.p.copy_(torch.as_tensor(augment_p))
        if ada_target is not None:
            ada_stats = training_stats.Collector(regex='Loss/signs/real')
    # Distribute across GPUs.
    if rank == 0:
        print(f'Distributing across {num_gpus} GPUs...')
    ddp_modules = dict()
    for name, module in [('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('D', D), (None, G_ema), ('augment_pipe', augment_pipe)]:
        if (num_gpus > 1) and (module is not None) and len(list(module.parameters())) != 0:
            module.requires_grad_(True)
            module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False)
            module.requires_grad_(False)
        if name is not None:
            ddp_modules[name] = module
    # Setup training phases.
    if rank == 0:
        print('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
    phases = []
    for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
        if reg_interval is None:
            opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
        else: # Lazy regularization.
            # Scale lr/betas so the effective optimizer statistics match
            # running the regularizer every iteration (lazy regularization).
            mb_ratio = reg_interval / (reg_interval + 1)
            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
            opt_kwargs.lr = opt_kwargs.lr * mb_ratio
            opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
            opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
            phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if rank == 0:
            # CUDA events exist only on rank 0; they time each phase for the logs.
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)
    # Export sample images.
    grid_size = None
    grid_z = None
    grid_c = None
    if rank == 0:
        print('Exporting sample images...')
        grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
        save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
        grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
        grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
        images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
        save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
    # Initialize logs.
    if rank == 0:
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if rank == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            print('Skipping tfevents export:', err)
    # Train.
    if rank == 0:
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = 0
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(0, total_kimg)
    while True:
        # Fetch training data.
        with torch.autograd.profiler.record_function('data_fetch'):
            # Normalize uint8 images to [-1, 1] and split into per-GPU microbatches.
            phase_real_img, phase_real_c = next(training_set_iterator)
            phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            phase_real_c = phase_real_c.to(device).split(batch_gpu)
            all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
            all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
            all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
            all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
        # Execute training phases.
        for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
            if batch_idx % phase.interval != 0:
                continue
            # Initialize gradient accumulation.
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)
            # Accumulate gradients over multiple rounds.
            for round_idx, (real_img, real_c, gen_z, gen_c) in enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c)):
                # NOTE(review): sync is True only on the last accumulation round;
                # presumably it gates DDP gradient all-reduce — confirm in training.loss.
                sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)
                gain = phase.interval
                loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain)
            # Update weights.
            phase.module.requires_grad_(False)
            with torch.autograd.profiler.record_function(phase.name + '_opt'):
                for param in phase.module.parameters():
                    if param.grad is not None:
                        # Sanitize non-finite gradients in place before the step.
                        misc.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
                phase.opt.step()
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))
        # Update G_ema.
        with torch.autograd.profiler.record_function('Gema'):
            ema_nimg = ema_kimg * 1000
            if ema_rampup is not None:
                ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
            ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
            for p_ema, p in zip(G_ema.parameters(), G.parameters()):
                p_ema.copy_(p.lerp(p_ema, ema_beta))
            for b_ema, b in zip(G_ema.buffers(), G.buffers()):
                b_ema.copy_(b)
        # Update state.
        cur_nimg += batch_size
        batch_idx += 1
        # Execute ADA heuristic.
        if (ada_stats is not None) and (batch_idx % ada_interval == 0):
            ada_stats.update()
            adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
            augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue
        # Print status line, accumulating the same information in stats_collector.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        if rank == 0:
            print(' '.join(fields))
        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            if rank == 0:
                print()
                print('Aborting...')
        # Save image snapshot.
        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
            save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
        # Save network snapshot.
        snapshot_pkl = None
        snapshot_data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
            for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]:
                if module is not None:
                    if num_gpus > 1:
                        misc.check_ddp_consistency(module, ignore_regex=r'.*\.w_avg')
                    module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
                snapshot_data[name] = module
                del module # conserve memory
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
            if rank == 0:
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)
        # Evaluate metrics.
        if (snapshot_data is not None) and (len(metrics) > 0):
            if rank == 0:
                print('Evaluating metrics...')
            for metric in metrics:
                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
                    dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if rank == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del snapshot_data # conserve memory
        # Collect statistics.
        for phase in phases:
            # value stays [] when the CUDA timing events are absent (ranks != 0).
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()
        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)
        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break
    # Done.
    if rank == 0:
        print()
        print('Exiting...')
#----------------------------------------------------------------------------
| 21,596 | 50.177725 | 168 | py |
ice-ice | ice-ice/training/networks_old.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Scale `x` so its mean squared value along `dim` is ~1 (pixel norm)."""
    scale = torch.rsqrt(x.square().mean(dim=dim, keepdim=True) + eps)
    return x * scale
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x, # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles, # Modulation coefficients of shape [batch_size, in_channels].
    noise = None, # Optional noise tensor to add to the output activations.
    up = 1, # Integer upsampling factor.
    down = 1, # Integer downsampling factor.
    padding = 0, # Padding with respect to the upsampled image.
    resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate = True, # Apply weight demodulation?
    flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """StyleGAN2 modulated convolution.

    Scales the convolution weights per-sample by `styles`, optionally
    demodulates (normalizes) the scaled weights, convolves with optional
    up/down resampling, and adds `noise`. Two equivalent execution paths:
    a non-fused one (scale the activations before/after a shared conv) and
    a fused one (grouped conv with a distinct weight per sample).
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        # Per-sample, per-output-channel normalization factors.
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add: demodulate and add noise in one kernel.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Equalized-LR fully connected layer with optional fused bias/activation."""
    def __init__(self,
        in_features, # Number of input features.
        out_features, # Number of output features.
        bias = True, # Apply additive bias before the activation function?
        activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier = 1, # Learning rate multiplier.
        bias_init = 0, # Initial value for the additive bias.
    ):
        super().__init__()
        self.activation = activation
        # Weights are stored scaled down by lr_multiplier; weight_gain restores
        # the magnitude at runtime so the effective learning rate is scaled.
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = self.bias
        if bias is not None:
            bias = bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain
        if self.activation == 'linear' and bias is not None:
            # Fast path: fused bias-add + matmul.
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        # General path: matmul, then fused bias + activation.
        return bias_act.bias_act(x.matmul(weight.t()), bias, act=self.activation)
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """Equalized-LR 2D convolution with optional resampling and fused bias/act."""
    def __init__(self,
        in_channels, # Number of input channels.
        out_channels, # Number of output channels.
        kernel_size, # Width and height of the convolution kernel.
        bias = True, # Apply additive bias before the activation function?
        activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
        up = 1, # Integer upsampling factor.
        down = 1, # Integer downsampling factor.
        resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
        conv_clamp = None, # Clamp the output to +-X, None = disable clamping.
        channels_last = False, # Expect the input to have memory_format=channels_last?
        trainable = True, # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        # Equalized learning rate: weights are rescaled by 1/sqrt(fan_in) at runtime.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        init_weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt)
        init_bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(init_weight)
            self.bias = torch.nn.Parameter(init_bias) if init_bias is not None else None
        else:
            # Frozen layer: keep tensors as buffers so they still move with the module.
            self.register_buffer('weight', init_weight)
            if init_bias is not None:
                self.register_buffer('bias', init_bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = None if self.bias is None else self.bias.to(x.dtype)
        flip_weight = (self.up == 1) # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
        act_gain = self.act_gain * gain
        act_clamp = None if self.conv_clamp is None else self.conv_clamp * gain
        return bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """Maps latents Z (and labels C) to intermediate latents W.

    Optionally broadcasts W to `num_ws` copies and tracks a moving
    average of W for the truncation trick at inference time.
    """
    def __init__(self,
        z_dim, # Input latent (Z) dimensionality, 0 = no latent.
        c_dim, # Conditioning label (C) dimensionality, 0 = no label.
        w_dim, # Intermediate latent (W) dimensionality.
        num_ws, # Number of intermediate latents to output, None = do not broadcast.
        num_layers = 8, # Number of mapping layers.
        embed_features = None, # Label embedding dimensionality, None = same as w_dim.
        layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.
        w_avg_beta = 0.995, # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        # Resolve defaults: embedding and hidden widths follow w_dim.
        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        # Feature counts per layer: input -> hidden x (num_layers-1) -> w_dim.
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx, (fan_in, fan_out) in enumerate(zip(features_list[:-1], features_list[1:])):
            setattr(self, f'fc{idx}', FullyConnectedLayer(fan_in, fan_out, activation=activation, lr_multiplier=lr_multiplier))
        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
        # Embed, normalize, and concatenate the inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = y if x is None else torch.cat([x, y], dim=1)
        # Run the stack of fully connected layers.
        for idx in range(self.num_layers):
            x = getattr(self, f'fc{idx}')(x)
        # Track the moving average of W (training only).
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Broadcast one copy of W per synthesis-layer input, if requested.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        # Truncation trick: pull W toward its moving average.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single StyleGAN2 synthesis layer: modulated conv + noise + activation."""
    def __init__(self,
        in_channels, # Number of input channels.
        out_channels, # Number of output channels.
        w_dim, # Intermediate latent (W) dimensionality.
        resolution, # Resolution of this layer.
        kernel_size = 3, # Convolution kernel size.
        up = 1, # Integer upsampling factor.
        use_noise = True, # Enable noise input?
        activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
        conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last = False, # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        # Affine transform from W to per-input-channel modulation styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt))
        if use_noise:
            # Fixed noise map for noise_mode='const', plus a learned strength.
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        expected_res = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], expected_res, expected_res])
        styles = self.affine(w)
        noise = None
        if self.use_noise:
            if noise_mode == 'random':
                noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
            elif noise_mode == 'const':
                noise = self.noise_const * self.noise_strength
        flip_weight = (self.up == 1) # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
        act_gain = self.act_gain * gain
        act_clamp = None if self.conv_clamp is None else self.conv_clamp * gain
        return bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """Projects feature maps to image channels using a demodulation-free modulated conv."""
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        # Fold the equalized-LR gain into the styles rather than the weights.
        styles = self.affine(w) * self.weight_gain
        out = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        return bias_act.bias_act(out, self.bias.to(x.dtype), clamp=self.conv_clamp)
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the StyleGAN2 synthesis network.

    The first block (in_channels == 0) starts from a learned constant and
    applies a single conv; later blocks upsample 2x in conv0 and refine in
    conv1. With the 'skip' architecture (or on the last block) a ToRGB layer
    adds this block's contribution to the progressively upsampled image; the
    'resnet' architecture adds a 1x1 linear skip connection instead.
    """
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        out_channels,                       # Number of output channels.
        w_dim,                              # Intermediate latent (W) dimensionality.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of output color channels.
        is_last,                            # Is this the last block?
        architecture        = 'skip',       # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        **layer_kwargs,                     # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        # Counters for how many w vectors forward() consumes; used by
        # SynthesisNetwork to slice ws per block.
        self.num_conv = 0
        self.num_torgb = 0
        if in_channels == 0:
            # First block: learned constant input instead of an incoming feature map.
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
        if in_channels != 0:
            # conv0 upsamples 2x from the previous resolution.
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1
        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1
        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1
        if in_channels != 0 and architecture == 'resnet':
            # Linear (no bias, no activation) skip path, downweighted together
            # with the main path by sqrt(0.5) in forward().
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs):
        """Process one block.

        Args:
            x:    Incoming feature map (None for the first block).
            img:  Running RGB image from previous blocks, or None.
            ws:   Per-block latents [N, num_conv + num_torgb, w_dim].

        Returns:
            (x, img): features at self.resolution and the updated RGB image.
        """
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            with misc.suppress_tracer_warnings(): # this value will be treated as a constant
                fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)
        # Input.
        if self.in_channels == 0:
            # Broadcast the learned constant over the batch.
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)
        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            # Residual variant: both branches scaled by sqrt(0.5) to preserve
            # activation magnitudes.
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        # ToRGB.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            # Upsample the running image to this block's resolution before adding.
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            # RGB accumulation always happens in fp32 / contiguous format.
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y
        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """StyleGAN2 synthesis network: a stack of SynthesisBlocks, one per
    resolution from 4x4 up to img_resolution, mapping latents ws to an image.
    """
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 0,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        # Output resolution must be a power of two, at least 4.
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        # Channel count halves as resolution doubles, capped at channel_max.
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        # Resolutions >= fp16_resolution run in fp16 (the num_fp16_res highest).
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.num_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
            # Total number of w vectors the whole network consumes; only the
            # last block's torgb count is added (see the overlapping slices
            # taken in forward()).
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)
    def forward(self, ws, **block_kwargs):
        """Synthesize an image from broadcast latents ws [N, num_ws, w_dim]."""
        block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                # Each block's slice includes its torgb latents, but w_idx only
                # advances by num_conv — so a block's torgb shares its w with
                # the next block's first conv.
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv
        x = img = None
        for res, cur_ws in zip(self.block_resolutions, block_ws):
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_ws, **block_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """Full StyleGAN2 generator: MappingNetwork (z, c) -> ws followed by
    SynthesisNetwork ws -> image.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs   = None,    # Arguments for MappingNetwork (dict; default empty).
        synthesis_kwargs = None,    # Arguments for SynthesisNetwork (dict; default empty).
    ):
        super().__init__()
        # Fix: avoid mutable default arguments ({}), which are shared across
        # all instances; fall back to fresh empty dicts instead. Passing an
        # explicit dict behaves exactly as before.
        mapping_kwargs = {} if mapping_kwargs is None else mapping_kwargs
        synthesis_kwargs = {} if synthesis_kwargs is None else synthesis_kwargs
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        # Mapping network must broadcast w to every layer of the synthesis network.
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
        """Generate images from latents z [N, z_dim] and labels c [N, c_dim].

        truncation_psi / truncation_cutoff are forwarded to the mapping
        network's truncation trick; remaining kwargs go to the synthesis
        network.
        """
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
        img = self.synthesis(ws, **synthesis_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """One resolution level of the StyleGAN2 discriminator.

    Optionally converts the (downsampled) RGB image to features via fromrgb,
    then applies two convolutions, the second downsampling 2x. The 'resnet'
    architecture adds a linear 1x1 skip path; 'skip' keeps downsampling the
    RGB image for the next block.
    """
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        tmp_channels,                       # Number of intermediate channels.
        out_channels,                       # Number of output channels.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of input color channels.
        first_layer_idx,                    # Index of the first layer.
        architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
        activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_layers = 0
        # Freeze-D: layers with a global index below freeze_layers are created
        # as non-trainable. The generator yields one flag per created layer
        # and bumps self.num_layers as a side effect.
        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = (layer_idx >= freeze_layers)
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()
        if in_channels == 0 or architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
                trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        # conv1 downsamples 2x to the next block's resolution.
        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
        if architecture == 'resnet':
            # Linear (no bias/activation) residual path, also downsampling 2x.
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, force_fp32=False):
        """Process one block.

        Args:
            x:   Incoming feature map (None for the first block).
            img: RGB image at this block's resolution (consumed by fromrgb
                 and, for 'skip', downsampled and passed on).

        Returns:
            (x, img): downsampled features and (for 'skip') downsampled image,
            otherwise img is None.
        """
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        # Input.
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)
        # FromRGB.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None
        # Main layers.
        if self.architecture == 'resnet':
            # Both branches scaled by sqrt(0.5) to keep activation magnitudes.
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)
        assert x.dtype == dtype
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends per-group minibatch standard-deviation statistics as extra
    feature channels (the classic discriminator trick from ProGAN/StyleGAN).

    The batch is split into groups of at most `group_size` samples and the
    channels into `num_channels` feature groups; one stddev scalar per
    (group, feature-group) is averaged, broadcast back over the group and
    spatial dimensions, and concatenated to the input.
    """
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        # group_size=None means "use the whole minibatch as one group".
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        batch, channels, height, width = x.shape
        with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
            if self.group_size is not None:
                group = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(batch))
            else:
                group = batch
        num_feat = self.num_channels
        chans_per_feat = channels // num_feat
        # [G, n, F, c, H, W]: split batch into n groups of size G, channels
        # into F feature groups of size c.
        stats = x.reshape(group, -1, num_feat, chans_per_feat, height, width)
        stats = stats - stats.mean(dim=0)               # center within each group
        stats = stats.square().mean(dim=0)              # variance over the group
        stats = (stats + 1e-8).sqrt()                   # stddev (eps for stability)
        stats = stats.mean(dim=[2, 3, 4])               # average over channels and pixels -> [n, F]
        stats = stats.reshape(-1, num_feat, 1, 1)       # [n, F, 1, 1]
        stats = stats.repeat(group, 1, height, width)   # replicate over group and pixels -> [N, F, H, W]
        # Append the statistics as new channels.
        return torch.cat([x, stats], dim=1)
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 stage of the StyleGAN2 discriminator.

    Optionally appends minibatch-stddev channels, applies one conv and two
    fully-connected layers, and — for conditional models — projects the
    output onto the mapped label embedding (projection discriminator).
    """
    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
        # conv input width accounts for the extra mbstd channels.
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        # Unconditional: 1 logit. Conditional: cmap_dim features, projected
        # onto the label embedding in forward().
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)
    def forward(self, x, img, cmap, force_fp32=False):
        """Map final features (and optional label embedding cmap) to a score.

        The epilogue always runs in fp32; force_fp32 is accepted for
        interface symmetry with the blocks but unused.
        """
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32 # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format
        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)
        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)
        # Conditioning: projection-discriminator dot product with the mapped
        # label, normalized by sqrt(cmap_dim).
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        assert x.dtype == dtype
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """Full StyleGAN2 discriminator: a stack of DiscriminatorBlocks from
    img_resolution down to 8x8, followed by a DiscriminatorEpilogue at 4x4.
    For conditional models, labels are embedded by a MappingNetwork and used
    by the epilogue's projection head.
    """
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 0,        # Use FP16 for the N highest resolutions.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = None,     # Arguments for DiscriminatorBlock (dict; default empty).
        mapping_kwargs      = None,     # Arguments for MappingNetwork (dict; default empty).
        epilogue_kwargs     = None,     # Arguments for DiscriminatorEpilogue (dict; default empty).
    ):
        super().__init__()
        # Fix: avoid mutable default arguments ({}), which are shared across
        # all instances; fall back to fresh empty dicts instead. Passing an
        # explicit dict behaves exactly as before.
        block_kwargs = {} if block_kwargs is None else block_kwargs
        mapping_kwargs = {} if mapping_kwargs is None else mapping_kwargs
        epilogue_kwargs = {} if epilogue_kwargs is None else epilogue_kwargs
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Blocks run from the input resolution down to 8x8; 4x4 is the epilogue.
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        # Resolutions >= fp16_resolution run in fp16 (the num_fp16_res highest).
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0
        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            # The highest-resolution block takes the image directly (in_channels=0).
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            # Track the global layer index for Freeze-D.
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            # Label embedding network (no z input, no w broadcast/averaging).
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
    def forward(self, img, c, **block_kwargs):
        """Score images img [N, img_channels, R, R] given labels c [N, c_dim]."""
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x
#----------------------------------------------------------------------------
| 37,392 | 50.223288 | 164 | py |
ice-ice | ice-ice/torch_utils/custom_ops.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import glob
import torch
import torch.utils.cpp_extension
import importlib
import hashlib
import shutil
from pathlib import Path
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()
def get_plugin(module_name, sources, **build_kwargs):
    """Compile (if necessary) and import a custom C++/CUDA extension module.

    Successful imports are cached in the module-level `_cached_plugins` dict,
    so each plugin is set up at most once per process.

    Args:
        module_name:    Name under which the compiled extension is built and
                        imported.
        sources:        Paths of the extension's source files.
        **build_kwargs: Extra keyword arguments forwarded to
                        torch.utils.cpp_extension.load().

    Returns:
        The imported extension module.

    Raises:
        RuntimeError: On Windows, if no MSVC installation can be located.
        Any exception raised by the build/import itself is re-raised after
        printing a failure notice (when verbosity == 'brief').
    """
    assert verbosity in ['none', 'brief', 'full']
    # Already cached?
    if module_name in _cached_plugins:
        return _cached_plugins[module_name]
    # Print status.
    if verbosity == 'full':
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif verbosity == 'brief':
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
    try: # pylint: disable=too-many-nested-blocks
        # Make sure we can find the necessary compiler binaries.
        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
            compiler_bindir = _find_compiler_bindir()
            if compiler_bindir is None:
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += ';' + compiler_bindir
        # Compile and load.
        verbose_build = (verbosity == 'full')
        # Incremental build md5sum trickery. Copies all the input source files
        # into a cached build directory under a combined md5 digest of the input
        # source files. Copying is done only if the combined digest has changed.
        # This keeps input file timestamps and filenames the same as in previous
        # extension builds, allowing for fast incremental rebuilds.
        #
        # This optimization is done only in case all the source files reside in
        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
        # environment variable is set (we take this as a signal that the user
        # actually cares about this.)
        source_dirs_set = set(os.path.dirname(source) for source in sources)
        if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
            all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))
            # Compute a combined hash digest for all source files in the same
            # custom op directory (usually .cu, .cpp, .py and .h files).
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())
            build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
            digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())
            if not os.path.isdir(digest_build_dir):
                os.makedirs(digest_build_dir, exist_ok=True)
                # A file baton serializes concurrent processes copying into
                # the same digest directory.
                baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
                if baton.try_acquire():
                    try:
                        for src in all_source_files:
                            shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
                    finally:
                        baton.release()
                else:
                    # Someone else is copying source files under the digest dir,
                    # wait until done and continue.
                    baton.wait()
            digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
                verbose=verbose_build, sources=digest_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
        module = importlib.import_module(module_name)
    except:
        # Report the failure before re-raising (brief mode has a pending line).
        if verbosity == 'brief':
            print('Failed!')
        raise
    # Print status and add to cache.
    if verbosity == 'full':
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif verbosity == 'brief':
        print('Done.')
    _cached_plugins[module_name] = module
    return module
#----------------------------------------------------------------------------
| 5,644 | 43.448819 | 146 | py |
ice-ice | ice-ice/torch_utils/training_stats.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction.
_counter_dtype = torch.float64 # Data type to use for the internal counters.
_rank = 0 # Rank of the current process.
_sync_device = None # Device to use for multiprocess communication. None = single-process.
_sync_called = False # Has _sync() been called yet?
_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
    r"""Configure `torch_utils.training_stats` for multi-process collection.

    Must be called after `torch.distributed.init_process_group()` and before
    the first `Collector.update()`; skipping the call is fine when statistics
    are only collected within a single process.

    Args:
        rank:        Rank of the current process.
        sync_device: Device used for inter-process communication (typically
                     `torch.device('cuda', rank)`), or None to disable
                     multi-process collection.
    """
    global _rank, _sync_device
    # Changing the configuration after the first sync would corrupt the
    # accumulated counters, hence the assertion.
    assert not _sync_called
    _rank, _sync_device = rank, sync_device
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
    r"""Broadcasts the given set of scalars to all interested instances of
    `Collector`, across device and process boundaries.
    This function is expected to be extremely cheap and can be safely
    called from anywhere in the training loop, loss function, or inside a
    `torch.nn.Module`.
    Warning: The current implementation expects the set of unique names to
    be consistent across processes. Please make sure that `report()` is
    called at least once for each unique name by each process, and in the
    same order. If a given process has no scalars to broadcast, it can do
    `report(name, [])` (empty list).
    Args:
        name:   Arbitrary string specifying the name of the statistic.
                Averages are accumulated separately for each unique name.
        value:  Arbitrary set of scalars. Can be a list, tuple,
                NumPy array, PyTorch tensor, or Python scalar.
    Returns:
        The same `value` that was passed in.
    """
    # Registering the name even for empty input keeps the name set consistent
    # across processes (see the Warning above).
    if name not in _counters:
        _counters[name] = dict()
    elems = torch.as_tensor(value)
    if elems.numel() == 0:
        return value
    # Accumulate [count, sum, sum-of-squares] — enough to later derive
    # mean and standard deviation.
    elems = elems.detach().flatten().to(_reduce_dtype)
    moments = torch.stack([
        torch.ones_like(elems).sum(),
        elems.sum(),
        elems.square().sum(),
    ])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)
    # One running counter per (name, device); devices are merged in _sync().
    device = moments.device
    if device not in _counters[name]:
        _counters[name][device] = torch.zeros_like(moments)
    _counters[name][device].add_(moments)
    return value
#----------------------------------------------------------------------------
def report0(name, value):
    r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
    but ignores any scalars provided by the other processes.
    See `report()` for further details.
    """
    # Non-zero ranks still call report() with an empty list so that the set
    # and order of names stays consistent across processes.
    if _rank == 0:
        report(name, value)
    else:
        report(name, [])
    return value
#----------------------------------------------------------------------------
class Collector:
    r"""Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.
    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.
    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        # Snapshot of the global cumulative counters at the last update().
        self._cumulative = dict()
        # Per-name moment deltas accumulated between the last two updates.
        self._moments = dict()
        # Run one update to baseline against the current global counters,
        # then discard the resulting (pre-construction) moments.
        self.update()
        self._moments.clear()
    def names(self):
        r"""Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]
    def update(self):
        r"""Copies current values of the internal counters to the
        user-visible state and resets them for the next round.
        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.
        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            if name not in self._cumulative:
                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
            # Delta of [count, sum, sum-of-squares] since the last update.
            delta = cumulative - self._cumulative[name]
            self._cumulative[name].copy_(cumulative)
            # delta[0] is the scalar count; skip names with no new scalars.
            if float(delta[0]) != 0:
                self._moments[name] = delta
    def _get_delta(self, name):
        r"""Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]
    def num(self, name):
        r"""Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])
    def mean(self, name):
        r"""Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])
    def std(self, name):
        r"""Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if int(delta[0]) == 1:
            return float(0)
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        # Population variance: E[x^2] - E[x]^2, clamped at 0 against
        # floating-point round-off.
        return np.sqrt(max(raw_var - np.square(mean), 0))
    def as_dict(self):
        r"""Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:
            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats
    def __getitem__(self, name):
        r"""Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)
#----------------------------------------------------------------------------
def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.

    Args:
        names: Statistic names to synchronize; must be reported in the same
               order by every process (see the warning in `report()`).

    Returns:
        List of `(name, cumulative_moments)` pairs, where the moments are
        the CPU-side `[count, sum, sum_of_squares]` tensors accumulated
        since process start.
    """
    if len(names) == 0:
        return []
    global _sync_called
    _sync_called = True
    # Collect deltas within current rank.
    deltas = []
    device = _sync_device if _sync_device is not None else torch.device('cpu')
    for name in names:
        delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            # Fold each per-device counter into the delta and zero it out for
            # the next round.
            delta.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)
    # Sum deltas across ranks.
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)
    # Update cumulative values.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])
    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,707 | 38.806691 | 118 | py |
ice-ice | ice-ice/torch_utils/persistence.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
# Module-level registry shared by all persistent classes created in this process.
_version = 6 # internal version number
_decorators = set() # {decorator_class, ...}
_import_hooks = [] # [hook_function, ...]
_module_to_src_dict = dict() # {module: src, ...}
_src_to_module_dict = dict() # {src: module, ...}
#----------------------------------------------------------------------------
def persistent_class(orig_class):
    r"""Class decorator that extends a given class to save its source code
    when pickled.
    Example:
    from torch_utils import persistence
    @persistence.persistent_class
    class MyNetwork(torch.nn.Module):
    def __init__(self, num_inputs, num_outputs):
    super().__init__()
    self.fc = MyLayer(num_inputs, num_outputs)
    ...
    @persistence.persistent_class
    class MyLayer(torch.nn.Module):
    ...
    When pickled, any instance of `MyNetwork` and `MyLayer` will save its
    source code alongside other internal state (e.g., parameters, buffers,
    and submodules). This way, any previously exported pickle will remain
    usable even if the class definitions have been modified or are no
    longer available.
    The decorator saves the source code of the entire Python module
    containing the decorated class. It does *not* save the source code of
    any imported modules. Thus, the imported modules must be available
    during unpickling, also including `torch_utils.persistence` itself.
    It is ok to call functions defined in the same module from the
    decorated class. However, if the decorated class depends on other
    classes defined in the same module, they must be decorated as well.
    This is illustrated in the above example in the case of `MyLayer`.
    It is also possible to employ the decorator just-in-time before
    calling the constructor. For example:
    cls = MyLayer
    if want_to_make_it_persistent:
    cls = persistence.persistent_class(cls)
    layer = cls(num_inputs, num_outputs)
    As an additional feature, the decorator also keeps track of the
    arguments that were used to construct each instance of the decorated
    class. The arguments can be queried via `obj.init_args` and
    `obj.init_kwargs`, and they are automatically pickled alongside other
    object state. A typical use case is to first unpickle a previous
    instance of a persistent class, and then upgrade it to use the latest
    version of the source code:
    with open('old_pickle.pkl', 'rb') as f:
    old_net = pickle.load(f)
    new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
    misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    # Decorating twice is a no-op.
    if is_persistent(orig_class):
        return orig_class
    assert orig_class.__module__ in sys.modules
    orig_module = sys.modules[orig_class.__module__]
    # Capture the full source of the defining module at decoration time.
    orig_module_src = _module_to_src(orig_module)
    class Decorator(orig_class):
        # Snapshot of the module source and class name, pickled with every instance.
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Deep-copied so later caller-side mutation cannot change what gets pickled.
            self._init_args = copy.deepcopy(args)
            self._init_kwargs = copy.deepcopy(kwargs)
            assert orig_class.__name__ in orig_module.__dict__
            # Fail fast at construction time if the instance would not pickle.
            _check_pickleable(self.__reduce__())
        @property
        def init_args(self):
            # Constructor positional args (copy; safe for the caller to mutate).
            return copy.deepcopy(self._init_args)
        @property
        def init_kwargs(self):
            # Constructor keyword args as an attribute-accessible dict (copy).
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
        def __reduce__(self):
            # Rewrite the standard reduce tuple so that unpickling goes through
            # _reconstruct_persistent_obj() with the saved source code attached.
            fields = list(super().__reduce__())
            fields += [None] * max(3 - len(fields), 0)
            if fields[0] is not _reconstruct_persistent_obj:
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj # reconstruct func
                fields[1] = (meta,) # reconstruct args
                fields[2] = None # state dict
            return tuple(fields)
    Decorator.__name__ = orig_class.__name__
    _decorators.add(Decorator)
    return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
    r"""Return True if `obj` — either an instance or a class — is persistent,
    i.e. it will save its source code when pickled.
    """
    # `obj` may itself be one of the decorator classes.
    try:
        is_decorator = obj in _decorators
    except TypeError:
        # Unhashable objects cannot be members of the registry set.
        is_decorator = False
    if is_decorator:
        return True
    # Otherwise, check whether its class was produced by persistent_class().
    return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
    r"""Register a callable that is invoked whenever a persistent object is
    being unpickled. A typical use case is to patch the pickled source code
    to avoid errors and inconsistencies when the API of some imported module
    has changed.

    The hook must have the signature `hook(meta) -> modified meta`, where
    `meta` is a `dnnlib.EasyDict` with the fields:
        type:       Type of the persistent object, e.g. `'class'`.
        version:    Internal version number of `torch_utils.persistence`.
        module_src: Original source code of the Python module.
        class_name: Class name in the original Python module.
        state:      Internal state of the object.

    Example:
        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
    r"""Hook that is called internally by the `pickle` module to unpickle
    a persistent object.
    """
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    # Give registered import hooks a chance to patch the pickled metadata.
    for hook in _import_hooks:
        meta = hook(meta)
        assert meta is not None
    assert meta.version == _version
    # Materialize the pickled source as a live module and look up the class.
    module = _src_to_module(meta.module_src)
    assert meta.type == 'class'
    orig_class = module.__dict__[meta.class_name]
    decorator_class = persistent_class(orig_class)
    # Bypass __init__; state is restored below instead of re-running the constructor.
    obj = decorator_class.__new__(decorator_class)
    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state) # pylint: disable=not-callable
    else:
        obj.__dict__.update(meta.state)
    return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
    r"""Return the source code of the given Python module, caching the
    result so repeated queries are cheap.
    """
    cached = _module_to_src_dict.get(module)
    if cached is not None:
        return cached
    src = inspect.getsource(module)
    # Record the mapping in both directions for later lookups.
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
def _src_to_module(src):
    r"""Get or create a Python module for the given source code.
    """
    module = _src_to_module_dict.get(src, None)
    if module is None:
        # Unique name so repeated unpickles never collide in sys.modules.
        module_name = "_imported_module_" + uuid.uuid4().hex
        module = types.ModuleType(module_name)
        # Register in sys.modules so pickling/introspection inside the module works.
        sys.modules[module_name] = module
        _module_to_src_dict[module] = src
        _src_to_module_dict[src] = module
        # NOTE(review): executes pickled source code — only load pickles from trusted sources.
        exec(src, module.__dict__) # pylint: disable=exec-used
    return module
#----------------------------------------------------------------------------
def _check_pickleable(obj):
r"""Check that the given object is pickleable, raising an exception if
it is not. This function is expected to be considerably more efficient
than actually pickling the object.
"""
def recurse(obj):
if isinstance(obj, (list, tuple, set)):
return [recurse(x) for x in obj]
if isinstance(obj, dict):
return [[recurse(x), recurse(y)] for x, y in obj.items()]
if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
return None # Python primitive types are pickleable.
if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
return None # NumPy arrays and PyTorch tensors are pickleable.
if is_persistent(obj):
return None # Persistent objects are pickleable, by virtue of the constructor check.
return obj
with io.BytesIO() as f:
pickle.dump(recurse(obj), f)
#----------------------------------------------------------------------------
| 9,708 | 37.527778 | 144 | py |
ice-ice | ice-ice/torch_utils/misc.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
_constant_cache = dict()
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a cached constant tensor for `value`.

    Tensors are memoized by (value bytes, shape, dtype, device,
    memory_format), so repeated requests for the same constant reuse a
    single tensor and avoid redundant CPU=>GPU copies.
    """
    value = np.asarray(value)
    shape = None if shape is None else tuple(shape)
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format
    key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(key)
    if cached is None:
        cached = torch.as_tensor(value.copy(), dtype=dtype, device=device)
        if shape is not None:
            # Broadcast against an empty tensor to expand to the requested shape.
            cached, _ = torch.broadcast_tensors(cached, torch.empty(shape))
        cached = cached.contiguous(memory_format=memory_format)
        _constant_cache[key] = cached
    return cached
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.
# Use the native implementation when available (PyTorch >= 1.8.0a0),
# otherwise emulate it for older versions.
try:
    nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
        """Fallback replacement for torch.nan_to_num: maps NaN to 0 and clamps +/-inf."""
        assert isinstance(input, torch.Tensor)
        if posinf is None:
            posinf = torch.finfo(input.dtype).max
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        # The fallback only supports nan == 0 (nansum drops NaNs, i.e. treats them as 0).
        assert nan == 0
        # unsqueeze+nansum replaces NaNs with 0 without changing the shape; clamp handles +/-inf.
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert.
# Assert that also works symbolically inside torch.jit.trace(): prefer
# torch._assert (added in 1.8.0a0) and fall back to the older torch.Assert
# spelling on PyTorch 1.7.
try:
    symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to suppress known warnings in torch.jit.trace().
class suppress_tracer_warnings(warnings.catch_warnings):
    """Context manager that suppresses known torch.jit.TracerWarning messages
    while tracing; the previous warning filters are restored on exit by the
    inherited warnings.catch_warnings machinery."""
    def __enter__(self):
        super().__enter__()
        warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
        return self
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().
def assert_shape(tensor, ref_shape):
    """Assert that `tensor` matches `ref_shape`. A `None` entry in
    `ref_shape` matches any size for that dimension. Uses a symbolic
    assert for tensor-valued sizes so the check survives torch.jit.trace().
    """
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for dim, (actual, expected) in enumerate(zip(tensor.shape, ref_shape)):
        if expected is None:
            continue # free dimension
        if isinstance(expected, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(actual), expected), f'Wrong size for dimension {dim}')
        elif isinstance(actual, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(actual, torch.as_tensor(expected)), f'Wrong size for dimension {dim}: expected {expected}')
        elif actual != expected:
            raise AssertionError(f'Wrong size for dimension {dim}: got {actual}, expected {expected}')
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
    """Decorator that wraps `fn` in torch.autograd.profiler.record_function()
    so each call shows up as a named range in profiler traces.

    Uses functools.wraps so the wrapper preserves not just __name__ (as the
    original manual assignment did) but also __doc__, __module__,
    __qualname__, and __wrapped__, keeping introspection tools working.
    """
    import functools
    @functools.wraps(fn)
    def decorator(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    return decorator
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler for torch.utils.data.DataLoader that loops over the dataset
    indefinitely. When shuffling, items are continuously re-shuffled within a
    sliding window (`window_size` fraction of the dataset) as iteration
    proceeds, and indices are dealt round-robin across `num_replicas` ranks."""
    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size
    def __iter__(self):
        # Yields dataset indices forever; never raises StopIteration.
        order = np.arange(len(self.dataset))
        rnd = None
        window = 0
        if self.shuffle:
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))
        idx = 0
        while True:
            i = idx % order.size
            # Deal every num_replicas-th index to this rank.
            if idx % self.num_replicas == self.rank:
                yield order[i]
            # Incrementally re-shuffle by swapping the current slot with a
            # random earlier slot inside the window.
            if window >= 2:
                j = (i - rnd.randint(window)) % order.size
                order[i], order[j] = order[j], order[i]
            idx += 1
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
    """Return all parameters and buffers of `module` as a single flat list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
def named_params_and_buffers(module):
    """Return (name, tensor) pairs for all parameters and buffers of `module`,
    parameters first."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
def copy_params_and_buffers(src_module, dst_module, require_all=False):
    """Copy every parameter and buffer of `src_module` into the tensor of the
    same name in `dst_module`, preserving each destination tensor's
    requires_grad flag. With `require_all=True`, every destination tensor
    must have a source counterpart."""
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    src_tensors = dict(list(src_module.named_parameters()) + list(src_module.named_buffers()))
    for name, tensor in list(dst_module.named_parameters()) + list(dst_module.named_buffers()):
        assert (name in src_tensors) or (not require_all)
        if name in src_tensors:
            tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager for enabling/disabling DistributedDataParallel gradient
    synchronization. When `sync` is falsy and `module` is wrapped in DDP,
    the body runs under `module.no_sync()`; otherwise it runs normally."""
    assert isinstance(module, torch.nn.Module)
    if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
        yield
    else:
        with module.no_sync():
            yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
    """Assert that every parameter and buffer of `module` is identical across
    all distributed ranks, by broadcasting rank 0's copy and comparing.
    Tensors whose full name matches `ignore_regex` are skipped.
    Must be called on all ranks; requires torch.distributed to be initialized."""
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        # Compare the local copy against rank 0's copy.
        other = tensor.clone()
        torch.distributed.broadcast(tensor=other, src=0)
        # nan_to_num so that matching NaN/Inf values compare equal.
        assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` and print a table summarizing the module
    hierarchy: parameter/buffer counts and output shapes/dtypes per submodule,
    down to `max_nesting` levels. With `skip_redundant=True`, submodules that
    contribute no new tensors are omitted. Returns the module's outputs.

    Bug fix: output shapes were previously taken from `e.outputs[0]` for every
    output, so modules with multiple outputs reported the first output's shape
    for all of them; each output's own shape is now reported.
    """
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))
    # Register hooks.
    entries = []
    nesting = [0]
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()
    # Identify unique outputs, parameters, and buffers.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        # Fixed: report each output's own shape (was e.outputs[0].shape for all).
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
    # Print table.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs
#----------------------------------------------------------------------------
| 10,992 | 40.798479 | 133 | py |
ice-ice | ice-ice/torch_utils/ops/bias_act.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import warnings
import numpy as np
import torch
import dnnlib
import traceback
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Specifications of the supported activation functions. Per entry:
#   func        -- slow reference implementation used by _bias_act_ref().
#   def_alpha   -- default shape parameter (only meaningful for 'lrelu').
#   def_gain    -- default output scaling factor.
#   cuda_idx    -- opcode passed to the CUDA plugin.
#   ref         -- which tensor(s) the backward pass needs: 'x' (input) and/or 'y' (output).
#   has_2nd_grad -- whether the op supports second-order gradients.
activation_funcs = {
    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_inited = False
_plugin = None
_null_tensor = torch.empty([0])
def _init():
    """Build/load the bias_act CUDA plugin on first call.

    Returns True if the plugin is available. Compilation failures are
    reported as a warning and the caller falls back to the slow reference
    implementation.
    """
    global _inited, _plugin
    if not _inited:
        _inited = True
        sources = ['bias_act.cpp', 'bias_act.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        # Deliberate best-effort: any build/load failure falls back to the
        # reference path. `except Exception` (instead of the original bare
        # `except:`) no longer swallows KeyboardInterrupt/SystemExit.
        except Exception:
            warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.
    Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
    and scales the result by `gain`. Each of the steps is optional. In most cases,
    the fused op is considerably more efficient than performing the same calculation
    using standard PyTorch ops. It supports first and second order gradients,
    but not third order gradients.
    Args:
    x: Input activation tensor. Can be of any shape.
    b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
    as `x`. The shape must be known, and it must match the dimension of `x`
    corresponding to `dim`.
    dim: The dimension in `x` corresponding to the elements of `b`.
    The value of `dim` is ignored if `b` is not specified.
    act: Name of the activation function to evaluate, or `"linear"` to disable.
    Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
    See `activation_funcs` for a full list. `None` is not allowed.
    alpha: Shape parameter for the activation function, or `None` to use the default.
    gain: Scaling factor for the output tensor, or `None` to use default.
    See `activation_funcs` for the default scaling of each activation function.
    If unsure, consider specifying 1.
    clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
    the clamping (default).
    impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
    Returns:
    Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the fused CUDA kernel only when requested, on a CUDA tensor, and when
    # the plugin compiled successfully; otherwise fall back to the reference path.
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard PyTorch ops.

    Cleanup: `alpha` and `gain` are converted to float exactly once (they were
    previously re-cast redundantly before use), and the docstring no longer
    claims "TensorFlow ops".
    """
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1) # -1 encodes "no clamping"
    # Add bias.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        # Reshape the 1D bias so it broadcasts along `dim`.
        x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])
    # Evaluate activation function.
    x = spec.func(x, alpha=alpha)
    # Scale by gain.
    if gain != 1:
        x = x * gain
    # Clamp.
    if clamp >= 0:
        x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
# Cache of autograd Function classes, keyed by (dim, act, alpha, gain, clamp).
_bias_act_cuda_cache = dict()
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.
    Returns an autograd Function class specialized for the given arguments;
    call its `.apply(x, b)` to execute.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)
    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]
    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels_last layout when the input uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is a no-op.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only the tensors the backward pass actually needs (per spec.ref).
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every dimension except `dim`.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])
            return dx, db
    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
            # grad=1 selects the first-derivative kernel in the plugin.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx
        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                # grad=2 selects the second-derivative kernel in the plugin.
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
            return d_dy, d_x, d_b, d_y
    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 10,047 | 46.173709 | 185 | py |
ice-ice | ice-ice/torch_utils/ops/grid_sample_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import warnings
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
#----------------------------------------------------------------------------
def grid_sample(input, grid):
    """Drop-in replacement for torch.nn.functional.grid_sample() (bilinear,
    zeros padding, align_corners=False) that supports arbitrarily high order
    gradients when the custom op is enabled and supported."""
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
#----------------------------------------------------------------------------
def _should_use_custom_op():
    """Return True if the custom grid_sample op is enabled and this PyTorch
    version is known to support it; otherwise warn (when enabled) and fall
    back to the standard implementation."""
    if not enabled:
        return False
    for prefix in ('1.7.', '1.8.', '1.9'):
        if torch.__version__.startswith(prefix):
            return True
    warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
    return False
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
    """Custom forward for 2D grid_sample (bilinear, zeros padding,
    align_corners=False) whose backward is itself differentiable."""
    @staticmethod
    def forward(ctx, input, grid):
        # Expects NCHW input and NHW2 grid (both 4D).
        assert input.ndim == 4
        assert grid.ndim == 4
        output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        ctx.save_for_backward(input, grid)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        # Route through a custom Function so second-order gradients work.
        grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
        return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
    """Backward of _GridSample2dForward, implemented via the internal ATen op
    so that it can itself be differentiated once more."""
    @staticmethod
    def forward(ctx, grad_output, input, grid):
        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
        # Trailing args mirror the forward call's flags (presumably
        # interpolation_mode=0/bilinear, padding_mode=0/zeros,
        # align_corners=False) — relies on this internal op's signature.
        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
        ctx.save_for_backward(grid)
        return grad_input, grad_grid
    @staticmethod
    def backward(ctx, grad2_grad_input, grad2_grad_grid):
        _ = grad2_grad_grid # unused
        grid, = ctx.saved_tensors
        grad2_grad_output = None
        grad2_input = None
        grad2_grid = None
        if ctx.needs_input_grad[0]:
            # d(grad_input)/d(grad_output) is grid_sample itself.
            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
        # Third-order gradients w.r.t. the grid are not supported.
        assert not ctx.needs_input_grad[2]
        return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,299 | 38.285714 | 138 | py |
ice-ice | ice-ice/torch_utils/ops/conv2d_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import warnings
import contextlib
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
@contextlib.contextmanager
def no_weight_gradients():
    """Context manager that temporarily disables computation of gradients with
    respect to the convolution weights (`weight_gradients_disabled` flag).

    Bug fix: the previous value is now restored in a `finally` clause, so an
    exception raised inside the `with` body no longer leaves the flag stuck
    at True for the rest of the process.
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for torch.nn.functional.conv2d() that supports
    arbitrarily high order gradients when the custom op is usable; otherwise
    delegates to the standard implementation."""
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
    return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for torch.nn.functional.conv_transpose2d() that
    supports arbitrarily high order gradients when the custom op is usable;
    otherwise delegates to the standard implementation."""
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
    return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
    """Return True if the custom conv op should handle `input`: the feature
    must be enabled, cuDNN available, the tensor on CUDA, and the PyTorch
    version known to be supported (otherwise a warning is issued)."""
    assert isinstance(input, torch.Tensor)
    if not enabled or not torch.backends.cudnn.enabled or input.device.type != 'cuda':
        return False
    for prefix in ('1.7.', '1.8.', '1.9'):
        if torch.__version__.startswith(prefix):
            return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
_conv2d_gradfix_cache = dict()  # Maps op parameters -> specialized autograd Function class.

def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a torch.autograd.Function implementing conv2d / conv_transpose2d
    for the given fixed parameter set, with custom second-order gradients.

    Classes are specialized per parameter combination and memoized in
    `_conv2d_gradfix_cache`, keyed by all arguments.
    """
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)
    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]
    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else: # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding required so that the "opposite" (transposed) op used in
        # backward() reproduces exactly `input_shape`.
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]
    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            if not transpose:
                output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else: # transpose
                output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return output
        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            grad_input = None
            grad_weight = None
            grad_bias = None
            if ctx.needs_input_grad[0]:
                # Gradient w.r.t. input = the opposite convolution of grad_output.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
                assert grad_input.shape == input.shape
            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
                assert grad_weight.shape == weight_shape
            if ctx.needs_input_grad[2]:
                # Bias gradient: sum grad_output over batch and spatial dims.
                grad_bias = grad_output.sum([0, 2, 3])
            return grad_input, grad_weight, grad_bias
    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            # NOTE(review): uses private cuDNN backward-weight ops; presumably only
            # valid on the PyTorch versions whitelisted in _should_use_custom_op().
            op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            assert grad_weight.shape == weight_shape
            ctx.save_for_backward(grad_output, input)
            return grad_weight
        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad2_grad_output = None
            grad2_input = None
            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output.shape
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input.shape
            return grad2_grad_output, grad2_input
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
#----------------------------------------------------------------------------
| 7,677 | 43.900585 | 197 | py |
ice-ice | ice-ice/torch_utils/ops/upfirdn2d.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import warnings
import numpy as np
import torch
import traceback
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_inited = False  # True once a plugin build has been attempted (success or not).
_plugin = None   # Compiled CUDA extension module, or None if unavailable.

def _init():
    """Build/load the upfirdn2d CUDA plugin on first use.

    Returns:
        True if the plugin is available, False otherwise. The build is
        attempted only once; later calls just return the cached result.
    """
    global _inited, _plugin
    if not _inited:
        _inited = True  # Bug fix: without this, a failing build was retried (and re-warned) on every call.
        sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception:  # Deliberately best-effort: fall back to the slow reference path.
            warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
    """Return (width, height) of an upfirdn2d filter tensor; None means 1x1 identity."""
    if f is None:
        return 1, 1
    assert isinstance(f, torch.Tensor)
    assert f.ndim in [1, 2]
    height, width = f.shape[0], f.shape[-1]
    with misc.suppress_tracer_warnings(): # dimensions are treated as constants when tracing
        width = int(width)
        height = int(height)
    misc.assert_shape(f, [height, width][:f.ndim])
    assert width >= 1 and height >= 1
    return width, height
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    Per channel: (1) upsample by inserting `up - 1` zeros after each pixel,
    (2) zero-pad / crop by `padding` (w.r.t. the upsampled image; negative
    values crop), (3) convolve with the 2D FIR filter `f`, keeping only output
    pixels whose footprint lies within the input, and (4) keep every `down`-th
    pixel. Comparable to scipy.signal.upfirdn(), but fused and supporting
    gradients of arbitrary order.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter `[filter_height, filter_width]`,
                     separable `[filter_taps]`, or None (identity).
        up:          Integer upsampling factor, or per-axis `[x, y]` (default: 1).
        down:        Integer downsampling factor, or per-axis `[x, y]` (default: 1).
        padding:     int, `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use: 'ref' or 'cuda' (default: 'cuda').

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the fused CUDA kernel only when requested, on a CUDA tensor, and when
    # the plugin actually built; otherwise fall back to the reference path.
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        cuda_op = _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
        return cuda_op.apply(x, f)
    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        # None means identity: a single-tap all-pass filter.
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Upsample by inserting zeros: pad each pixel with up-1 zeros along a new
    # trailing axis, then fold back into the spatial dimensions.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad or crop. Positive padding is applied with F.pad; negative padding
    # (cropping) is applied by slicing.
    x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
    # Setup filter.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        # conv2d() below performs correlation, so pre-flip to get true convolution.
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter. After adding the two leading axes f.ndim is
    # 4 (non-separable) or 3 (separable: applied as two 1D passes).
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
#----------------------------------------------------------------------------
_upfirdn2d_cuda_cache = dict()  # Maps op parameters -> specialized autograd Function class.

def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Fast CUDA implementation of `upfirdn2d()` using custom ops.

    Returns a torch.autograd.Function class specialized for the given
    parameters; classes are memoized in `_upfirdn2d_cuda_cache`.
    """
    # Parse arguments.
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Lookup from cache.
    key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
    if key in _upfirdn2d_cuda_cache:
        return _upfirdn2d_cuda_cache[key]
    # Forward op.
    class Upfirdn2dCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, f): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            if f is None:
                # None means identity: a single-tap all-pass filter.
                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
            y = x
            if f.ndim == 2:
                y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
            else:
                # Separable filter: two 1D passes, splitting the gain between them.
                y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
                y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
            ctx.save_for_backward(f)
            ctx.x_shape = x.shape
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            f, = ctx.saved_tensors
            _, _, ih, iw = ctx.x_shape
            _, _, oh, ow = dy.shape
            fw, fh = _get_filter_size(f)
            # Padding so that the adjoint op (up/down swapped, filter flipped)
            # maps dy back to exactly the input shape.
            p = [
                fw - padx0 - 1,
                iw * upx - ow * downx + padx0 - upx + 1,
                fh - pady0 - 1,
                ih * upy - oh * downy + pady0 - upy + 1,
            ]
            dx = None
            df = None
            if ctx.needs_input_grad[0]:
                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
            # Gradient w.r.t. the filter is not implemented.
            assert not ctx.needs_input_grad[1]
            return dx, df
    # Add to cache.
    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
    return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Filter a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its shape matches the input ("same"
    filtering). User-specified `padding` is applied on top of that; negative
    values crop. Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter `[filter_height, filter_width]`,
                     separable `[filter_taps]`, or None (identity).
        padding:     int, `[x, y]`, or `[x_before, x_after, y_before, y_after]`,
                     w.r.t. the output (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use: 'ref' or 'cuda' (default: 'cuda').

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Grow padding by half the filter size so the output matches the input.
    full_padding = [
        px0 + fw // 2,
        px1 + (fw - 1) // 2,
        py0 + fh // 2,
        py1 + (fh - 1) // 2,
    ]
    return upfirdn2d(x, f, padding=full_padding, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Upsample a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its size is `up` times the input.
    User-specified `padding` is applied on top of that; negative values crop.
    Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter `[filter_height, filter_width]`,
                     separable `[filter_taps]`, or None (identity).
        up:          Integer upsampling factor, or per-axis `[x, y]` (default: 2).
        padding:     int, `[x, y]`, or `[x_before, x_after, y_before, y_after]`,
                     w.r.t. the output (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use: 'ref' or 'cuda' (default: 'cuda').

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    upx, upy = _parse_scaling(up)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter on the upsampled grid so the output aligns with the input.
    p = [
        px0 + (fw + upx - 1) // 2,
        px1 + (fw - upx) // 2,
        py0 + (fh + upy - 1) // 2,
        py1 + (fh - upy) // 2,
    ]
    # Multiply gain by upx*upy to compensate for the inserted zeros.
    return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Downsample a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its size is a fraction of the input.
    User-specified `padding` is applied on top of that; negative values crop.
    Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter `[filter_height, filter_width]`,
                     separable `[filter_taps]`, or None (identity).
        down:        Integer downsampling factor, or per-axis `[x, y]` (default: 2).
        padding:     int, `[x, y]`, or `[x_before, x_after, y_before, y_after]`,
                     w.r.t. the input (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use: 'ref' or 'cuda' (default: 'cuda').

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    downx, downy = _parse_scaling(down)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter so the retained pixels align with the input grid.
    p = [
        px0 + (fw - downx + 1) // 2,
        px1 + (fw - downx) // 2,
        py0 + (fh - downy + 1) // 2,
        py1 + (fh - downy) // 2,
    ]
    return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
| 16,287 | 41.306494 | 157 | py |
ice-ice | ice-ice/torch_utils/ops/conv2d_resample.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
    """Return the weight shape as a list of plain ints."""
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        dims = list(map(int, w.shape))
    misc.assert_shape(w, dims)
    return dims
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
    """
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    # Flip weight if requested.
    if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
        w = w.flip([2, 3])
    # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
    # 1x1 kernel + memory_format=channels_last + less than 64 channels.
    if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
        # x.stride()[1] == 1 indicates channels_last layout — TODO confirm this
        # is the intended detection, it can also hold for num_channels == 1.
        if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
            if out_channels <= 4 and groups == 1:
                # A 1x1 ungrouped conv is a plain matmul over flattened pixels.
                in_shape = x.shape
                x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
                x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
            else:
                # Force contiguous layout before the conv to dodge the slow path.
                x = x.to(memory_format=torch.contiguous_format)
                w = w.to(memory_format=torch.contiguous_format)
                x = conv2d_gradfix.conv2d(x, w, groups=groups)
            return x.to(memory_format=torch.channels_last)
    # Otherwise => execute using conv2d_gradfix.
    op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.
    Padding is performed only once at the beginning, not between the operations.
    Args:
        x:              Input tensor of shape
                        `[batch_size, in_channels, in_height, in_width]`.
        w:              Weight tensor of shape
                        `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f:              Low-pass filter for up/downsampling. Must be prepared beforehand by
                        calling upfirdn2d.setup_filter(). None = identity (default).
        up:             Integer upsampling factor (default: 1).
        down:           Integer downsampling factor (default: 1).
        padding:        Padding with respect to the upsampled image. Can be a single number
                        or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
                        (default: 0).
        groups:         Split input channels into N groups (default: 1).
        flip_weight:    False = convolution, True = correlation (default: True).
        flip_filter:    False = convolution, True = correlation (default: False).
    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)
    # Adjust padding to account for up/downsampling (same centering offsets
    # used by upfirdn2d.upsample2d() / downsample2d()).
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2
    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x
    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # conv_transpose2d() expects weights as [in, out//groups, kh, kw],
        # so swap the channel axes (per group when grouped).
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        # Transposed convolution pads differently; shift the requested padding
        # accordingly and feed the non-negative part (pxt/pyt) to the conv.
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
#----------------------------------------------------------------------------
| 7,591 | 47.356688 | 130 | py |
ice-ice | ice-ice/torch_utils/ops/fma.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
    """Fused multiply-add `a * b + c` with custom, slightly faster gradients."""
    return _FusedMultiplyAdd.apply(a, b, c)
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
@staticmethod
def forward(ctx, a, b, c): # pylint: disable=arguments-differ
out = torch.addcmul(c, a, b)
ctx.save_for_backward(a, b)
ctx.c_shape = c.shape
return out
@staticmethod
def backward(ctx, dout): # pylint: disable=arguments-differ
a, b = ctx.saved_tensors
c_shape = ctx.c_shape
da = None
db = None
dc = None
if ctx.needs_input_grad[0]:
da = _unbroadcast(dout * b, a.shape)
if ctx.needs_input_grad[1]:
db = _unbroadcast(dout * a, b.shape)
if ctx.needs_input_grad[2]:
dc = _unbroadcast(dout, c_shape)
return da, db, dc
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x
#----------------------------------------------------------------------------
| 2,034 | 32.360656 | 105 | py |
ice-ice | ice-ice/metrics/metric_utils.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib
#----------------------------------------------------------------------------
class MetricOptions:
    """Bag of options shared by all metric computations.

    Args:
        G:              Generator module to evaluate (may be None when only
                        dataset statistics are needed).
        G_kwargs:       Keyword arguments for running G (default: empty).
        dataset_kwargs: Keyword arguments for constructing the dataset (default: empty).
        num_gpus:       Number of GPUs participating (default: 1).
        rank:           Rank of the current process (default: 0).
        device:         Device to use; defaults to `cuda:<rank>`.
        progress:       Parent ProgressMonitor; a sub-monitor is created on rank 0,
                        other ranks get a fresh (silent) monitor.
        cache:          Enable on-disk caching of dataset statistics (default: True).
    """
    def __init__(self, G=None, G_kwargs=None, dataset_kwargs=None, num_gpus=1, rank=0, device=None, progress=None, cache=True):
        assert 0 <= rank < num_gpus
        self.G = G
        # None sentinels instead of mutable `{}` defaults; behavior is unchanged
        # because the dicts were always copied into EasyDicts anyway.
        self.G_kwargs = dnnlib.EasyDict(G_kwargs if G_kwargs is not None else {})
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs if dataset_kwargs is not None else {})
        self.num_gpus = num_gpus
        self.rank = rank
        self.device = device if device is not None else torch.device('cuda', rank)
        # Only rank 0 reports progress via the parent monitor.
        self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
        self.cache = cache
#----------------------------------------------------------------------------
_feature_detector_cache = dict()
def get_feature_detector_name(url):
    """Derive a short detector name from a URL: basename without its extension."""
    basename = url.split('/')[-1]
    return os.path.splitext(basename)[0]
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    """Download (or fetch from the in-process cache) a TorchScript feature detector.

    Cached per (url, device). In multi-GPU runs rank 0 downloads first while the
    other ranks wait at a barrier, so the file is fetched/written only once.
    """
    assert 0 <= rank < num_gpus
    key = (url, device)
    if key not in _feature_detector_cache:
        is_leader = (rank == 0)
        if not is_leader and num_gpus > 1:
            torch.distributed.barrier() # leader goes first
        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
            _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
        if is_leader and num_gpus > 1:
            torch.distributed.barrier() # others follow
    return _feature_detector_cache[key]
#----------------------------------------------------------------------------
class FeatureStats:
    """Accumulates detector features across batches (and GPUs) for metrics.

    Can capture the raw features themselves (`capture_all`) and/or running
    sums for mean/covariance (`capture_mean_cov`), up to `max_items` rows.
    """
    def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
        self.capture_all = capture_all
        self.capture_mean_cov = capture_mean_cov
        self.max_items = max_items
        self.num_items = 0
        self.num_features = None    # Set lazily on the first append().
        self.all_features = None    # List of float32 arrays when capture_all.
        self.raw_mean = None        # Running sum of features (float64).
        self.raw_cov = None         # Running sum of outer products (float64).
    def set_num_features(self, num_features):
        """Fix the feature dimensionality and allocate accumulators (idempotent)."""
        if self.num_features is not None:
            assert num_features == self.num_features
        else:
            self.num_features = num_features
            self.all_features = []
            self.raw_mean = np.zeros([num_features], dtype=np.float64)
            self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)
    def is_full(self):
        """Return True once max_items features have been collected."""
        return (self.max_items is not None) and (self.num_items >= self.max_items)
    def append(self, x):
        """Append a `[num_samples, num_features]` array, truncating at max_items."""
        x = np.asarray(x, dtype=np.float32)
        assert x.ndim == 2
        if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
            if self.num_items >= self.max_items:
                return
            x = x[:self.max_items - self.num_items]
        self.set_num_features(x.shape[1])
        self.num_items += x.shape[0]
        if self.capture_all:
            self.all_features.append(x)
        if self.capture_mean_cov:
            # Accumulate in float64 to limit rounding error over many batches.
            x64 = x.astype(np.float64)
            self.raw_mean += x64.sum(axis=0)
            self.raw_cov += x64.T @ x64
    def append_torch(self, x, num_gpus=1, rank=0):
        """Gather a feature batch from all GPUs (interleaved) and append it."""
        assert isinstance(x, torch.Tensor) and x.ndim == 2
        assert 0 <= rank < num_gpus
        if num_gpus > 1:
            ys = []
            for src in range(num_gpus):
                y = x.clone()
                torch.distributed.broadcast(y, src=src)
                ys.append(y)
            x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
        self.append(x.cpu().numpy())
    def get_all(self):
        """Return all captured features as one `[num_items, num_features]` array."""
        assert self.capture_all
        return np.concatenate(self.all_features, axis=0)
    def get_all_torch(self):
        """Same as get_all(), but as a torch tensor."""
        return torch.from_numpy(self.get_all())
    def get_mean_cov(self):
        """Return the (mean, covariance) of the accumulated features."""
        assert self.capture_mean_cov
        mean = self.raw_mean / self.num_items
        cov = self.raw_cov / self.num_items
        cov = cov - np.outer(mean, mean)
        return mean, cov
    def save(self, pkl_file):
        """Pickle the full internal state to `pkl_file`."""
        with open(pkl_file, 'wb') as f:
            pickle.dump(self.__dict__, f)
    @staticmethod
    def load(pkl_file):
        """Restore a FeatureStats object previously written by save()."""
        with open(pkl_file, 'rb') as f:
            s = dnnlib.EasyDict(pickle.load(f))
        # __dict__.update restores every captured field, including mean/cov state.
        obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
        obj.__dict__.update(s)
        return obj
#----------------------------------------------------------------------------
class ProgressMonitor:
    """Reports metric-computation progress, optionally forwarding it to a
    callback (`progress_fn`) rescaled into the [pfn_lo, pfn_hi] / pfn_total range.
    """
    def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
        self.tag = tag
        self.num_items = num_items
        self.verbose = verbose
        self.flush_interval = flush_interval
        self.progress_fn = progress_fn
        self.pfn_lo = pfn_lo
        self.pfn_hi = pfn_hi
        self.pfn_total = pfn_total
        self.start_time = time.time()
        self.batch_time = self.start_time
        self.batch_items = 0
        if self.progress_fn is not None:
            # Report the starting point immediately.
            self.progress_fn(self.pfn_lo, self.pfn_total)
    def update(self, cur_items):
        """Record that `cur_items` items are done so far.

        Prints/reports at most once every `flush_interval` items, and always
        when the final item count is reached.
        """
        assert (self.num_items is None) or (cur_items <= self.num_items)
        if (cur_items < self.batch_items + self.flush_interval) and (self.num_items is None or cur_items < self.num_items):
            return
        cur_time = time.time()
        total_time = cur_time - self.start_time
        time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
        if (self.verbose) and (self.tag is not None):
            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
        self.batch_time = cur_time
        self.batch_items = cur_items
        if (self.progress_fn is not None) and (self.num_items is not None):
            # Linearly interpolate progress into this monitor's [pfn_lo, pfn_hi] slice.
            self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), self.pfn_total)
    def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
        """Create a child monitor covering the [rel_lo, rel_hi] fraction of this
        monitor's progress range."""
        return ProgressMonitor(
            tag = tag,
            num_items = num_items,
            flush_interval = flush_interval,
            verbose = self.verbose,
            progress_fn = self.progress_fn,
            pfn_lo = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_lo,
            pfn_hi = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_hi,
            pfn_total = self.pfn_total,
        )
#----------------------------------------------------------------------------
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
    """Accumulate detector features over the real dataset, with on-disk caching.

    Returns a FeatureStats object; when caching is enabled and a matching cache
    file exists, the loop is skipped and the cached stats are returned instead.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)

    # Try to look the result up from cache.
    cache_file = None
    if opts.cache:
        # The cache key covers everything that affects the resulting stats.
        args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        digest = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{digest.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')

        # Rank 0 checks for the file; every process must agree on the outcome.
        hit = os.path.isfile(cache_file) if opts.rank == 0 else False
        if opts.num_gpus > 1:
            hit = torch.as_tensor(hit, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=hit, src=0)
            hit = (float(hit.cpu()) != 0)
        if hit:
            return FeatureStats.load(cache_file)

    # Initialize stats accumulator, progress reporting, and the detector.
    num_items = len(dataset)
    if max_items is not None:
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Each rank processes an interleaved slice of the dataset.
    item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
    for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])  # grayscale -> 3-channel
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)

    # Save to cache (rank 0 only), writing atomically via a temp file.
    if cache_file is not None and opts.rank == 0:
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        tmp_path = cache_file + '.' + uuid.uuid4().hex
        stats.save(tmp_path)
        os.replace(tmp_path, cache_file) # atomic
    return stats
#----------------------------------------------------------------------------
def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, jit=False, **stats_kwargs):
    """Accumulate detector features over freshly generated images.

    Generates images in chunks of `batch_gen` (which must divide `batch_size`)
    until the FeatureStats accumulator is full.
    """
    if batch_gen is None:
        batch_gen = min(batch_size, 4)
    assert batch_size % batch_gen == 0

    # Set up an isolated copy of the generator and load the label source.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)

    # Image generation func: map [-1, 1] output to uint8 [0, 255].
    def run_generator(z, c):
        img = G(z=z, c=c, **opts.G_kwargs)
        img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        return img

    # Optionally trace the generator for speed.
    if jit:
        z = torch.zeros([batch_gen, G.z_dim], device=opts.device)
        c = torch.zeros([batch_gen, G.c_dim], device=opts.device)
        run_generator = torch.jit.trace(run_generator, [z, c], check_trace=False)

    # Initialize stats accumulator, progress reporting, and the detector.
    stats = FeatureStats(**stats_kwargs)
    assert stats.max_items is not None
    progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Main loop: sample latents + random labels, generate, featurize.
    while not stats.is_full():
        images = []
        for _ in range(batch_size // batch_gen):
            z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            labels = [dataset.get_label(np.random.randint(len(dataset))) for _ in range(batch_gen)]
            c = torch.from_numpy(np.stack(labels)).pin_memory().to(opts.device)
            images.append(run_generator(z, c))
        images = torch.cat(images)
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])  # grayscale -> 3-channel
        features = detector(images, **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    return stats
#----------------------------------------------------------------------------
| 11,806 | 41.778986 | 167 | py |
ice-ice | ice-ice/metrics/kernel_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Kernel Inception Distance (KID) from the paper "Demystifying MMD
GANs". Matches the original implementation by Binkowski et al. at
https://github.com/mbinkowski/MMD-GAN/blob/master/gan/compute_scores.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_kid(opts, max_real, num_gen, num_subsets, max_subset_size):
    """Kernel Inception Distance: unbiased MMD^2 with a cubic polynomial
    kernel, averaged over `num_subsets` random subsets of the features.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Raw features before the softmax layer.
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all()
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all()
    # Only rank 0 computes the final score.
    if opts.rank != 0:
        return float('nan')

    feature_dim = real_features.shape[1]
    subset_size = min(real_features.shape[0], gen_features.shape[0], max_subset_size)
    total = 0
    for _ in range(num_subsets):
        x = gen_features[np.random.choice(gen_features.shape[0], subset_size, replace=False)]
        y = real_features[np.random.choice(real_features.shape[0], subset_size, replace=False)]
        # Cubic polynomial kernel: k(u, v) = (u.v / dim + 1)^3.
        a = (x @ x.T / feature_dim + 1) ** 3 + (y @ y.T / feature_dim + 1) ** 3
        b = (x @ y.T / feature_dim + 1) ** 3
        # Unbiased MMD^2 estimate: diagonal (self-similarity) terms excluded.
        total += (a.sum() - np.diag(a).sum()) / (subset_size - 1) - b.sum() * 2 / subset_size
    return float(total / num_subsets / subset_size)
#----------------------------------------------------------------------------
| 2,302 | 48 | 118 | py |
ice-ice | ice-ice/metrics/frechet_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Frechet Inception Distance (FID) from the paper
"GANs trained by a two time-scale update rule converge to a local Nash
equilibrium". Matches the original implementation by Heusel et al. at
https://github.com/bioinf-jku/TTUR/blob/master/fid.py"""
import numpy as np
import scipy.linalg
from . import metric_utils
#----------------------------------------------------------------------------
def compute_fid(opts, max_real, num_gen):
    """Frechet Inception Distance between Gaussian fits of real and generated
    Inception features: |mu_g - mu_r|^2 + Tr(S_g + S_r - 2 (S_g S_r)^0.5).
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Raw features before the softmax layer.
    mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()
    mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov()
    # Only rank 0 computes the final score.
    if opts.rank != 0:
        return float('nan')

    mean_term = np.square(mu_gen - mu_real).sum()
    cov_sqrt, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
    # np.real drops tiny imaginary residue from the matrix square root.
    return float(np.real(mean_term + np.trace(sigma_gen + sigma_real - cov_sqrt * 2)))
#----------------------------------------------------------------------------
| 2,040 | 47.595238 | 118 | py |
ice-ice | ice-ice/metrics/perceptual_path_length.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator
Architecture for Generative Adversarial Networks". Matches the original
implementation by Karras et al. at
https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py"""
import copy
import numpy as np
import torch
import dnnlib
from . import metric_utils
#----------------------------------------------------------------------------
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
    """Spherical interpolation between batches of vectors.

    Both inputs are normalized to the unit sphere first; the result is a unit
    vector at fraction `t` of the great-circle arc from `a` towards `b`.
    """
    a_unit = a / a.norm(dim=-1, keepdim=True)
    b_unit = b / b.norm(dim=-1, keepdim=True)
    cos_angle = (a_unit * b_unit).sum(dim=-1, keepdim=True)
    angle = t * torch.acos(cos_angle)
    # Component of b orthogonal to a, renormalized.
    ortho = b_unit - cos_angle * a_unit
    ortho = ortho / ortho.norm(dim=-1, keepdim=True)
    out = a_unit * torch.cos(angle) + ortho * torch.sin(angle)
    # Final renormalization guards against accumulated rounding error.
    return out / out.norm(dim=-1, keepdim=True)
#----------------------------------------------------------------------------
class PPLSampler(torch.nn.Module):
    """End-to-end PPL sampling module.

    Each forward pass draws a pair of nearby latent points (epsilon apart along
    an interpolation path), synthesizes both images in one batch, and returns
    the squared LPIPS distance divided by epsilon^2 — a finite-difference
    estimate of the perceptual path length at that point.
    """
    def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16):
        # G: generator; vgg16: LPIPS feature network. Both are deep-copied so
        # the sampler owns independent instances (noise buffers get mutated).
        assert space in ['z', 'w']
        assert sampling in ['full', 'end']
        super().__init__()
        self.G = copy.deepcopy(G)
        self.G_kwargs = G_kwargs
        self.epsilon = epsilon      # finite-difference step along the path
        self.space = space          # interpolate in 'z' (slerp) or 'w' (lerp)
        self.sampling = sampling    # 'full': t ~ U[0,1); 'end': t = 0
        self.crop = crop            # center-crop images before LPIPS
        self.vgg16 = copy.deepcopy(vgg16)
    def forward(self, c):
        # c: conditioning labels — assumed shape [batch, c_dim]; TODO confirm.
        # Generate random latents and interpolation t-values
        # ('end' sampling multiplies by 0, pinning t to the path start).
        t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0)
        z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2)
        # Interpolate in W or Z; both endpoints are mapped in a single call.
        if self.space == 'w':
            w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2)
            wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2))
            wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon)
        else: # space == 'z'
            zt0 = slerp(z0, z1, t.unsqueeze(1))
            zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon)
            wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2)
        # Randomize noise buffers so PPL is not tied to one noise realization.
        for name, buf in self.G.named_buffers():
            if name.endswith('.noise_const'):
                buf.copy_(torch.randn_like(buf))
        # Generate both path endpoints in a single synthesis batch.
        img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs)
        # Center crop (note: the name `c` is reused as a crop unit from here on).
        if self.crop:
            assert img.shape[2] == img.shape[3]
            c = img.shape[2] // 8
            img = img[:, :, c*3 : c*7, c*2 : c*6]
        # Downsample to 256x256 by box-filter averaging.
        factor = self.G.img_resolution // 256
        if factor > 1:
            img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5])
        # Scale dynamic range from [-1,1] to [0,255].
        img = (img + 1) * (255 / 2)
        if self.G.img_channels == 1:
            img = img.repeat([1, 3, 1, 1])
        # Evaluate differential LPIPS between the two endpoints.
        lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2)
        dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2
        return dist
#----------------------------------------------------------------------------
def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
    """Estimate Perceptual Path Length.

    Args:
        opts: metric_utils.MetricOptions bundle (generator, device, ranks, progress).
        num_samples: total number of path-length samples to draw.
        epsilon: finite-difference step used by PPLSampler.
        space: 'z' or 'w' -- latent space in which to interpolate.
        sampling: 'full' or 'end' -- where along the path t is sampled.
        crop: center-crop generated images before LPIPS.
        batch_size: per-GPU batch size.
        jit: trace the sampler with torch.jit for speed.

    Returns:
        Mean LPIPS path length (float) on rank 0; NaN on other ranks.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)

    # Setup sampler.
    sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16)
    sampler.eval().requires_grad_(False).to(opts.device)
    if jit:
        c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
        sampler = torch.jit.trace(sampler, [c], check_trace=False)

    # Sampling loop: every rank samples, then results are broadcast so all
    # ranks append the same values in the same order.
    dist = []
    progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples)
    for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
        progress.update(batch_start)
        c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)]
        c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
        x = sampler(c)
        for src in range(opts.num_gpus):
            y = x.clone()
            if opts.num_gpus > 1:
                torch.distributed.broadcast(y, src=src)
            dist.append(y)
    progress.update(num_samples)

    # Compute PPL on rank 0, discarding the bottom/top 1% of samples as outliers.
    if opts.rank != 0:
        return float('nan')
    dist = torch.cat(dist)[:num_samples].cpu().numpy()
    # np.percentile's 'interpolation' keyword was deprecated in NumPy 1.22 and
    # removed in NumPy 2.0 in favor of 'method'; support both.
    try:
        lo = np.percentile(dist, 1, method='lower')
        hi = np.percentile(dist, 99, method='higher')
    except TypeError:  # NumPy < 1.22 only accepts the old keyword.
        lo = np.percentile(dist, 1, interpolation='lower')
        hi = np.percentile(dist, 99, interpolation='higher')
    ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean()
    return float(ppl)
| 5,538 | 40.962121 | 131 | py |
ice-ice | ice-ice/metrics/inception_score.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Inception Score (IS) from the paper "Improved techniques for training
GANs". Matches the original implementation by Salimans et al. at
https://github.com/openai/improved-gan/blob/master/inception_score/model.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_is(opts, num_gen, num_splits):
    """Inception Score: mean and std of exp(E[KL(p(y|x) || p(y))]) over splits."""
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer.
    gen_probs = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        capture_all=True, max_items=num_gen).get_all()
    # Only rank 0 computes the final score.
    if opts.rank != 0:
        return float('nan'), float('nan')

    scores = []
    for split in range(num_splits):
        lo = split * num_gen // num_splits
        hi = (split + 1) * num_gen // num_splits
        part = gen_probs[lo:hi]
        # KL divergence of each sample's class distribution from the split marginal.
        kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True)))
        scores.append(np.exp(np.mean(np.sum(kl, axis=1))))
    return float(np.mean(scores)), float(np.std(scores))
#----------------------------------------------------------------------------
| 1,874 | 47.076923 | 126 | py |
ice-ice | ice-ice/metrics/metric_main.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
#----------------------------------------------------------------------------
_metric_dict = dict() # name => fn; global registry populated via @register_metric
def register_metric(fn):
    """Decorator: register *fn* under its function name so calc_metric() can dispatch to it."""
    assert callable(fn)
    _metric_dict[fn.__name__] = fn
    return fn
def is_valid_metric(metric):
    """Return True if *metric* is the name of a registered metric function."""
    return metric in _metric_dict
def list_valid_metrics():
    """Return the names of all registered metric functions."""
    return list(_metric_dict.keys())
#----------------------------------------------------------------------------
def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments.
    """Run the named registered metric and return its results plus timing metadata."""
    assert is_valid_metric(metric)
    opts = metric_utils.MetricOptions(**kwargs)

    # Calculate.
    tic = time.time()
    results = _metric_dict[metric](opts)
    elapsed = time.time() - tic

    # Broadcast rank 0's numbers so every process agrees on the results.
    for key in list(results.keys()):
        value = results[key]
        if opts.num_gpus > 1:
            value = torch.as_tensor(value, dtype=torch.float64, device=opts.device)
            torch.distributed.broadcast(tensor=value, src=0)
            value = float(value.cpu())
        results[key] = value

    # Decorate with metadata.
    return dnnlib.EasyDict(
        results=dnnlib.EasyDict(results),
        metric=metric,
        total_time=elapsed,
        total_time_str=dnnlib.util.format_time(elapsed),
        num_gpus=opts.num_gpus,
    )
#----------------------------------------------------------------------------
def report_metric(result_dict, run_dir=None, snapshot_pkl=None):
    """Print a metric result as one JSON line and append it to the run's .jsonl log."""
    metric = result_dict['metric']
    assert is_valid_metric(metric)
    # Store the snapshot path relative to the run directory for portability.
    if run_dir is not None and snapshot_pkl is not None:
        snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)

    record = json.dumps(dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time()))
    print(record)
    if run_dir is not None and os.path.isdir(run_dir):
        with open(os.path.join(run_dir, f'metric-{metric}.jsonl'), 'at') as log:
            log.write(record + '\n')
#----------------------------------------------------------------------------
# Primary metrics.
@register_metric
def fid50k_full(opts):
    """FID over the full dataset (no size cap, no x-flips), 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
    return dict(fid50k_full=fid)
@register_metric
def kid50k_full(opts):
    """KID against up to 1M real images, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k_full=kid)
@register_metric
def pr50k3_full(opts):
    """Precision/recall (k=3 neighborhood) against up to 200k real images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall)
@register_metric
def ppl2_wend(opts):
    """PPL in W space, endpoint sampling, no crop, 50k samples."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
    return dict(ppl2_wend=ppl)
@register_metric
def is50k(opts):
    """Inception Score (mean, std over 10 splits) on 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
    return dict(is50k_mean=mean, is50k_std=std)
# Legacy metrics.
@register_metric
def fid50k(opts):
    """Legacy: FID against 50k real images (dataset xflip setting left as-is)."""
    opts.dataset_kwargs.update(max_size=None)
    fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
    return dict(fid50k=fid)
@register_metric
def kid50k(opts):
    """Legacy: KID, 50k real vs 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k=kid)
@register_metric
def pr50k3(opts):
    """Legacy: precision/recall (k=3), 50k real vs 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_precision=precision, pr50k3_recall=recall)
@register_metric
def ppl_zfull(opts):
    """Legacy: PPL in Z space, full-path sampling, center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='full', crop=True, batch_size=2)
    return dict(ppl_zfull=ppl)
@register_metric
def ppl_wfull(opts):
    """Legacy: PPL in W space, full-path sampling, center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='full', crop=True, batch_size=2)
    return dict(ppl_wfull=ppl)
@register_metric
def ppl_zend(opts):
    """Legacy: PPL in Z space, endpoint sampling, center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='end', crop=True, batch_size=2)
    return dict(ppl_zend=ppl)
@register_metric
def ppl_wend(opts):
    """Legacy: PPL in W space, endpoint sampling, center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=True, batch_size=2)
    return dict(ppl_wend=ppl)
#----------------------------------------------------------------------------
| 5,715 | 36.359477 | 147 | py |
ice-ice | ice-ice/metrics/precision_recall.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Precision/Recall (PR) from the paper "Improved Precision and Recall
Metric for Assessing Generative Models". Matches the original implementation
by Kynkaanniemi et al. at
https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
import torch
from . import metric_utils
#----------------------------------------------------------------------------
def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
    """Pairwise Euclidean distances between row_features and col_features,
    computed in column batches and (when num_gpus > 1) gathered across ranks.
    Returns the full [rows, cols] matrix on rank 0 and None on other ranks.
    """
    assert 0 <= rank < num_gpus
    num_cols = col_features.shape[0]
    # Pad the columns so they split evenly into a multiple of num_gpus batches.
    num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
    padded = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches])
    col_batches = padded.chunk(num_batches)
    gathered = []
    for batch in col_batches[rank::num_gpus]:
        dist = torch.cdist(row_features.unsqueeze(0), batch.unsqueeze(0))[0]
        for src in range(num_gpus):
            shared = dist.clone()
            if num_gpus > 1:
                torch.distributed.broadcast(shared, src=src)
            gathered.append(shared.cpu() if rank == 0 else None)
    if rank != 0:
        return None
    # Drop the padding columns when reassembling.
    return torch.cat(gathered, dim=1)[:, :num_cols]
#----------------------------------------------------------------------------
def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
    """Improved precision/recall via k-NN manifold estimation.

    Precision: fraction of generated probes inside the real manifold.
    Recall: fraction of real probes inside the generated manifold.
    """
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    detector_kwargs = dict(return_features=True)

    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)

    results = dict()
    for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
        # Manifold radius per point: distance to its (nhood_size+1)-th nearest
        # neighbour within the manifold (the +1 skips the point itself).
        radii = []
        for rows in manifold.split(row_batch_size):
            dist = compute_distances(row_features=rows, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            radii.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
        radii = torch.cat(radii) if opts.rank == 0 else None
        # A probe is covered if it lies within any manifold point's radius.
        covered = []
        for rows in probes.split(row_batch_size):
            dist = compute_distances(row_features=rows, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            covered.append((dist <= radii).any(dim=1) if opts.rank == 0 else None)
        results[name] = float(torch.cat(covered).to(torch.float32).mean() if opts.rank == 0 else 'nan')
    return results['precision'], results['recall']
#----------------------------------------------------------------------------
| 3,617 | 56.428571 | 159 | py |
ice-ice | ice-ice/ice/landmark_interpolation.py | import numpy as np
import scipy.spatial
import skimage.draw
import torch
from torchvision import io
import face_alignment
import matplotlib.pyplot as plt
def interpolate_from_landmarks(image, landmarks, vertex_indices=None, weights=None, mask=None):
    """Express pixels as barycentric combinations of facial-landmark positions.

    Triangulates the landmarks (plus the four image corners) with Delaunay,
    rasterizes each triangle to find which facet every pixel belongs to, and
    computes barycentric weights of each pixel over its triangle's vertices.
    Returned (vertex_indices, weights) can be fed back in to re-interpolate
    the same pixels against updated landmark positions.

    Args:
        image: tensor whose last two dims are (H, W); only size/device are used
            (pixel values appear only in the disabled debug branch).
        landmarks: (row, col) landmark coordinates — assumed shape [L, 2];
            TODO confirm against callers.
        vertex_indices: optional precomputed [N, 3] triangle-vertex indices;
            when given, triangulation/rasterization is skipped.
        weights: optional precomputed [N, 3] barycentric weights.
        mask: optional boolean [H, W] mask restricting pixels considered
            (only allowed when vertex_indices is None).

    Returns:
        (interpolated [N, 2], vertex_indices [N, 3], weights [N, 3]).
    """
    H, W = image.shape[-2:]
    step = 4  # NOTE(review): unused — presumably leftover from an earlier sampling scheme.
    # Append the four image corners so the triangulation covers the whole frame.
    rect = landmarks.new_tensor([[0, 0], [H, 0], [0, W], [H, W]])
    vertices = torch.cat([landmarks, rect], dim=0)
    vertices_cpu = vertices.cpu().detach()
    if vertex_indices is None:
        delaunay = scipy.spatial.Delaunay(vertices_cpu)
        triangles = delaunay.simplices
        # Rasterize triangles into a per-pixel facet-index map (-1 = uncovered).
        facet_map = np.full([H, W], -1, dtype=np.int32)
        for index, triangle in enumerate(triangles):
            points = vertices_cpu[triangle]
            rr, cc = skimage.draw.polygon(points[:,0], points[:,1], [H - 1, W - 1])
            facet_map[rr, cc] = index
        facet_map = torch.from_numpy(facet_map).long().to(image.device)
        triangles = torch.from_numpy(triangles).long().to(image.device)
        # Per-pixel (row, col) coordinates.
        grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
        grids = torch.stack([grid0, grid1], dim=-1).to(image.device)
        # Keep only covered (and optionally masked) pixels.
        valid = facet_map >= 0
        if mask is not None:
            valid = valid & mask
        facet_map = facet_map[valid]
        grids = grids[valid]
        N = len(facet_map)
        # Gather each pixel's triangle vertex indices from its facet index.
        # N -> N x 1 x 3
        facet_map = facet_map[..., None, None].expand(-1, 1, 3)
        # F x 3 -> N x F x 3
        expanded = triangles[None, ...].expand(N, -1, -1)
        # N x 1 x 3
        vertex_indices = torch.gather(expanded, dim=1, index=facet_map)
        # N x 1 x 3 -> N x 3
        vertex_indices = vertex_indices.squeeze(1)
    else:
        assert mask is None
        N = len(vertex_indices)
    # Gather the (row, col) coordinates of each pixel's three triangle vertices.
    # N x 3 -> N x 3 x 2
    expanded = vertex_indices[..., None].expand(-1, -1, 2)
    # V x 2 -> N x V x 2
    vertices = vertices[None, ...].expand(N, -1, -1)
    # N x 3 x 2
    vertices = torch.gather(vertices, dim=1, index=expanded)
    if weights is None:
        # NOTE(review): `grids` is only defined in the vertex_indices-is-None
        # branch; passing precomputed vertex_indices with weights=None would
        # raise NameError here — presumably callers always pass both; confirm.
        with torch.no_grad():
            # Barycentric coordinates of each pixel within its triangle:
            # https://gamedev.stackexchange.com/questions/23743/whats-the-most-efficient-way-to-find-barycentric-coordinates/63203#63203
            v0 = vertices[:, 1, :] - vertices[:, 0, :]
            v1 = vertices[:, 2, :] - vertices[:, 0, :]
            v2 = grids - vertices[:, 0, :]
            den = v0[:, 1] * v1[:, 0] - v1[:, 1] * v0[:, 0]
            v = (v2[:, 1] * v1[:, 0] - v1[:, 1] * v2[:, 0]) / den
            w = (v0[:, 1] * v2[:, 0] - v2[:, 1] * v0[:, 0]) / den
            u = 1. - v - w
            weights = torch.stack([u, v, w], dim=-1)
    # Weighted combination of vertex positions; differentiable w.r.t. landmarks.
    interpolated = (vertices * weights.unsqueeze(-1)).sum(dim=1)
    # Disabled debug visualization of the triangulation (left as-is).
    if False:
        if not hasattr(interpolate_from_landmarks, 'triangles'):
            interpolate_from_landmarks.triangles = triangles
        if not hasattr(interpolate_from_landmarks, 'save_index'):
            interpolate_from_landmarks.save_index = 0
        f = plt.figure(figsize=(3, 3))
        plt.imshow(image.permute(1, 2, 0).cpu().detach())
        for index, triangle in enumerate(interpolate_from_landmarks.triangles):
            points = vertices_cpu[triangle]
            points = points - 0.5
            plt.plot(points[:, 1], points[:, 0], c='g')
        # mask = facet_map[:, 0, 0] == 100
        # vertices = vertices[mask].cpu().detach()
        # interpolated = interpolated[mask].cpu().detach()
        np.random.seed(12)
        selected = np.random.choice(len(interpolated), 100)
        selected = interpolated[selected, :].cpu().detach()
        selected = selected - 0.5
        # plt.figure(figsize=(16, 12))
        # plt.imshow(image.permute(1, 2, 0).cpu().detach())
        plt.scatter(x=selected[:, 1], y=selected[:, 0], c='r')
        # plt.scatter(x=vertices[0, :, 1], y=vertices[0, :, 0], c='g')
        f.gca().set_axis_off()
        f.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
        plt.margins(0, 0)
        f.gca().xaxis.set_major_locator(plt.NullLocator())
        f.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.show()
        f.savefig(f'/opt_/cephfs_workspace/gpudisk/libo427/workspace/attractive/figs/blend_{interpolate_from_landmarks.save_index}.pdf',
                  bbox_inches='tight', pad_inches=0)
        interpolate_from_landmarks.save_index += 1
    # N x 2, N x 3, N x 3
    return interpolated, vertex_indices, weights
if __name__ == '__main__':
    # Smoke test: detect landmarks on a sample image and run the interpolation.
    image = io.read_image('research/jackiechan.png')
    im = image.permute(1, 2, 0).numpy()
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
    landmarks = fa.get_landmarks(im)[0]
    # Detector returns (x, y); flip to (row, col) as expected by the function.
    landmarks = torch.from_numpy(landmarks).flip(dims=(-1,))
    interpolated, vertex_indices, weights = interpolate_from_landmarks(image, landmarks)
    # Report shapes instead of dropping into a debugger
    # (leftover `import pdb; pdb.set_trace()` removed).
    print(f'interpolated: {tuple(interpolated.shape)}, '
          f'vertex_indices: {tuple(vertex_indices.shape)}, '
          f'weights: {tuple(weights.shape)}')
| 4,861 | 37.283465 | 136 | py |
ice-ice | ice-ice/ice/resnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet50']
# torchvision download URLs for ImageNet-pretrained checkpoints, keyed by
# architecture name. Only 'resnet50' is consumed by this module's public API.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a 1x1 (pointwise) convolution with no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block used by ResNet-18/34.

    Output channels = planes * expansion (expansion is 1 here).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the skip path when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block used by ResNet-50/101/152.

    Output channels = planes * expansion (4x the internal width).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the skip path when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class fc_block(nn.Module):
    """Linear -> BatchNorm1d -> (optional Dropout) -> ReLU."""
    def __init__(self, inplanes, planes, drop_rate=0.15):
        super(fc_block, self).__init__()
        self.fc = nn.Linear(inplanes, planes)
        self.bn = nn.BatchNorm1d(planes)
        # Dropout module exists only when a positive rate is requested.
        if drop_rate > 0:
            self.dropout = nn.Dropout(drop_rate)
        self.relu = nn.ReLU(inplace=True)
        self.drop_rate = drop_rate

    def forward(self, x):
        out = self.bn(self.fc(x))
        if self.drop_rate > 0:
            out = self.dropout(out)
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone with one shared FC stem and a separate two-way
    classifier head per attribute (default 40).

    forward() returns a list of `num_attributes` tensors, one [batch, 2]
    logit tensor per attribute head.
    """
    def __init__(self, block, layers, num_attributes=40, zero_init_residual=False):
        # block: BasicBlock or Bottleneck; layers: blocks per stage, e.g. [3, 4, 6, 3].
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Shared embedding consumed by every attribute head.
        self.stem = fc_block(512 * block.expansion, 512)
        # One independent binary head per attribute, named classifier00..classifierNN.
        for i in range(num_attributes):
            setattr(self, 'classifier' + str(i).zfill(2), nn.Sequential(fc_block(512, 256), nn.Linear(256, 2)))
        self.num_attributes = num_attributes
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one ResNet stage: `blocks` residual blocks; the first block
        # gets a 1x1 projection on the skip path when stride/width change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Standard ResNet trunk down to a pooled 1x1 feature map.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # Shared embedding, then one set of logits per attribute head.
        x = self.stem(x)
        y = []
        for i in range(self.num_attributes):
            classifier = getattr(self, 'classifier' + str(i).zfill(2))
            y.append(classifier(x))
        return y
def resnet50(pretrained=True, **kwargs):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Only layers whose names and shapes match are copied; the
        # per-attribute heads stay randomly initialized.
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
def init_pretrained_weights(model, model_url):
    """
    Initialize model with pretrained weights.
    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrained = model_zoo.load_url(model_url)
    own_state = model.state_dict()
    compatible = {
        name: tensor
        for name, tensor in pretrained.items()
        if name in own_state and own_state[name].size() == tensor.size()
    }
    own_state.update(compatible)
    model.load_state_dict(own_state)
    print("Initialized model with pretrained weights from {}".format(model_url))
| 7,115 | 31.792627 | 116 | py |
ice-ice | ice-ice/ice/wrapper.py | import matplotlib.pyplot as plt
import face_alignment
import kornia
import torch
from torch import nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch_utils import misc
import dnnlib
import legacy
from external.identity.iresnet import iresnet50, iresnet100
from external.landmark.mobilefacenet import MobileFaceNet
from external.parsing.model import BiSeNet
class StyleGanWrapper(nn.Module):
    """Wrapper around a StyleGAN2-ADA generator exposing latent-space maps.

    ``forward`` converts latents between spaces 'z' (input noise), 'w'
    (mapped latent), 'w+' (per-layer latents), 's' (packed style vector)
    and 'x' (image in [0, 1]). Conversions only go forward along
    z -> w/w+ -> s -> x.
    """
    def __init__(self, generator):
        # generator: either a ready nn.Module or a path to an official
        # StyleGAN2-ADA pickle whose G_ema weights are copied over.
        super(StyleGanWrapper, self).__init__()
        if isinstance(generator, nn.Module):
            self.generator = generator
        elif isinstance(generator, str):
            with open(generator, 'rb') as f:
                # BUGFIX: `device` was undefined here; load on CPU and let
                # the caller move the wrapper with .to(...) as usual.
                official_gan = legacy.load_network_pkl(f)['G_ema']  # type: ignore
            # NOTE(review): `Generator` is expected to come from
            # training.networks — confirm the import exists in this module.
            self.generator = Generator(z_dim=official_gan.z_dim,
                                       c_dim=official_gan.c_dim,
                                       w_dim=official_gan.w_dim,
                                       img_resolution=official_gan.img_resolution,
                                       img_channels=official_gan.img_channels)
            self.generator.load_state_dict(official_gan.state_dict())
        else:
            raise NotImplementedError
        self.generator.eval()
        self.truncation_psi = 1
        self.noise_mode = 'const'
        # BUGFIX: the original computed this tuple and discarded it.
        self.gen_size = (self.generator.img_resolution, ) * 2
    def reset(self):
        # Hook for subclasses that cache per-run state; nothing to do here.
        pass
    def forward(self, u, from_='z', to='x'):
        """Map latents ``u`` from space ``from_`` to space ``to``."""
        self.generator.eval()
        if from_ == 'z':
            misc.assert_shape(u, [None, self.generator.z_dim])
            zs = u
            if to == 'z':
                return zs
        if from_ == 'w':
            misc.assert_shape(u, [None, self.generator.w_dim])
            w0s = u
            # Broadcast the single w to every synthesis layer.
            ws = w0s.unsqueeze(1).expand(-1, self.generator.num_ws, -1)
        elif from_ == 'w+':
            misc.assert_shape(u, [None, self.generator.num_ws, self.generator.w_dim])
            ws = u
        elif from_ in ['z']:
            ws = self.generator.mapping(zs, None, truncation_psi=self.truncation_psi)
            w0s = ws[:, 0, :]
        if to == 'w':
            return w0s
        if to == 'w+':
            return ws
        if from_ == 's':
            svec = u
            styles = self.generator.unpack_styles(svec)
        elif from_ in ['z', 'w', 'w+']:
            styles = self.generator.compute_styles(ws)
            svec = self.generator.pack_styles(styles)
        if to == 's':
            return svec
        x = self.generator.synthesis(styles, noise_mode=self.noise_mode)
        # Map synthesis output from [-1, 1] to [0, 1].
        x = x * 0.5 + 0.5
        return x
class FaceSegmenter(nn.Module):
    """Binary region segmenter built on a pretrained 19-class BiSeNet.

    ``forward`` returns a boolean mask that is True wherever the parsing
    label belongs to ``mask_labels``; the mask can optionally be dilated,
    eroded, or turned into a border ring before being resized back to the
    input resolution.
    """
    def __init__(self, path, mask_labels, morphology=None, ks_ratio=0.1):
        # path: BiSeNet checkpoint (loaded via dnnlib so URLs also work).
        # mask_labels: parsing class ids to include in the mask.
        # morphology: None | 'dilation' | 'erosion' | 'ring'.
        # ks_ratio: morphology kernel size relative to the 512x512 seg input.
        super(FaceSegmenter, self).__init__()
        n_classes = 19
        self.segmenter = BiSeNet(n_classes=n_classes)
        with dnnlib.util.open_url(path, 'rb') as f:
            self.segmenter.load_state_dict(torch.load(f))
        self.segmenter.eval()
        self.sgt_size = (512, 512)
        self.normalize = transforms.Compose([
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            transforms.Resize(self.sgt_size)
        ])
        self.morphology = morphology
        ks_size = int(self.sgt_size[0] * ks_ratio)
        self.register_buffer('kernel',
                             torch.ones(ks_size, ks_size, dtype=torch.int))
        self.mask_labels = mask_labels
    def forward(self, x):
        # x: image batch; returns a bool mask at x's spatial resolution.
        x_n = self.normalize(x)
        pred = self.segmenter(x_n)[0].argmax(dim=1, keepdim=True)
        target = x.new_tensor(self.mask_labels)[None, :, None, None]
        # True where the predicted class is any of the selected labels.
        mask = (pred == target).any(dim=1, keepdim=True).int()
        # NOTE(review): morphology runs on CPU and the registered `kernel`
        # buffer is rebound to CPU here — presumably to limit GPU memory
        # use with the large kernel; confirm before changing.
        mask = mask.cpu()
        self.kernel = self.kernel.cpu()
        if self.morphology == 'dilation':
            mask = kornia.morphology.dilation(mask, self.kernel, border_type='constant')
        elif self.morphology == 'erosion':
            mask = kornia.morphology.erosion(mask, self.kernel, border_type='constant')
        elif self.morphology == 'ring':
            # Band between the dilated and eroded masks (region boundary).
            dilated = kornia.morphology.dilation(mask, self.kernel, border_type='constant')
            eroded = kornia.morphology.erosion(mask, self.kernel, border_type='constant')
            mask = dilated & (~eroded)
        else:
            assert self.morphology is None
        mask = mask.to(x.device)
        mask = transforms.functional.resize(mask, x.shape[-2:])
        mask = mask > 0
        return mask
class KeyPointDetector2(nn.Module):
    """Differentiable 68-point landmark detector based on MobileFaceNet."""
    def __init__(self, path):
        # path: checkpoint whose 'state_dict' entry holds the detector weights.
        super(KeyPointDetector2, self).__init__()
        self.det_size = (112, 112)
        self.num_landmarks = 68
        self.detector = MobileFaceNet(self.det_size, self.num_landmarks * 2)
        self.detector.load_state_dict(torch.load(path)['state_dict'])
        self.detector.eval()
        self.normalize = transforms.Compose([
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            transforms.Resize(self.det_size)
        ])
    def forward(self, x):
        normalized = self.normalize(x)
        height, width = normalized.shape[-2:]
        raw = self.detector(normalized)[0].reshape(-1, self.num_landmarks, 2)
        # Flip the last axis and scale by (H, W) — presumably converting
        # normalized (x, y) outputs to pixel (row, col); confirm with caller.
        return raw.flip(dims=(-1,)) * normalized.new_tensor([height, width])
class KeyPointHeatMapper(nn.Module):
    """Expose the raw 68-channel landmark heatmaps of the face-alignment net."""
    def __init__(self):
        super(KeyPointHeatMapper, self).__init__()
        self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
        self.detector = self.fa.face_alignment_net
        self.detector.eval()
        self.det_size = (256, 256)
        self.num_landmarks = 68
        self.resize = transforms.Compose([ transforms.Resize(self.det_size) ])
    def forward(self, x):
        H0, W0 = x.shape[-2:]
        heatmaps = self.detector(self.resize(x))
        return heatmaps
class KeyPointDetector(nn.Module):
    """Differentiable 68-point landmark detector (face-alignment network).

    ``forward`` turns the network's heatmaps into (row, col) coordinates via
    a soft-argmax (probability-weighted grid average), keeping the output
    differentiable w.r.t. the input image.
    """
    def __init__(self):
        super(KeyPointDetector, self).__init__()
        self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
        self.detector = self.fa.face_alignment_net
        self.detector.eval()
        self.det_size = (256, 256)
        self.num_landmarks = 68
        self.resize = transforms.Compose([ transforms.Resize(self.det_size) ])
    def forward(self, x):
        # x: image batch; returns (B, 68, 2) landmarks in input pixel coords.
        H0, W0 = x.shape[-2:]
        x = self.resize(x)
        # out = self.detect(x)
        # landmarks = self.fa.get_landmarks(x[0].permute(1, 2, 0))
        # plt.imshow(x[0].permute(1, 2, 0).cpu().detach())
        # plt.show()
        # import pdb; pdb.set_trace()
        preds = self.detector(x)
        # Normalize each heatmap to a probability distribution.
        preds /= preds.sum([2, 3], keepdim=True)
        B, C, H, W = preds.shape
        grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
        grid0 = grid0[None, None, ...].to(x.device)
        grid1 = grid1[None, None, ...].to(x.device)
        # Soft-argmax: expected (row, col) under each heatmap.
        p0 = (grid0 * preds).sum(dim=[2, 3])
        p1 = (grid1 * preds).sum(dim=[2, 3])
        # Rescale from heatmap resolution back to the original input size.
        p0 = (p0 + 0.) / H * H0
        p1 = (p1 + 0.) / W * W0
        landmarks = torch.stack([p0, p1], dim=-1)
        return landmarks
    def detect0(self, x):
        # Debugging variant comparing three cropping strategies; not used by
        # forward(). NOTE(review): references the undefined names
        # `resized_crop` and `device`, so it will raise NameError if called.
        fa = self.fa
        x = self.resize(x)
        resolution = 256
        for i in range(len(x)):
            x_perm = x[i].permute(1, 2, 0) * 255
            with torch.no_grad():
                faces = fa.face_detector.detect_from_image(x_perm)
            assert len(faces) > 0
            d = faces[0]
            center = torch.tensor(
                [d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0])
            center[1] = center[1] - (d[3] - d[1]) * 0.12
            scale = (d[2] - d[0] + d[3] - d[1]) / fa.face_detector.reference_scale
            ul = face_alignment.utils.transform([1, 1], center, scale, resolution, True)
            br = face_alignment.utils.transform([resolution, resolution], center, scale, resolution, True)
            print('my', center, scale, resolution, ul, br)
            size = br - ul
            cropped = resized_crop(x[i],
                ul[0], ul[1], size[0], size[1], [resolution, resolution])
            out = fa.face_alignment_net(cropped.unsqueeze(0)).detach()
            inp = face_alignment.utils.crop(x_perm.detach().cpu().numpy(), center, scale)
            inp = torch.from_numpy(inp.transpose(
                (2, 0, 1))).float()
            inp = inp.to(x.device)
            inp.div_(255.0).unsqueeze_(0)
            out1 = fa.face_alignment_net(inp).detach()
            out2 = fa.face_alignment_net(x[i].unsqueeze(0)).detach()
            preds = out
            preds /= preds.sum([2, 3], keepdim=True)
            B, C, H, W = preds.shape
            grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
            grid0 = grid0[None, None, ...].to(device)
            grid1 = grid1[None, None, ...].to(device)
            p0 = (grid0 * preds).sum(dim=[2, 3]).cpu().detach()
            p1 = (grid1 * preds).sum(dim=[2, 3]).cpu().detach()
            plt.imshow(preds[0, 0].cpu().detach())
            plt.scatter(x=p1, y=p0, c='r')
            plt.show()
            preds = out1
            preds /= preds.sum([2, 3], keepdim=True)
            B, C, H, W = preds.shape
            grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
            grid0 = grid0[None, None, ...].to(device)
            grid1 = grid1[None, None, ...].to(device)
            p0 = (grid0 * preds).sum(dim=[2, 3]).cpu().detach()
            p1 = (grid1 * preds).sum(dim=[2, 3]).cpu().detach()
            plt.imshow(preds[0, 0].cpu().detach())
            plt.scatter(x=p1, y=p0, c='r')
            plt.show()
            preds = out2
            preds /= preds.sum([2, 3], keepdim=True)
            B, C, H, W = preds.shape
            grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
            grid0 = grid0[None, None, ...].to(device)
            grid1 = grid1[None, None, ...].to(device)
            p0 = (grid0 * preds).sum(dim=[2, 3]).cpu().detach()
            p1 = (grid1 * preds).sum(dim=[2, 3]).cpu().detach()
            plt.imshow(preds[0, 0].cpu().detach())
            plt.scatter(x=p1, y=p0, c='r')
            plt.show()
            import pdb; pdb.set_trace()
class IDFeatureExtractor(nn.Module):
    """Face-identity embedding network (ArcFace iResNet-100).

    Maps an image batch in [0, 1] to L2-normalized identity features.
    """
    def __init__(self, model_pth="backbone100.pth"):
        super(IDFeatureExtractor, self).__init__()
        self.model = iresnet100()
        # BUGFIX: the original referenced the undefined name `path`;
        # use the `model_pth` argument.
        with dnnlib.util.open_url(model_pth, 'rb') as f:
            self.model.load_state_dict(torch.load(f))
    def forward(self, x):
        # Map [0, 1] inputs to the [-1, 1] range expected by the backbone.
        x = (x - 0.5) / 0.5
        z = self.model(F.interpolate(x, size=112, mode='bilinear'))
        z = F.normalize(z)
        return z
| 10,977 | 36.986159 | 106 | py |
ice-ice | ice-ice/ice/criterions.py | import matplotlib.pyplot as plt
import kornia
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from wrapper import StyleGanWrapper, FaceSegmenter, KeyPointDetector
from landmark_interpolation import interpolate_from_landmarks
def masked_mean(x, mask):
    """Per-channel mean of ``x`` over the pixels selected by ``mask``.

    Args:
        x: tensor of shape (B, C, H, W).
        mask: boolean tensor of shape (B, 1, H, W).

    Returns:
        Tensor of shape (B, C).
    """
    batch, channels, height, width = x.shape
    assert mask.shape == (batch, 1, height, width) and mask.dtype == torch.bool
    masked_sum = (x * mask).flatten(2, -1).sum(dim=-1)
    num_selected = mask.flatten(2, -1).sum(dim=-1)
    return masked_sum / num_selected
class MaskedMSE(StyleGanWrapper):
    """Generate an image and return the masked region cropped to its bbox.

    The segmentation mask is computed once per run and cached; call
    ``reset`` to recompute it for a new starting latent.
    """
    def __init__(self, generator, from_, segmenter, resize=None, return_complementary=True):
        # from_: latent space of the optimization variable ('z'/'w'/'w+'/'s').
        # return_complementary: also return the crop of the unmasked region.
        super(MaskedMSE, self).__init__(generator)
        self.segmenter = segmenter
        self.segmenter.eval()
        self.gen_size = (self.generator.img_resolution,) * 2
        self.cached_mask = None
        self.from_ = from_
        self.resize = None if resize is None else transforms.Resize(resize)
        self.return_complementary = return_complementary
    def reset(self):
        # Drop the cached mask so the next forward recomputes it.
        self.cached_mask = None
    def get_mask(self, x):
        # Segment once (no gradients) and reuse the result afterwards.
        if self.cached_mask is None:
            with torch.no_grad():
                self.cached_mask = self.segmenter(x)
        return self.cached_mask
    def crop_to_bbox(self, x, mask):
        # Crop x to the bounding box of the union of all masks in the batch.
        mask = mask.flatten(0, 1).any(dim=0)
        H, W = x.shape[-2:]
        grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
        grids = torch.stack([grid0, grid1], dim=0).to(x.device)
        # XXX broadcast in masked_selected does not preserve outer dimension.
        pixels = grids.masked_select(mask).reshape(2, -1)
        ul = pixels.min(dim=1)[0]
        br = pixels.max(dim=1)[0]
        size = br - ul + 1
        x = transforms.functional.crop(x, ul[0], ul[1], size[0], size[1])
        return x
    def forward(self, u):
        x = super(MaskedMSE, self).forward(u, from_=self.from_)
        mask = self.get_mask(x)
        if self.resize is not None:
            x = self.resize(x)
            mask = self.resize(mask)
        x_in = x * mask
        x_in = self.crop_to_bbox(x_in, mask)
        if self.return_complementary:
            # Also return everything outside the mask, cropped to its bbox.
            inv_mask = ~mask
            x_out = x * (inv_mask)
            x_out = self.crop_to_bbox(x_out, inv_mask)
            return x_in, x_out
        else:
            return x_in
class LandmarkMSE(StyleGanWrapper):
    """Split detected landmarks into a selected subset and its complement."""
    def __init__(self, generator, from_, detector, selected):
        super(LandmarkMSE, self).__init__(generator)
        self.detector = detector
        self.detector.eval()
        self.from_ = from_
        self.selected = selected
        self.not_selected = list(set(range(detector.num_landmarks)).difference(self.selected))
    def reset(self):
        pass
    def forward(self, u):
        image = super(LandmarkMSE, self).forward(u, from_=self.from_)
        points = self.detector(image)
        return points[:, self.selected], points[:, self.not_selected]
class RelativeLandmarkMSE(LandmarkMSE):
    """Like LandmarkMSE, but each landmark group is centered on its own centroid."""
    def __init__(self, generator, from_, detector, selected):
        super(RelativeLandmarkMSE, self).__init__(
            generator, from_, detector, selected)
    def reset(self):
        pass
    def forward(self, u):
        selected_pts, other_pts = super(RelativeLandmarkMSE, self).forward(u)
        # Subtract each group's centroid so only relative geometry remains.
        centered_sel = selected_pts - selected_pts.mean(dim=1, keepdim=True)
        centered_oth = other_pts - other_pts.mean(dim=1, keepdim=True)
        return centered_sel, centered_oth
class LandmarkInterpolationMSE(StyleGanWrapper):
    """Sample image colors at points interpolated from facial landmarks.

    The interpolation pattern (vertex indices and weights) is computed on
    the first forward pass after ``reset`` and then reused, so the same
    surface points are tracked across optimization steps.
    """
    def __init__(self, generator, from_, detector, segmenter, resize=None):
        super(LandmarkInterpolationMSE, self).__init__(generator)
        self.detector = detector
        self.segmenter = segmenter
        detector.eval()
        if segmenter:
            segmenter.eval()
        self.from_ = from_
        self.resize = None if resize is None else transforms.Resize(resize)
        # BUGFIX: these caches were only created in reset(); calling
        # forward() before reset() raised AttributeError.
        self.vertex_indices = None
        self.interp_weights = None
    def reset(self):
        # Drop the cached interpolation pattern for a new run.
        self.vertex_indices = None
        self.interp_weights = None
    def forward(self, u):
        x = super(LandmarkInterpolationMSE, self).forward(u, from_=self.from_)
        landmarks = self.detector(x)
        mask = None if not self.segmenter else self.segmenter(x)
        if self.resize is not None:
            # Keep landmarks consistent with the resized image.
            scale = x.new_tensor(self.resize.size) / x.new_tensor(x.shape[-2:])
            landmarks *= scale
            x = self.resize(x)
            if mask is not None:  # BUGFIX: resizing a missing mask crashed
                mask = self.resize(mask)
        pixels, vertex_indices, interp_weights = [], [], []
        if self.vertex_indices is None or self.interp_weights is None:
            for i in range(len(x)):
                sample_mask = None if mask is None else mask[i, 0]
                ret = interpolate_from_landmarks(x[i], landmarks[i], mask=sample_mask)
                pixels.append(ret[0])
                vertex_indices.append(ret[1])
                interp_weights.append(ret[2])
            self.vertex_indices = vertex_indices
            self.interp_weights = interp_weights
        else:
            for i in range(len(x)):
                ret = interpolate_from_landmarks(x[i], landmarks[i],
                        vertex_indices=self.vertex_indices[i],
                        weights=self.interp_weights[i])
                pixels.append(ret[0])
        # Subsample so every batch element contributes the same point count.
        min_len = min(len(_) for _ in pixels)
        assert min_len > 10
        for i in range(len(pixels)):
            perm = torch.randperm(len(pixels[i]))
            pixels[i] = pixels[i][perm[:min_len]]
        pixels = torch.stack(pixels, dim=0).unsqueeze(-2)
        # Map pixel coordinates to grid_sample's [-1, 1] range.
        pixels = pixels / (u.new_tensor(x.shape[-2:]) - 1) * 2 - 1
        # grid_sample assume grid to be "xy" order
        pixels = pixels.flip(dims=(-1,))
        interpolated = F.grid_sample(x, pixels, align_corners=True)
        return interpolated
class RegionColorMSE(StyleGanWrapper):
    """Mean color of a segmented region, optionally with the residual map."""
    def __init__(self, generator, from_, segmenter, resize=None, return_residual=True):
        super(RegionColorMSE, self).__init__(generator)
        self.segmenter = segmenter
        if segmenter:
            segmenter.eval()
        self.from_ = from_
        self.resize = None if resize is None else transforms.Resize(resize)
        self.return_residual = return_residual
    def forward(self, u):
        image = super(RegionColorMSE, self).forward(u, from_=self.from_)
        mask = None if not self.segmenter else self.segmenter(image)
        if self.resize is not None:
            image = self.resize(image)
            mask = self.resize(mask)
        # (B, C, 1, 1) mean color over the masked pixels.
        region_color = masked_mean(image, mask)[..., None, None]
        if not self.return_residual:
            return region_color
        # Residual: image minus the flat-color rendering of the region.
        residual = image - region_color * mask
        return region_color, residual
class HighFrequencyMSE(StyleGanWrapper):
    """Split a generated image into high- and low-frequency components.

    The low-frequency part is a 24x24 downsample upsampled back to full
    resolution; the high-frequency part is the residual. An optional
    segmentation mask restricts both components to a region.
    """
    def __init__(self, generator, from_, segmenter, resize=None):
        super(HighFrequencyMSE, self).__init__(generator)
        self.segmenter = segmenter
        if segmenter:
            segmenter.eval()
        self.from_ = from_
        self.resize = None if resize is None else transforms.Resize(resize)
        self.low_freq_resize = transforms.Resize((24, 24))
    def forward(self, u):
        image = super(HighFrequencyMSE, self).forward(u, from_=self.from_)
        mask = None if not self.segmenter else self.segmenter(image)
        if self.resize is not None:
            image = self.resize(image)
            if mask is not None:
                mask = self.resize(mask)
        low = transforms.functional.resize(self.low_freq_resize(image), image.shape[-2:])
        high = image - low
        if mask is not None:
            low = low * mask
            high = high * mask
        return high, low
    def forward_interesting(self, u):
        # Variant returning the residual plus the *small* low-pass image.
        image = super(HighFrequencyMSE, self).forward(u, from_=self.from_)
        mask = None if not self.segmenter else self.segmenter(image)
        if self.resize is not None:
            image = self.resize(image)
            mask = self.resize(mask)
        small = self.low_freq_resize(image)
        low = transforms.functional.resize(small, image.shape[-2:])
        return image - low, small
class IDFeatureMSE(StyleGanWrapper):
    """Identity feature of the generated image, for ID-preservation losses."""
    def __init__(self, generator, from_, extractor):
        super(IDFeatureMSE, self).__init__(generator)
        self.extractor = extractor
        self.extractor.eval()
        self.from_ = from_
    def forward(self, u):
        image = super(IDFeatureMSE, self).forward(u, from_=self.from_)
        return self.extractor(image)
| 9,823 | 34.338129 | 94 | py |
ice-ice | ice-ice/ice/jtj_analysis.py | import functools
import itertools
import numpy as np
from pathlib import Path
import pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from training.networks import Generator
from torch_utils import misc
import legacy
from wrapper import StyleGanWrapper, FaceSegmenter, KeyPointDetector
from criterions import *
def find_jtj_direct(f, dataloader, bases=None):
    """Accumulate the average J^T J of ``f`` over a dataset, exactly.

    Each scalar output of ``f`` is back-propagated separately (one-hot
    grad_outputs) to build the full Jacobian per batch; J^T J is summed
    over all samples and normalized by the dataset size.

    Args:
        f: callable with a ``reset()`` method mapping latent batches to a
            tensor or list/tuple of tensors.
        dataloader: yields latent batches; must expose ``.dataset``.
        bases: unused; kept for signature parity with find_jtj_approx.

    Returns:
        List with one (D, D) matrix per output of ``f``.
    """
    JtJs = None
    for u in (dataloader):
        f.reset()
        batch_size = len(u)
        u_param = nn.Parameter(u, requires_grad=True)
        ys = f(u_param)
        if not isinstance(ys, (list, tuple)):
            ys = [ys]
        ys_fl = [_.flatten(1, -1) for _ in ys]
        Js = [u.new_zeros(_.shape + (u.flatten(1, -1).shape[1],)) for _ in ys_fl]
        for i, y_fl in enumerate(ys_fl):
            for j in tqdm(range(y_fl.shape[1])):
                # BUGFIX: Tensor has no zeros_() method; in-place zeroing is
                # spelled zero_(). (torch.autograd.grad does not populate
                # .grad, so this guard is purely defensive.)
                u_param.grad is None or u_param.grad.zero_()
                # Select output coordinate j for every batch element.
                y_grad = u.new_tensor(j).repeat(batch_size).long()
                y_grad = F.one_hot(y_grad, num_classes=y_fl.shape[1]).float()
                u_grad = torch.autograd.grad(y_fl, u_param,
                        grad_outputs=y_grad, retain_graph=True)
                Js[i][:, j, :] = u_grad[0].flatten(1, -1).detach()
        Js = [_.flatten(0, 1) for _ in Js]
        if JtJs is None:
            JtJs = [0 for _ in ys]
        for i in range(len(JtJs)):
            JtJs[i] += Js[i].T @ Js[i]
    JtJs = [_ / len(dataloader.dataset) for _ in JtJs]
    return JtJs
def find_jtj_approx(f, dataloader, bases=None):
    """Approximate the average J^T J of ``f`` via finite differences.

    For each coordinate direction e_i the input is perturbed by alpha*e_i
    and the gradient of 0.5*||f(u+alpha*e_i) - f(u)||^2 w.r.t. the
    perturbation is one row of alpha^2 * J^T J (to first order); rows are
    accumulated, normalized, and symmetrized.

    Args:
        f: callable with ``reset()`` and ``zero_grad()`` methods.
        dataloader: yields latent batches; must expose ``.dataset``.
        bases: unused; kept for signature parity with find_jtj_direct.

    Returns:
        List with one symmetrized (D, D) matrix per output of ``f``.
    """
    JtJs = None
    for u in (dataloader):
        f.reset()
        with torch.no_grad():
            y0s = f(u)
        if not isinstance(y0s, (list, tuple)):
            y0s = [y0s]
        u_fl = u.flatten(1, -1)
        JtJs_cur = [u.new_zeros((u_fl.shape[1],) * 2) for _ in range(len(y0s))]
        # Perturbation magnitude relative to the typical input norm.
        alpha = u_fl.norm(dim=1).mean().item() * 1e-4
        eye = torch.eye(*(u_fl.shape[1],) * 2, device=u.device)
        for iu in tqdm(range(u_fl.shape[1])):
            n_param = nn.Parameter(eye[iu], requires_grad=True)
            u1 = (u_fl + n_param * alpha).reshape(u.shape)
            y1s = f(u1)
            if not isinstance(y1s, (list, tuple)):
                y1s = [y1s]
            for iy in range(len(y1s)):
                loss = (y1s[iy] - y0s[iy]).square().sum() / 2
                f.zero_grad()
                n_param.grad is None or n_param.grad.zero_()
                loss.backward(retain_graph=True)
                JtJs_cur[iy][iu, :] = n_param.grad.clone().detach()
        if JtJs is None:
            JtJs = [0 for _ in y0s]
        for i in range(len(JtJs)):
            JtJs[i] += JtJs_cur[i]
    for i in range(len(JtJs)):
        # NOTE(review): normalization uses alpha from the *last* batch;
        # fine while all batches share the same scale — confirm.
        JtJs[i] /= len(dataloader.dataset) * alpha ** 2
        # Symmetrize to remove finite-difference noise.
        JtJs[i] = (JtJs[i] + JtJs[i].T) / 2
    return JtJs
def load_or_compute(f, compute, recompute=False, map_location=None):
    """Return the result cached in file ``f``, computing and saving if needed."""
    if recompute or not Path(f).is_file():
        result = compute()
        torch.save(result, f)
        return result
    return torch.load(f, map_location=map_location)
def projected_pca(JtJ, bases=None):
    """Eigendecompose a symmetric matrix, optionally inside a subspace.

    Args:
        JtJ: symmetric (D, D) matrix.
        bases: optional (D, K) basis; when given, the decomposition is done
            on bases^T @ JtJ @ bases and eigenvectors are mapped back.

    Returns:
        (S, V): eigenvalues sorted by decreasing magnitude and the matching
        eigenvectors as the columns of V.
    """
    if bases is not None:
        JtJ = bases.T @ JtJ @ bases
    # BUGFIX/modernization: torch.symeig was removed in torch >= 1.13;
    # torch.linalg.eigh is the replacement for symmetric input.
    S, V = torch.linalg.eigh(JtJ)
    index = S.abs().argsort(descending=True)
    S, V = S[index], V[:, index]
    if bases is not None:
        V = bases @ V
    return S, V
def trim_stack(*V, dim=1):
    """Stack matrices after trimming each to the smallest column count."""
    n_cols = min(m.shape[dim] for m in V)
    assert dim == 1
    return torch.stack(tuple(m[:, :n_cols] for m in V))
# https://discuss.pytorch.org/t/nullspace-of-a-tensor/69980/4
def nullspace(A, rcond=None):
    """Return an orthonormal basis (as columns) orthogonal to the columns of ``A``.

    Singular vectors of ``A.T`` whose singular value falls below the
    tolerance span the result.

    Args:
        A: (M, K) matrix whose column space is to be complemented.
        rcond: relative tolerance for treating singular values as zero;
            defaults to machine epsilon scaled by the matrix size.
    """
    At = A.T
    ut, st, v = At.svd(some=False, compute_uv=True)
    vht = v.T
    Mt, Nt = ut.shape[0], vht.shape[1]
    # BUGFIX: a user-supplied rcond was ignored — the original always read
    # an undefined local (`rcondt`) and raised NameError when rcond was set.
    if rcond is None:
        rcond = torch.finfo(st.dtype).eps * max(Mt, Nt)
    tolt = torch.max(st) * rcond
    numt = torch.sum(st > tolt, dtype=int)
    return vht[numt:, :].T.conj()
def subspace_intersect(*As):
    """Basis for the intersection of the column spaces of the given matrices."""
    complements = [nullspace(basis) for basis in As]
    return nullspace(torch.cat(complements, dim=1))
def compute_early_projected_pca(jtj_act, jtjs_sup, sup_ratio=1e-2, act_ratio=1e-2):
    """PCA of ``jtj_act`` restricted, before decomposition, to the joint
    near-null space of the suppression matrices ``jtjs_sup``.

    Eigenvalues with |s| below ``act_ratio`` of the largest magnitude are
    dropped from the result when ``act_ratio`` is not None.
    """
    if not isinstance(sup_ratio, (list, tuple)):
        sup_ratio = [sup_ratio] * len(jtjs_sup)
    vs_sup = []
    for jtj_sup, ratio in zip(jtjs_sup, sup_ratio):
        s_sup, v_sup = projected_pca(jtj_sup)
        near_null = s_sup.abs() < s_sup.abs().max() * ratio
        vs_sup.append(v_sup[:, near_null])
    vs_sup = subspace_intersect(*vs_sup) if vs_sup else None
    s_act, v_act = projected_pca(jtj_act, bases=vs_sup)
    if act_ratio is not None:
        keep = s_act.abs() > s_act.abs().max() * act_ratio
        s_act, v_act = s_act[keep], v_act[:, keep]
    return s_act, v_act
def compute_late_projected_pca(jtj_act, jtjs_sup, sup_ratio=1e-2, act_ratio=1e-2):
    """PCA of ``jtj_act`` with eigenvectors projected, after decomposition,
    onto the joint near-null space of the suppression matrices.

    Args/returns mirror ``compute_early_projected_pca``.
    """
    vs_sup = []
    if not isinstance(sup_ratio, (list, tuple)):
        sup_ratio = [sup_ratio] * len(jtjs_sup)
    for jtj_sup, ratio in zip(jtjs_sup, sup_ratio):
        s_sup, v_sup = projected_pca(jtj_sup)
        mask = s_sup.abs() < s_sup.abs().max() * ratio
        v_sup = v_sup[:, mask]
        vs_sup.append(v_sup)
    if not vs_sup:
        vs_sup = None
    else:
        vs_sup = subspace_intersect(*vs_sup)
    s_act, v_act = projected_pca(jtj_act)
    if vs_sup is not None:
        # BUGFIX: the original projected onto the LAST individual null space
        # (`v_sup`) instead of the intersected basis (`vs_sup`), and crashed
        # when `jtjs_sup` was empty (vs_sup is None).
        v_act = vs_sup @ vs_sup.T @ v_act
        v_act = F.normalize(v_act, dim=1)
    if act_ratio is not None:
        mask = s_act.abs() > s_act.abs().max() * act_ratio
        s_act = s_act[mask]
        v_act = v_act[:, mask]
    return s_act, v_act
def compute_projected_pca(jtj_act, early_jtjs_sup, late_jtjs_sup, early_sup_ratio, late_sup_ratio=[], act_ratio=1e-2):
    """PCA of ``jtj_act`` with both early and late suppression.

    Early suppression restricts the decomposition to the joint near-null
    space of ``early_jtjs_sup`` before eigendecomposition; late suppression
    projects the resulting eigenvectors onto the joint near-null space of
    ``late_jtjs_sup`` afterwards.

    Returns:
        (s_act, v_act): filtered eigenvalues and (projected) eigenvectors.
    """

    def _joint_null_basis(jtjs, ratios):
        """Intersection of each matrix's near-null eigenspace, or None."""
        # BUGFIX: the original referenced the undefined name `jtjs_sup`
        # when broadcasting a scalar ratio (in both the early and late loop).
        if not isinstance(ratios, (list, tuple)):
            ratios = [ratios] * len(jtjs)
        bases = []
        for jtj, ratio in zip(jtjs, ratios):
            s_sup, v_sup = projected_pca(jtj)
            keep = s_sup.abs() < s_sup.abs().max() * ratio
            bases.append(v_sup[:, keep])
        return subspace_intersect(*bases) if bases else None

    early_basis = _joint_null_basis(early_jtjs_sup, early_sup_ratio)
    s_act, v_act = projected_pca(jtj_act, bases=early_basis)
    late_basis = _joint_null_basis(late_jtjs_sup, late_sup_ratio)
    if late_basis is not None:
        # BUGFIX: the original crashed here when no late suppression was
        # available (late_vs_sup was None, e.g. with the default []).
        v_act = late_basis @ late_basis.T @ v_act
    if act_ratio is not None:
        keep = s_act.abs() > s_act.abs().max() * act_ratio
        s_act = s_act[keep]
        v_act = v_act[:, keep]
    return s_act, v_act
| 7,312 | 32.240909 | 118 | py |
ice-ice | ice-ice/ice/external/identity/iresnet.py | import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=dilation,
        groups=groups, bias=False, dilation=dilation,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class IBasicBlock(nn.Module):
    """ArcFace-style residual block: BN - conv3x3 - BN - PReLU - conv3x3 - BN.

    Unlike the torchvision BasicBlock there is no activation after the
    residual addition, and the second conv carries the stride.
    """
    # Output channels equal ``planes`` (no expansion).
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(IBasicBlock, self).__init__()
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
        self.conv1 = conv3x3(inplanes, planes)
        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
        self.prelu = nn.PReLU(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.bn1(x)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.prelu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the identity path when stride/channels change.
            identity = self.downsample(x)
        out += identity
        return out
class IResNet(nn.Module):
    """ArcFace iResNet backbone mapping 112x112 face crops to embeddings.

    Differences from torchvision's ResNet: stride-1 3x3 stem, PReLU
    activations, stages that stride on entry, and a BN - dropout - fc - BN
    head emitting ``num_features``-dimensional embeddings.
    """
    # Spatial size of the final feature map (7x7 for 112x112 inputs).
    fc_scale = 7 * 7
    def __init__(self,
                 block, layers, dropout=0, num_features=512, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
        super(IResNet, self).__init__()
        # fp16: run the conv trunk under autocast; the fc head stays fp32.
        self.fp16 = fp16
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
        self.prelu = nn.PReLU(self.inplanes)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
        self.dropout = nn.Dropout(p=dropout, inplace=True)
        self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
        self.features = nn.BatchNorm1d(num_features, eps=1e-05)
        # Final BN's scale is frozen at 1 (ArcFace convention).
        nn.init.constant_(self.features.weight, 1.0)
        self.features.weight.requires_grad = False
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each block so it starts as an identity map.
            for m in self.modules():
                if isinstance(m, IBasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Build one stage; the first block strides (or dilates) and may
        # downsample the shortcut, the rest keep the shape.
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        with torch.cuda.amp.autocast(self.fp16):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.prelu(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.bn2(x)
            x = torch.flatten(x, 1)
            x = self.dropout(x)
        # The embedding head always runs in fp32.
        x = self.fc(x.float() if self.fp16 else x)
        x = self.features(x)
        return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct an IResNet from a block type and layer counts.

    Pretrained weights are not distributed for this architecture, so
    requesting them raises ValueError. ``arch`` and ``progress`` exist for
    torchvision-style signature compatibility.
    """
    net = IResNet(block, layers, **kwargs)
    if pretrained:
        raise ValueError()
    return net
def iresnet18(pretrained=False, progress=True, **kwargs):
    """IResNet-18 (IBasicBlock x [2, 2, 2, 2])."""
    return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
    """IResNet-34 (IBasicBlock x [3, 4, 6, 3])."""
    return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
    """IResNet-50 (IBasicBlock x [3, 4, 14, 3])."""
    return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
    """IResNet-100 (IBasicBlock x [3, 13, 30, 3])."""
    return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, progress, **kwargs)
def iresnet200(pretrained=False, progress=True, **kwargs):
    """IResNet-200 (IBasicBlock x [6, 26, 60, 6])."""
    return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, progress, **kwargs)
if __name__ == "__main__":
    # Smoke test: load local ArcFace weights and embed a random batch.
    model_weight_pth = "backbone100.pth"
    model = iresnet100()
    model.load_state_dict(torch.load(model_weight_pth))
    model.eval()
    input = torch.randn(4, 3, 112, 112)
    fea = model(input)
    # Expected: torch.Size([4, 512]) with the default num_features.
    print(fea.shape)
| 7,401 | 36.383838 | 97 | py |
ice-ice | ice-ice/ice/external/parsing/model.py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
# from resnet import Resnet18
# from modules.bn import InPlaceABNSync as BatchNorm2d
# ---------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
# from modules.bn import InPlaceABNSync as BatchNorm2d
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block with an optional 1x1 projection
    shortcut (created when channels or stride change)."""
    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        if in_chan != out_chan or stride != 1:
            # Project the shortcut so shapes match for the addition.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
                )
    def forward(self, x):
        residual = self.conv1(x)
        residual = F.relu(self.bn1(residual))
        residual = self.conv2(residual)
        residual = self.bn2(residual)
        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)
        out = shortcut + residual
        out = self.relu(out)
        return out
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    """Stack ``bnum`` BasicBlocks; only the first may stride/downsample."""
    blocks = [BasicBlock(in_chan, out_chan, stride=stride)]
    blocks.extend(BasicBlock(out_chan, out_chan, stride=1) for _ in range(bnum - 1))
    return nn.Sequential(*blocks)
class Resnet18(nn.Module):
    """ResNet-18 trunk returning 1/8, 1/16 and 1/32 scale feature maps.

    Construction downloads the torchvision ImageNet checkpoint and copies
    all matching weights (the 'fc' classifier entries are skipped).
    """
    def __init__(self):
        super(Resnet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
        self.init_weight()
    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.maxpool(x)
        x = self.layer1(x)
        feat8 = self.layer2(x)  # 1/8
        feat16 = self.layer3(feat8)  # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32
    def init_weight(self):
        # Copy matching ImageNet weights; skip the classifier ('fc') entries.
        state_dict = modelzoo.load_url(resnet18_url)
        self_state_dict = self.state_dict()
        for k, v in state_dict.items():
            if 'fc' in k: continue
            self_state_dict.update({k: v})
        self.load_state_dict(self_state_dict)
    def get_params(self):
        # Split parameters into weight-decay (conv/linear weights) and
        # no-weight-decay (biases, BN parameters) groups for the optimizer.
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
# ---------------------------------------------------
class ConvBNReLU(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU."""

    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_chan)
        self.init_weight()

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)
class BiSeNetOutput(nn.Module):
    """Segmentation head: 3x3 ConvBNReLU followed by a 1x1 classifier conv."""

    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
        self.init_weight()

    def forward(self, x):
        return self.conv_out(self.conv(x))

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and not."""
        wd_params, nowd_params = [], []
        for _, mod in self.named_modules():
            if isinstance(mod, (nn.Linear, nn.Conv2d)):
                wd_params.append(mod.weight)
                if mod.bias is not None:
                    nowd_params.append(mod.bias)
            elif isinstance(mod, nn.BatchNorm2d):
                nowd_params += list(mod.parameters())
        return wd_params, nowd_params
class AttentionRefinementModule(nn.Module):
    """Reweight a feature map with channel attention from global pooling."""

    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
        self.bn_atten = nn.BatchNorm2d(out_chan)
        self.sigmoid_atten = nn.Sigmoid()
        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        # Global average pool -> 1x1 conv -> BN -> sigmoid: per-channel gates.
        gate = F.avg_pool2d(feat, feat.size()[2:])
        gate = self.sigmoid_atten(self.bn_atten(self.conv_atten(gate)))
        return torch.mul(feat, gate)

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)
class ContextPath(nn.Module):
    """Context branch: ResNet-18 features refined by ARMs + global-context shortcut."""

    def __init__(self, *args, **kwargs):
        super(ContextPath, self).__init__()
        self.resnet = Resnet18()
        self.arm16 = AttentionRefinementModule(256, 128)
        self.arm32 = AttentionRefinementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
        self.init_weight()

    def forward(self, x):
        feat8, feat16, feat32 = self.resnet(x)
        h8, w8 = feat8.size()[2:]
        h16, w16 = feat16.size()[2:]
        h32, w32 = feat32.size()[2:]
        # Global context vector broadcast back onto the 1/32 map.
        ctx = self.conv_avg(F.avg_pool2d(feat32, feat32.size()[2:]))
        ctx_up = F.interpolate(ctx, (h32, w32), mode='nearest')
        up32 = F.interpolate(self.arm32(feat32) + ctx_up, (h16, w16), mode='nearest')
        up32 = self.conv_head32(up32)
        up16 = F.interpolate(self.arm16(feat16) + up32, (h8, w8), mode='nearest')
        up16 = self.conv_head16(up16)
        return feat8, up16, up32  # x8, x8, x16

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and not."""
        wd_params, nowd_params = [], []
        for _, mod in self.named_modules():
            if isinstance(mod, (nn.Linear, nn.Conv2d)):
                wd_params.append(mod.weight)
                if mod.bias is not None:
                    nowd_params.append(mod.bias)
            elif isinstance(mod, nn.BatchNorm2d):
                nowd_params += list(mod.parameters())
        return wd_params, nowd_params
### This is not used, since I replace this with the resnet feature with the same size
class SpatialPath(nn.Module):
    """Three stride-2 convs down to 1/8 resolution, then a 1x1 projection to 128.

    Not wired into BiSeNet below, which substitutes a ResNet feature map of
    the same size instead of this path.
    """

    def __init__(self, *args, **kwargs):
        super(SpatialPath, self).__init__()
        self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3)
        self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
        self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
        self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0)
        self.init_weight()

    def forward(self, x):
        return self.conv_out(self.conv3(self.conv2(self.conv1(x))))

    def init_weight(self):
        # NOTE: children are ConvBNReLU wrappers, so this loop matches none of
        # them directly; each wrapper already self-initializes. Kept as-is.
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and not."""
        wd_params, nowd_params = [], []
        for _, mod in self.named_modules():
            if isinstance(mod, (nn.Linear, nn.Conv2d)):
                wd_params.append(mod.weight)
                if mod.bias is not None:
                    nowd_params.append(mod.bias)
            elif isinstance(mod, nn.BatchNorm2d):
                nowd_params += list(mod.parameters())
        return wd_params, nowd_params
class FeatureFusionModule(nn.Module):
    """Fuse spatial and context features with a channel-attention residual."""

    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(FeatureFusionModule, self).__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.init_weight()

    def forward(self, fsp, fcp):
        feat = self.convblk(torch.cat([fsp, fcp], dim=1))
        # Squeeze-and-excitation style gate over the fused features.
        gate = F.avg_pool2d(feat, feat.size()[2:])
        gate = self.sigmoid(self.conv2(self.relu(self.conv1(gate))))
        return feat + torch.mul(feat, gate)

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and not."""
        wd_params, nowd_params = [], []
        for _, mod in self.named_modules():
            if isinstance(mod, (nn.Linear, nn.Conv2d)):
                wd_params.append(mod.weight)
                if mod.bias is not None:
                    nowd_params.append(mod.bias)
            elif isinstance(mod, nn.BatchNorm2d):
                nowd_params += list(mod.parameters())
        return wd_params, nowd_params
class BiSeNet(nn.Module):
    """BiSeNet whose spatial path is replaced by the ResNet 1/8 feature map."""

    def __init__(self, n_classes, *args, **kwargs):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath()
        ## here self.sp is deleted
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, n_classes)
        self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
        self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
        self.init_weight()

    def forward(self, x):
        h, w = x.size()[2:]
        # ContextPath returns the res3b1 feature first; it stands in for the
        # spatial path.
        feat_res8, feat_cp8, feat_cp16 = self.cp(x)
        fused = self.ffm(feat_res8, feat_cp8)
        heads = (
            self.conv_out(fused),
            self.conv_out16(feat_cp8),
            self.conv_out32(feat_cp16),
        )
        out, out16, out32 = (
            F.interpolate(t, (h, w), mode='bilinear', align_corners=True)
            for t in heads
        )
        return out, out16, out32

    def init_weight(self):
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Group parameters; fusion/output heads get a learning-rate multiplier."""
        wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
        for _, child in self.named_children():
            child_wd, child_nowd = child.get_params()
            if isinstance(child, (FeatureFusionModule, BiSeNetOutput)):
                lr_mul_wd_params += child_wd
                lr_mul_nowd_params += child_nowd
            else:
                wd_params += child_wd
                nowd_params += child_nowd
        return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params
if __name__ == "__main__":
    # Smoke test (requires a CUDA device).
    net = BiSeNet(19)
    net.cuda()
    net.eval()
    in_ten = torch.randn(16, 3, 640, 480).cuda()
    # no_grad avoids storing activations for backprop; without it this
    # 16x3x640x480 forward pass can easily exhaust GPU memory.
    with torch.no_grad():
        out, out16, out32 = net(in_ten)
    print(out.shape)
    net.get_params()
| 14,108 | 35.742188 | 91 | py |
ice-ice | ice-ice/ice/external/attribution/resnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet50']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, 3,
                     stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, 1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard two-layer residual block (ResNet-18/34)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # conv3x3 helper inlined: 3x3, pad 1, no bias.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand residual block (ResNet-50+)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # conv1x1/conv3x3 helpers inlined.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class fc_block(nn.Module):
    """Linear -> BatchNorm1d -> (optional Dropout) -> ReLU."""

    def __init__(self, inplanes, planes, drop_rate=0.15):
        super(fc_block, self).__init__()
        self.fc = nn.Linear(inplanes, planes)
        self.bn = nn.BatchNorm1d(planes)
        # NB: self.dropout only exists when drop_rate > 0 (mirrors forward()).
        if drop_rate > 0:
            self.dropout = nn.Dropout(drop_rate)
        self.relu = nn.ReLU(inplace=True)
        self.drop_rate = drop_rate

    def forward(self, x):
        out = self.bn(self.fc(x))
        if self.drop_rate > 0:
            out = self.dropout(out)
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk with one shared 512-d stem and ``num_attributes`` heads.

    Each head ``classifier00`` .. ``classifierNN`` is a small MLP producing a
    2-way logit for one attribute; ``forward`` returns the list of all head
    outputs (one tensor of shape (batch, 2) per attribute).
    """
    def __init__(self, block, layers, num_attributes=40, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.stem = fc_block(512 * block.expansion, 512)
        # One independent classifier head per attribute, registered by a
        # zero-padded name so getattr can recover them in forward().
        for i in range(num_attributes):
            setattr(self, 'classifier' + str(i).zfill(2), nn.Sequential(fc_block(512, 256), nn.Linear(256, 2)))
        self.num_attributes = num_attributes
        # He init for convs, unit-gain BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; the first block may stride/project."""
        downsample = None
        # Projection shortcut is needed whenever spatial or channel shape changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return a list of per-attribute logit tensors, one per head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.stem(x)
        y = []
        for i in range(self.num_attributes):
            classifier = getattr(self, 'classifier' + str(i).zfill(2))
            y.append(classifier(x))
        return y
def resnet50(pretrained=True, **kwargs):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
def init_pretrained_weights(model, model_url):
    """
    Initialize model with pretrained weights.
    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrained = model_zoo.load_url(model_url)
    current = model.state_dict()
    # Keep only entries whose name AND shape match the target model.
    compatible = {
        name: weight
        for name, weight in pretrained.items()
        if name in current and current[name].size() == weight.size()
    }
    current.update(compatible)
    model.load_state_dict(current)
    print("Initialized model with pretrained weights from {}".format(model_url))
| 7,115 | 31.792627 | 116 | py |
MrMustard-develop | MrMustard-develop/mrmustard/__init__.py | # Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the top-most `__init__.py` file of MrMustard package."""
import numpy as np
import rich.table
from rich import print
from ._version import __version__
# pylint: disable=too-many-instance-attributes
class Settings:
    """Global, mutable configuration for MrMustard (singleton).

    UPPERCASE attributes are user-facing settings; lowercase/underscored
    ones are internal state.
    """
    def __new__(cls): # singleton
        # Reuse the single shared instance across all instantiations.
        if not hasattr(cls, "instance"):
            cls.instance = super(Settings, cls).__new__(cls)
        return cls.instance
    def __init__(self):
        self._backend = "tensorflow"
        self.HBAR = 2.0
        self.CHOI_R = 0.881373587019543 # np.arcsinh(1.0)
        self.DEBUG = False
        self.AUTOCUTOFF_PROBABILITY = 0.999 # capture at least 99.9% of the probability
        self.AUTOCUTOFF_MAX_CUTOFF = 100
        self.AUTOCUTOFF_MIN_CUTOFF = 1
        self.CIRCUIT_DECIMALS = 3
        # use cutoff=5 for each mode when determining if two transformations in fock repr are equal
        self.EQ_TRANSFORMATION_CUTOFF = 3 # 3 is enough to include a full step of the rec relations
        self.EQ_TRANSFORMATION_RTOL_FOCK = 1e-3
        self.EQ_TRANSFORMATION_RTOL_GAUSS = 1e-6
        # for the detectors
        self.PNR_INTERNAL_CUTOFF = 50
        self.HOMODYNE_SQUEEZING = 10.0
        # misc
        self.PROGRESSBAR = True
        # Seed is drawn eagerly so the RNG is reproducible from the start.
        self._seed = np.random.randint(0, 2**31 - 1)
        self.rng = np.random.default_rng(self._seed)
        self.DEFAULT_BS_METHOD = "vanilla" # can be 'vanilla' or 'schwinger'
    @property
    def SEED(self):
        """Returns the seed value if set, otherwise returns a random seed."""
        # NOTE: reading SEED has a side effect when unset: it draws a new
        # seed and re-creates self.rng.
        if self._seed is None:
            self._seed = np.random.randint(0, 2**31 - 1)
            self.rng = np.random.default_rng(self._seed)
        return self._seed
    @SEED.setter
    def SEED(self, value):
        """Sets the seed value."""
        self._seed = value
        self.rng = np.random.default_rng(self._seed)
    @property
    def BACKEND(self):
        """The backend which is used.
        Can be either ``'tensorflow'`` or ``'torch'``.
        """
        return self._backend
    @BACKEND.setter
    def BACKEND(self, backend_name: str):
        if backend_name not in ["tensorflow", "torch"]: # pragma: no cover
            raise ValueError("Backend must be either 'tensorflow' or 'torch'")
        self._backend = backend_name
    # use rich.table to print the settings
    def __repr__(self):
        """Returns a string representation of the settings."""
        # The table itself is printed via rich; the returned repr is empty.
        table = rich.table.Table(title="MrMustard Settings")
        table.add_column("Setting")
        table.add_column("Value")
        table.add_row("BACKEND", self.BACKEND)
        table.add_row("SEED", str(self.SEED))
        # Only UPPERCASE attributes are user-facing settings.
        for key, value in self.__dict__.items():
            if key == key.upper():
                table.add_row(key, str(value))
        print(table)
        return ""
# Package-wide singleton instance; import this rather than creating Settings().
settings = Settings()
"""Settings object."""
def version():
    r"""Return the installed Mr Mustard package version number as a string."""
    return __version__
def about():
    """Mr Mustard information.
    Prints the installed version numbers for Mr Mustard and its dependencies,
    and some system info. Please include this information in bug reports.
    **Example:**
    .. code-block:: pycon
        >>> mm.about()
        Mr Mustard: a differentiable bridge between phase space and Fock space.
        Copyright 2021 Xanadu Quantum Technologies Inc.
        Python version:            3.6.10
        Platform info:             Linux-5.8.18-1-MANJARO-x86_64-with-arch-Manjaro-Linux
        Installation path:         /home/mrmustard/
        Mr Mustard version:        0.1.0
        Numpy version:             1.21.4
        Numba version:             0.48.0
        Scipy version:             1.7.3
        The Walrus version:        0.17.0
        TensorFlow version:        2.7.0
        Torch version:             1.10.0+cu102
    """
    # pylint: disable=import-outside-toplevel
    import os
    import platform
    import sys
    import numba
    import numpy
    import scipy
    import tensorflow
    import thewalrus
    # a QuTiP-style infobox
    print("\nMr Mustard: a differentiable bridge between phase space and Fock space.")
    print("Copyright 2021 Xanadu Quantum Technologies Inc.\n")
    print("Python version:            {}.{}.{}".format(*sys.version_info[0:3]))
    print("Platform info:             {}".format(platform.platform()))
    print("Installation path:         {}".format(os.path.dirname(__file__)))
    print("Mr Mustard version:        {}".format(__version__))
    print("Numpy version:             {}".format(numpy.__version__))
    print("Numba version:             {}".format(numba.__version__))
    print("Scipy version:             {}".format(scipy.__version__))
    print("The Walrus version:        {}".format(thewalrus.__version__))
    print("TensorFlow version:        {}".format(tensorflow.__version__))
    # Torch is an optional dependency: report it only when importable.
    try: # pragma: no cover
        import torch
        torch_version = torch.__version__
        print("Torch version:             {}".format(torch_version))
    except ImportError:
        torch_version = None
| 5,726 | 33.089286 | 100 | py |
MrMustard-develop | MrMustard-develop/mrmustard/math/torch.py | # Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Pytorch implementation of the :class:`Math` interface."""
from typing import Callable, List, Optional, Sequence, Tuple, Union, Dict
import numpy as np
import torch
from mrmustard.math.autocast import Autocast
from mrmustard.typing import Tensor, Trainable
from .math_interface import MathInterface
# pylint: disable=too-many-public-methods,no-self-use
class TorchMath(MathInterface):
    r"""Torch implementation of the :class:`Math` interface.

    Mostly thin wrappers around ``torch``; a few methods adapt the TF-style
    calling conventions of the interface (``matmul`` transpose/adjoint flags,
    ``pad`` padding layout, ``transpose`` permutations) to their torch
    equivalents.
    """

    float64 = torch.float64
    float32 = torch.float32
    complex64 = torch.complex64
    complex128 = torch.complex128

    def __getattr__(self, name):
        # Fall back to the torch namespace for anything not defined here.
        return getattr(torch, name)

    # ~~~~~~~~~
    # Basic ops
    # ~~~~~~~~~

    def atleast_1d(self, array: torch.Tensor, dtype=None) -> torch.Tensor:
        """Flatten ``array`` to 1-D (scalars become length-1 tensors)."""
        return self.cast(torch.reshape(self.astensor(array), [-1]), dtype)

    def astensor(self, array: Union[np.ndarray, torch.Tensor], dtype=None) -> torch.Tensor:
        return self.cast(torch.tensor(array), dtype)

    def conj(self, array: torch.Tensor) -> torch.Tensor:
        return torch.conj(array)

    def real(self, array: torch.Tensor) -> torch.Tensor:
        return torch.real(array)

    def imag(self, array: torch.Tensor) -> torch.Tensor:
        return torch.imag(array)

    def cos(self, array: torch.Tensor) -> torch.Tensor:
        return torch.cos(array)

    def cosh(self, array: torch.Tensor) -> torch.Tensor:
        return torch.cosh(array)

    def sinh(self, array: torch.Tensor) -> torch.Tensor:
        return torch.sinh(array)

    def sin(self, array: torch.Tensor) -> torch.Tensor:
        return torch.sin(array)

    def exp(self, array: torch.Tensor) -> torch.Tensor:
        return torch.exp(array)

    def sqrt(self, x: torch.Tensor, dtype=None) -> torch.Tensor:
        return self.cast(torch.sqrt(x), dtype)

    def lgamma(self, x: torch.Tensor) -> torch.Tensor:
        return torch.lgamma(x)

    def log(self, x: torch.Tensor) -> torch.Tensor:
        return torch.log(x)

    def cast(self, x: torch.Tensor, dtype=None) -> torch.Tensor:
        """Cast ``x`` to ``dtype``; a no-op when ``dtype`` is None."""
        if dtype is None:
            return x
        return x.to(dtype)

    @Autocast()
    def maximum(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return torch.maximum(a, b)

    @Autocast()
    def minimum(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return torch.minimum(a, b)

    def abs(self, array: torch.Tensor) -> torch.Tensor:
        return torch.abs(array)

    def expm(self, matrix: torch.Tensor) -> torch.Tensor:
        return torch.matrix_exp(matrix)

    def norm(self, array: torch.Tensor) -> torch.Tensor:
        """Note that the norm preserves the type of array."""
        return torch.norm(array)

    @Autocast()
    def matmul(
        self,
        a: torch.Tensor,
        b: torch.Tensor,
        transpose_a=False,
        transpose_b=False,
        adjoint_a=False,
        adjoint_b=False,
    ) -> torch.Tensor:
        """Matrix product of ``a`` and ``b``.

        FIX: the transpose/adjoint flags were previously accepted but ignored;
        they are now applied to the last two dimensions before multiplying.
        """
        if transpose_a:
            a = a.transpose(-2, -1)
        if transpose_b:
            b = b.transpose(-2, -1)
        if adjoint_a:
            a = a.transpose(-2, -1).conj()
        if adjoint_b:
            b = b.transpose(-2, -1).conj()
        return torch.matmul(a, b)

    @Autocast()
    def matvec(
        self, a: torch.Tensor, b: torch.Tensor, transpose_a=False, adjoint_a=False
    ) -> torch.Tensor:
        """Matrix-vector product. FIX: flags were previously ignored."""
        if transpose_a:
            a = a.transpose(-2, -1)
        if adjoint_a:
            a = a.transpose(-2, -1).conj()
        return torch.mv(a, b)

    @Autocast()
    def tensordot(self, a: torch.Tensor, b: torch.Tensor, axes: List[int]) -> torch.Tensor:
        return torch.tensordot(a, b, axes)

    def einsum(self, string: str, *tensors) -> torch.Tensor:
        return torch.einsum(string, *tensors)

    def inv(self, a: torch.Tensor) -> torch.Tensor:
        return torch.inverse(a)

    def pinv(self, array: torch.Tensor) -> torch.Tensor:
        return torch.pinverse(array)

    def det(self, a: torch.Tensor) -> torch.Tensor:
        return torch.det(a)

    def tile(self, array: torch.Tensor, repeats: Sequence[int]) -> torch.Tensor:
        return torch.tile(array, repeats)

    def diag(self, array: torch.Tensor, k: int = 0) -> torch.Tensor:
        """Build a matrix with ``array`` on the ``k``-th diagonal."""
        return torch.diag(array, k=k)

    def diag_part(self, array: torch.Tensor) -> torch.Tensor:
        """Extract the diagonal of the innermost matrices.

        FIX: previously called ``torch.diag_embed``, which *builds* a matrix
        from a diagonal — the inverse of what ``diag_part`` means.
        """
        return torch.diagonal(array, dim1=-2, dim2=-1)

    def pad(
        self,
        array: torch.Tensor,
        paddings: Sequence[Tuple[int, int]],
        mode="constant",
        constant_values=0,
    ) -> torch.Tensor:
        """Pad ``array`` with TF-style ``paddings`` (one pair per dimension).

        FIX: ``torch.nn.functional.pad`` expects a flat sequence ordered from
        the LAST dimension to the first, so the pair list is flattened in
        reverse before delegating.
        """
        flat = [amount for pair in reversed(list(paddings)) for amount in pair]
        return torch.nn.functional.pad(array, flat, mode=mode, value=constant_values)

    @Autocast()
    def convolution(
        self,
        array: torch.Tensor,
        filters: torch.Tensor,
        strides: Optional[List[int]] = None,
        padding="VALID",
        data_format="NWC",
        dilations: Optional[List[int]] = None,
    ) -> torch.Tensor:
        r"""1D/2D convolution of ``array`` with kernel ``filters``.

        Args:
            1D convolution: Tensor of shape [batch_size, input_channels, signal_length].
            2D convolution: [batch_size, input_channels, input_height, input_width]

        FIX: the previous implementation constructed ``nn.Conv1d/Conv2d``
        modules with a placeholder ``output_channels = ...`` (an Ellipsis) and
        therefore could never run; the functional API is used instead, with
        ``filters`` as the kernel tensor, mirroring ``tf.nn.convolution``.
        """
        stride = 1 if strides is None else strides
        dilation = 1 if dilations is None else dilations
        # torch accepts lowercase 'valid'/'same' padding strings.
        pad = padding.lower() if isinstance(padding, str) else padding
        if array.dim() == 3:  # 1D case: (batch, channels, length)
            return torch.nn.functional.conv1d(
                array, filters, stride=stride, padding=pad, dilation=dilation
            )
        if array.dim() == 4:  # 2D case: (batch, channels, height, width)
            return torch.nn.functional.conv2d(
                array, filters, stride=stride, padding=pad, dilation=dilation
            )
        raise NotImplementedError

    def transpose(self, a: torch.Tensor, perm: List[int] = None) -> torch.Tensor:
        """Permute the dimensions of ``a`` according to ``perm``.

        FIX: previously ``torch.transpose(a, perm[0], perm[1])`` was used,
        which swaps two dimensions regardless of the permutation requested
        (e.g. the identity permutation [0, 1] still swapped axes). ``permute``
        implements the TF semantics; when ``perm`` is None the dimensions are
        reversed, as in ``tf.transpose``.
        """
        if a is None:
            return None  # TODO: remove and address None inputs where transpose is used
        if perm is None:
            perm = list(range(a.dim()))[::-1]
        return a.permute(*perm)

    def reshape(self, array: torch.Tensor, shape: Sequence[int]) -> torch.Tensor:
        return torch.reshape(array, shape)

    def sum(self, array: torch.Tensor, axes: Sequence[int] = None):
        return torch.sum(array, axes)

    def arange(
        self, start: int, limit: int = None, delta: int = 1, dtype=torch.float64
    ) -> torch.Tensor:
        return torch.arange(start, limit, delta, dtype=dtype)

    @Autocast()
    def outer(self, array1: torch.Tensor, array2: torch.Tensor) -> torch.Tensor:
        # tensordot over no axes == outer product, works for any ranks.
        return torch.tensordot(array1, array2, [[], []])

    def eye(self, size: int, dtype=torch.float64) -> torch.Tensor:
        return torch.eye(size, dtype=dtype)

    def zeros(self, shape: Sequence[int], dtype=torch.float64) -> torch.Tensor:
        return torch.zeros(shape, dtype=dtype)

    def zeros_like(self, array: torch.Tensor) -> torch.Tensor:
        return torch.zeros_like(array)

    def ones(self, shape: Sequence[int], dtype=torch.float64) -> torch.Tensor:
        return torch.ones(shape, dtype=dtype)

    def ones_like(self, array: torch.Tensor) -> torch.Tensor:
        return torch.ones_like(array)

    def gather(self, array: torch.Tensor, indices: torch.Tensor, axis: int = None) -> torch.Tensor:
        # TODO: gather works differently in Pytorch vs Tensorflow.
        return torch.gather(array, axis, indices)

    def trace(self, array: torch.Tensor, dtype=None) -> torch.Tensor:
        return self.cast(torch.trace(array), dtype)

    def concat(self, values: Sequence[torch.Tensor], axis: int) -> torch.Tensor:
        return torch.cat(values, axis)

    def update_tensor(
        self, tensor: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dims: int = 0
    ):
        # TODO: dims need to be an argument, or should be interpreted from the other data
        return tensor.scatter_(dims, indices, values)

    def update_add_tensor(
        self, tensor: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dims: int = 0
    ):
        # TODO: dims need to be an argument, or should be interpreted from the other data
        return tensor.scatter_add_(dims, indices, values)

    def constraint_func(
        self, bounds: Tuple[Optional[float], Optional[float]]
    ) -> Optional[Callable]:
        """Return a clamping callable for ``bounds``, or None if unbounded."""
        bounds = (
            -np.inf if bounds[0] is None else bounds[0],
            np.inf if bounds[1] is None else bounds[1],
        )
        if bounds != (-np.inf, np.inf):

            def constraint(x):
                return torch.clamp(x, min=bounds[0], max=bounds[1])

        else:
            constraint = None
        return constraint

    def new_variable(
        self, value, bounds: Tuple[Optional[float], Optional[float]], name: str, dtype=torch.float64
    ):
        # NOTE: torch has no per-variable bounds/names; only requires_grad is set.
        return torch.tensor(value, dtype=dtype, requires_grad=True)

    def new_constant(self, value, name: str, dtype=torch.float64):
        return torch.tensor(value, dtype=dtype)

    def asnumpy(self, tensor: torch.Tensor) -> Tensor:
        return tensor.numpy()

    def hash_tensor(self, tensor: torch.Tensor) -> str:
        return hash(tensor)

    def hermite_renormalized(
        self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, shape: Tuple[int]
    ) -> torch.Tensor:  # TODO this is not ready
        r"""Renormalized multidimensional Hermite polynomial.

        This is given by the "exponential" Taylor series of :math:`exp(Ax^2 + Bx + C)` at zero,
        where the series has :math:`sqrt(n!)` at the denominator rather than `n!`.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            shape: The shape of the final tensor.

        Returns:
            The renormalized Hermite polynomial of given shape.
        """
        raise NotImplementedError

    def DefaultEuclideanOptimizer(self, params) -> torch.optim.Optimizer:
        r"""Default optimizer for the Euclidean parameters."""
        self.optimizer = torch.optim.Adam(params, lr=0.001)
        return self.optimizer

    def value_and_gradients(
        self, cost_fn: Callable, parameters: Dict[str, List[Trainable]]
    ) -> Tuple[torch.Tensor, Dict[str, List[torch.Tensor]]]:
        r"""Computes the loss and gradients of the given cost function.

        Args:
            cost_fn (Callable): The cost function. Takes in two arguments:
                - Output: The output tensor of the model.
            parameters (Dict): The parameters to optimize in three kinds:
                symplectic, orthogonal and euclidean.

        Returns:
            The loss and the gradients.
        """
        # NOTE(review): this both evaluates the cost AND steps the optimizer
        # created by DefaultEuclideanOptimizer; callers should be aware of the
        # side effect.
        self.optimizer.zero_grad()
        loss = (
            cost_fn()
        )  # TODO: I think this should be cost_fn(params), but if it works I think it is fine.
        loss.backward()
        self.optimizer.step()
        grads = [p.grad for p in parameters]
        return loss, grads

    def eigvals(self, tensor: torch.Tensor) -> Tensor:
        """Returns the eigenvalues of a matrix."""
        return torch.linalg.eigvals(tensor)

    def eigvalsh(self, tensor: torch.Tensor) -> Tensor:
        """Returns the eigenvalues of a Real Symmetric or Hermitian matrix."""
        return torch.linalg.eigvalsh(tensor)

    def svd(self, tensor: torch.Tensor) -> Tensor:
        """Returns the Singular Value Decomposition of a matrix."""
        return torch.linalg.svd(tensor)

    def xlogy(self, x: torch.Tensor, y: torch.Tensor) -> Tensor:
        """Returns 0 if ``x == 0``, and ``x * log(y)`` otherwise, elementwise."""
        return torch.xlogy(x, y)

    def sqrtm(self, tensor: torch.Tensor) -> Tensor:
        raise NotImplementedError

    def boolean_mask(self, tensor: torch.Tensor, mask: torch.Tensor) -> Tensor:
        """Returns a new 1-D tensor which indexes the `input` tensor according to the boolean mask `mask`."""
        return torch.masked_select(tensor, mask)
| 12,620 | 32.745989 | 109 | py |
MrMustard-develop | MrMustard-develop/mrmustard/math/__init__.py | # Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The ``math`` module contains low-level functions for performing mathematical operations.
It is recommended that users access the backends using the an instance of the :class:`Math` class rather than the backends themselves.
The Math class is a wrapper that passes the calls to the currently active backend, which is determined by
the ``BACKEND`` parameter in ``mrmustard.settings`` (the default is ``tensorflow``).
The advantage of using the Math class is that the same code can run on different backends, allowing for a
greater degree of flexibility and code reuse.
.. code-block::
from mrmustard.math import Math
math = Math()
math.cos(x) # tensorflow backend
from mrmustard import settings
settings.BACKEND = 'torch'
math.cos(x) # torch backend
"""
import importlib
from mrmustard import settings
if importlib.util.find_spec("tensorflow"):
from mrmustard.math.tensorflow import TFMath
if importlib.util.find_spec("torch"):
from mrmustard.math.torch import TorchMath
class Math:
    r"""
    This class is a switcher for performing math operations on the currently active backend.
    """

    def __getattribute__(self, name):
        # Every attribute access is forwarded to a *fresh* backend instance, so a
        # change of ``settings.BACKEND`` at runtime takes effect immediately.
        # NOTE(review): a new TFMath/TorchMath object is built per access — fine
        # only if the backends are stateless and cheap to construct; confirm.
        if settings.BACKEND == "tensorflow":
            return object.__getattribute__(TFMath(), name)
        elif settings.BACKEND == "torch":
            return object.__getattribute__(TorchMath(), name)
        else:
            raise ValueError(
                f"No `{settings.BACKEND}` backend found. Ensure your backend is either ``'tensorflow'`` or ``'torch'``"
            )
| 2,161 | 33.870968 | 134 | py |
MrMustard-develop | MrMustard-develop/mrmustard/math/tensorflow.py | # Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Tensorflow implementation of the :class:`Math` interface."""
from typing import Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from mrmustard import settings
from mrmustard.math.autocast import Autocast
from mrmustard.math.lattice import strategies
from mrmustard.math.numba.compactFock_inputValidation import (
grad_hermite_multidimensional_1leftoverMode,
grad_hermite_multidimensional_diagonal,
hermite_multidimensional_1leftoverMode,
hermite_multidimensional_diagonal,
)
from mrmustard.typing import Tensor, Trainable
from .math_interface import MathInterface
# pylint: disable=too-many-public-methods,no-self-argument,arguments-differ
class TFMath(MathInterface):
r"""Tensorflow implemantion of the :class:`Math` interface."""
float64 = tf.float64
float32 = tf.float32
complex64 = tf.complex64
complex128 = tf.complex128
    def __getattr__(self, name):
        # Fallback: any attribute not defined on TFMath resolves to the ``tf``
        # module itself (e.g. ``math.stack`` -> ``tf.stack``).
        return getattr(tf, name)

    # ~~~~~~~~~
    # Basic ops
    # ~~~~~~~~~
    # Thin wrappers around the corresponding tensorflow functions.

    def abs(self, array: tf.Tensor) -> tf.Tensor:
        return tf.abs(array)

    def any(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.reduce_any(array)

    def arange(self, start: int, limit: int = None, delta: int = 1, dtype=tf.float64) -> tf.Tensor:
        return tf.range(start, limit, delta, dtype=dtype)

    def asnumpy(self, tensor: tf.Tensor) -> Tensor:
        return np.array(tensor)

    def assign(self, tensor: tf.Tensor, value: tf.Tensor) -> tf.Tensor:
        # In-place assignment; ``tensor`` must be a tf.Variable.
        tensor.assign(value)
        return tensor

    def astensor(self, array: Union[np.ndarray, tf.Tensor], dtype=None) -> tf.Tensor:
        return tf.convert_to_tensor(array, dtype=dtype)

    def atleast_1d(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
        # NOTE(review): this *flattens* to 1-D (reshape to [-1]), which differs
        # from numpy's atleast_1d for inputs of rank >= 2 — confirm intended.
        return self.cast(tf.reshape(array, [-1]), dtype)

    def cast(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
        # No-op when dtype is None.
        if dtype is None:
            return array
        return tf.cast(array, dtype)

    def clip(self, array, a_min, a_max) -> tf.Tensor:
        return tf.clip_by_value(array, a_min, a_max)

    def concat(self, values: Sequence[tf.Tensor], axis: int) -> tf.Tensor:
        return tf.concat(values, axis)

    def conj(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.conj(array)
def constraint_func(
self, bounds: Tuple[Optional[float], Optional[float]]
) -> Optional[Callable]:
bounds = (
-np.inf if bounds[0] is None else bounds[0],
np.inf if bounds[1] is None else bounds[1],
)
if bounds != (-np.inf, np.inf):
def constraint(x):
return tf.clip_by_value(x, bounds[0], bounds[1])
else:
constraint = None
return constraint
    # pylint: disable=arguments-differ
    @Autocast()
    def convolution(
        self,
        array: tf.Tensor,
        filters: tf.Tensor,
        strides: Optional[List[int]] = None,
        padding="VALID",
        data_format="NWC",
        dilations: Optional[List[int]] = None,
    ) -> tf.Tensor:
        # Direct wrapper around tf.nn.convolution; Autocast harmonizes dtypes.
        return tf.nn.convolution(array, filters, strides, padding, data_format, dilations)

    def cos(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.cos(array)

    def cosh(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.cosh(array)

    def atan2(self, y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
        return tf.math.atan2(y, x)

    def make_complex(self, real: tf.Tensor, imag: tf.Tensor) -> tf.Tensor:
        """Build a complex tensor from separate real and imaginary parts."""
        return tf.complex(real, imag)

    def det(self, matrix: tf.Tensor) -> tf.Tensor:
        return tf.linalg.det(matrix)

    def diag(self, array: tf.Tensor, k: int = 0) -> tf.Tensor:
        # Builds a (batched) diagonal matrix from ``array`` on diagonal ``k``.
        return tf.linalg.diag(array, k=k)

    def diag_part(self, array: tf.Tensor) -> tf.Tensor:
        return tf.linalg.diag_part(array)
def einsum(self, string: str, *tensors) -> tf.Tensor:
if type(string) is str:
return tf.einsum(string, *tensors)
return None # provide same functionality as numpy.einsum or upgrade to opt_einsum
    def exp(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.exp(array)

    def expand_dims(self, array: tf.Tensor, axis: int) -> tf.Tensor:
        return tf.expand_dims(array, axis)

    def expm(self, matrix: tf.Tensor) -> tf.Tensor:
        """Matrix exponential."""
        return tf.linalg.expm(matrix)

    def eye(self, size: int, dtype=tf.float64) -> tf.Tensor:
        return tf.eye(size, dtype=dtype)

    def eye_like(self, array: tf.Tensor) -> Tensor:
        # Identity with the same trailing dimension and dtype as ``array``.
        return tf.eye(array.shape[-1], dtype=array.dtype)

    def from_backend(self, value) -> bool:
        """Whether ``value`` is a native tensorflow object."""
        return isinstance(value, (tf.Tensor, tf.Variable))

    def gather(self, array: tf.Tensor, indices: tf.Tensor, axis: int = None) -> tf.Tensor:
        return tf.gather(array, indices, axis=axis)

    def hash_tensor(self, tensor: tf.Tensor) -> int:
        """Hash a tensor via its ``ref()`` identity wrapper.

        Raises:
            TypeError: if the tensor has no ``ref()`` (cannot be hashed).
        """
        try:
            REF = tensor.ref()
        except AttributeError as e:
            raise TypeError("Cannot hash tensor") from e
        return hash(REF)

    def imag(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.imag(array)

    def inv(self, tensor: tf.Tensor) -> tf.Tensor:
        return tf.linalg.inv(tensor)

    def is_trainable(self, tensor: tf.Tensor) -> bool:
        # Only tf.Variables are trainable in this backend.
        return isinstance(tensor, tf.Variable)

    def lgamma(self, x: tf.Tensor) -> tf.Tensor:
        return tf.math.lgamma(x)

    def log(self, x: tf.Tensor) -> tf.Tensor:
        return tf.math.log(x)
    @Autocast()
    def matmul(
        self,
        a: tf.Tensor,
        b: tf.Tensor,
        transpose_a=False,
        transpose_b=False,
        adjoint_a=False,
        adjoint_b=False,
    ) -> tf.Tensor:
        return tf.linalg.matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b)

    @Autocast()
    def matvec(self, a: tf.Tensor, b: tf.Tensor, transpose_a=False, adjoint_a=False) -> tf.Tensor:
        return tf.linalg.matvec(a, b, transpose_a, adjoint_a)

    @Autocast()
    def maximum(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
        return tf.maximum(a, b)

    @Autocast()
    def minimum(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
        return tf.minimum(a, b)

    def new_variable(
        self,
        value,
        bounds: Union[Tuple[Optional[float], Optional[float]], None],
        name: str,
        dtype=tf.float64,
    ):
        """Create a trainable tf.Variable, optionally clipped to ``bounds``."""
        bounds = bounds or (None, None)
        # ``convert_to_tensor`` resolves to tf.convert_to_tensor via __getattr__.
        value = self.convert_to_tensor(value, dtype)
        return tf.Variable(value, name=name, dtype=dtype, constraint=self.constraint_func(bounds))

    def new_constant(self, value, name: str, dtype=tf.float64):
        """Create a non-trainable named constant."""
        value = self.convert_to_tensor(value, dtype)
        return tf.constant(value, dtype=dtype, name=name)

    def norm(self, array: tf.Tensor) -> tf.Tensor:
        """Note that the norm preserves the type of array."""
        return tf.linalg.norm(array)

    def ones(self, shape: Sequence[int], dtype=tf.float64) -> tf.Tensor:
        return tf.ones(shape, dtype=dtype)

    def ones_like(self, array: tf.Tensor) -> tf.Tensor:
        return tf.ones_like(array)

    @Autocast()
    def outer(self, array1: tf.Tensor, array2: tf.Tensor) -> tf.Tensor:
        # Outer product = tensordot over zero axes.
        return tf.tensordot(array1, array2, [[], []])

    def pad(
        self,
        array: tf.Tensor,
        paddings: Sequence[Tuple[int, int]],
        mode="CONSTANT",
        constant_values=0,
    ) -> tf.Tensor:
        return tf.pad(array, paddings, mode, constant_values)

    @staticmethod
    def pinv(matrix: tf.Tensor) -> tf.Tensor:
        return tf.linalg.pinv(matrix)

    @Autocast()
    def pow(self, x: tf.Tensor, y: float) -> tf.Tensor:
        return tf.math.pow(x, y)

    def real(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.real(array)

    def reshape(self, array: tf.Tensor, shape: Sequence[int]) -> tf.Tensor:
        return tf.reshape(array, shape)

    def sin(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.sin(array)

    def sinh(self, array: tf.Tensor) -> tf.Tensor:
        return tf.math.sinh(array)

    def solve(self, matrix: tf.Tensor, rhs: tf.Tensor) -> tf.Tensor:
        """Solve ``matrix @ x = rhs``; accepts a vector rhs one rank below matrix."""
        if len(rhs.shape) == len(matrix.shape) - 1:
            # Promote the vector to a column, solve, then drop the column axis.
            rhs = tf.expand_dims(rhs, -1)
            return tf.linalg.solve(matrix, rhs)[..., 0]
        return tf.linalg.solve(matrix, rhs)

    def sqrt(self, x: tf.Tensor, dtype=None) -> tf.Tensor:
        return tf.sqrt(self.cast(x, dtype))

    def sum(self, array: tf.Tensor, axes: Sequence[int] = None):
        return tf.reduce_sum(array, axes)

    @Autocast()
    def tensordot(self, a: tf.Tensor, b: tf.Tensor, axes: List[int]) -> tf.Tensor:
        return tf.tensordot(a, b, axes)

    def tile(self, array: tf.Tensor, repeats: Sequence[int]) -> tf.Tensor:
        return tf.tile(array, repeats)

    def trace(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
        return self.cast(tf.linalg.trace(array), dtype)

    def transpose(self, a: tf.Tensor, perm: Sequence[int] = None) -> tf.Tensor:
        if a is None:
            return None  # TODO: remove and address None inputs where tranpose is used
        return tf.transpose(a, perm)

    @Autocast()
    def update_tensor(self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor):
        """Functional scatter-update: returns a new tensor with ``values`` at ``indices``."""
        return tf.tensor_scatter_nd_update(tensor, indices, values)

    @Autocast()
    def update_add_tensor(self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor):
        """Functional scatter-add: returns a new tensor with ``values`` added at ``indices``."""
        return tf.tensor_scatter_nd_add(tensor, indices, values)
def unique_tensors(self, lst: List[Tensor]) -> List[Tensor]:
hash_dict = {}
for tensor in lst:
try:
if (hash := self.hash_tensor(tensor)) not in hash_dict:
hash_dict[hash] = tensor
except TypeError:
continue
return list(hash_dict.values())
    def zeros(self, shape: Sequence[int], dtype=tf.float64) -> tf.Tensor:
        return tf.zeros(shape, dtype=dtype)

    def zeros_like(self, array: tf.Tensor) -> tf.Tensor:
        return tf.zeros_like(array)

    def map_fn(self, func, elements):
        return tf.map_fn(func, elements)

    def squeeze(self, tensor, axis=None):
        # ``axis or []`` maps None to "squeeze nothing explicitly" (all size-1 axes).
        return tf.squeeze(tensor, axis=axis or [])

    def cholesky(self, input: Tensor):
        return tf.linalg.cholesky(input)

    def Categorical(self, probs: Tensor, name: str):
        """A tfp Categorical distribution with the given probabilities."""
        return tfp.distributions.Categorical(probs=probs, name=name)

    def MultivariateNormalTriL(self, loc: Tensor, scale_tril: Tensor):
        """A tfp multivariate normal parametrized by a lower-triangular scale."""
        return tfp.distributions.MultivariateNormalTriL(loc=loc, scale_tril=scale_tril)

    # ~~~~~~~~~~~~~~~~~
    # Special functions
    # ~~~~~~~~~~~~~~~~~

    # TODO: is a wrapper class better?
    @staticmethod
    def DefaultEuclideanOptimizer() -> tf.keras.optimizers.Optimizer:
        r"""Default optimizer for the Euclidean parameters."""
        return tf.keras.optimizers.Adam(learning_rate=0.001)
    def value_and_gradients(
        self, cost_fn: Callable, parameters: List[Trainable]
    ) -> Tuple[tf.Tensor, List[tf.Tensor]]:
        r"""Computes the loss and gradients of the given cost function.

        Args:
            cost_fn (Callable with no args): The cost function.
            parameters (List[Trainable]): The parameters to optimize.

        Returns:
            tuple(Tensor, List[Tensor]): the loss and the gradients
        """
        # Record the forward pass on a tape, then differentiate w.r.t. parameters.
        with tf.GradientTape() as tape:
            loss = cost_fn()
        gradients = tape.gradient(loss, parameters)
        return loss, gradients
    @tf.custom_gradient
    def hermite_renormalized(
        self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: Tuple[int]
    ) -> tf.Tensor:
        r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
        series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
        at the denominator rather than :math:`n!`. It computes all the amplitudes within the
        tensor of given shape.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            shape: The shape of the final tensor.

        Returns:
            The renormalized Hermite polynomial of given shape.
        """
        # The recursion runs in numpy (numba strategies), outside the tf graph.
        _A, _B, _C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
        G = strategies.vanilla(tuple(shape), _A, _B, _C)

        def grad(dLdGconj):
            # Custom VJP: tensorflow hands us dL/dG*, the strategy expects dL/dG.
            dLdA, dLdB, dLdC = strategies.vanilla_vjp(G, _C, np.conj(dLdGconj))
            return self.conj(dLdA), self.conj(dLdB), self.conj(dLdC)

        return G, grad
    @tf.custom_gradient
    def hermite_renormalized_binomial(
        self,
        A: tf.Tensor,
        B: tf.Tensor,
        C: tf.Tensor,
        shape: Tuple[int],
        max_l2: Optional[float],
        global_cutoff: Optional[int],
    ) -> tf.Tensor:
        r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
        series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
        at the denominator rather than :math:`n!`. The computation fills a tensor of given shape
        up to a given L2 norm or global cutoff, whichever applies first. The max_l2 value, if
        not provided, is set to the default value of the AUTOCUTOFF_PROBABILITY setting.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            shape: The shape of the final tensor (local cutoffs).
            max_l2 (float): The maximum squared L2 norm of the tensor.
            global_cutoff (optional int): The global cutoff.

        Returns:
            The renormalized Hermite polynomial of given shape.
        """
        _A, _B, _C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
        G, _ = strategies.binomial(
            tuple(shape),
            _A,
            _B,
            _C,
            max_l2=max_l2 or settings.AUTOCUTOFF_PROBABILITY,
            # default global cutoff: highest total photon number reachable in `shape`
            global_cutoff=global_cutoff or sum(shape) - len(shape) + 1,
        )

        def grad(dLdGconj):
            # Backward pass reuses the vanilla VJP on the (possibly truncated) G.
            dLdA, dLdB, dLdC = strategies.vanilla_vjp(G, _C, np.conj(dLdGconj))
            return self.conj(dLdA), self.conj(dLdB), self.conj(dLdC)

        return G, grad
def reorder_AB_bargmann(self, A: tf.Tensor, B: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
r"""In mrmustard.math.numba.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...]
while in mrmustard.physics.bargmann the ordering is [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B.
"""
ordering = np.arange(2 * A.shape[0] // 2).reshape(2, -1).T.flatten()
A = tf.gather(A, ordering, axis=1)
A = tf.gather(A, ordering)
B = tf.gather(B, ordering)
return A, B
    def hermite_renormalized_diagonal(
        self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
    ) -> tf.Tensor:
        r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
        Then, calculate the required renormalized multidimensional Hermite polynomial.
        """
        # Convenience wrapper: permute to compactFock ordering, then delegate.
        A, B = self.reorder_AB_bargmann(A, B)
        return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs)
    @tf.custom_gradient
    def hermite_renormalized_diagonal_reorderedAB(
        self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
    ) -> tf.Tensor:
        r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
        series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the
        denominator rather than :math:`n!`. Note the minus sign in front of ``A``.

        Calculates the diagonal of the Fock representation (i.e. the PNR detection probabilities of all modes)
        by applying the recursion relation in a selective manner.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            cutoffs: upper boundary of photon numbers in each mode

        Returns:
            The renormalized Hermite polynomial.
        """
        # The numba kernel also returns auxiliary polynomials needed for the VJP.
        poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
            hermite_multidimensional_diagonal, [A, B, C, cutoffs], [A.dtype] * 5
        )

        def grad(dLdpoly):
            dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
                grad_hermite_multidimensional_diagonal,
                [A, B, C, poly0, poly2, poly1010, poly1001, poly1],
                [poly0.dtype] * 3,
            )
            # Contract the upstream gradient over all poly axes.
            ax = tuple(range(dLdpoly.ndim))
            dLdA = self.sum(dLdpoly[..., None, None] * self.conj(dpoly_dA), axes=ax)
            dLdB = self.sum(dLdpoly[..., None] * self.conj(dpoly_dB), axes=ax)
            dLdC = self.sum(dLdpoly * self.conj(dpoly_dC), axes=ax)
            return dLdA, dLdB, dLdC

        return poly0, grad
    def hermite_renormalized_1leftoverMode(
        self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
    ) -> tf.Tensor:
        r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
        Then, calculate the required renormalized multidimensional Hermite polynomial.
        """
        # Convenience wrapper: permute to compactFock ordering, then delegate.
        A, B = self.reorder_AB_bargmann(A, B)
        return self.hermite_renormalized_1leftoverMode_reorderedAB(A, B, C, cutoffs=cutoffs)
    @tf.custom_gradient
    def hermite_renormalized_1leftoverMode_reorderedAB(
        self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
    ) -> tf.Tensor:
        r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
        series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the
        denominator rather than :math:`n!`. Note the minus sign in front of ``A``.

        Calculates all possible Fock representations of mode 0,
        where all other modes are PNR detected.
        This is done by applying the recursion relation in a selective manner.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            cutoffs: upper boundary of photon numbers in each mode

        Returns:
            The renormalized Hermite polynomial.
        """
        # Same structure as the diagonal variant; only the numba kernels differ.
        poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
            hermite_multidimensional_1leftoverMode, [A, B, C, cutoffs], [A.dtype] * 5
        )

        def grad(dLdpoly):
            dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
                grad_hermite_multidimensional_1leftoverMode,
                [A, B, C, poly0, poly2, poly1010, poly1001, poly1],
                [poly0.dtype] * 3,
            )
            ax = tuple(range(dLdpoly.ndim))
            dLdA = self.sum(dLdpoly[..., None, None] * self.conj(dpoly_dA), axes=ax)
            dLdB = self.sum(dLdpoly[..., None] * self.conj(dpoly_dB), axes=ax)
            dLdC = self.sum(dLdpoly * self.conj(dpoly_dC), axes=ax)
            return dLdA, dLdB, dLdC

        return poly0, grad
    @staticmethod
    def eigvals(tensor: tf.Tensor) -> Tensor:
        """Returns the eigenvalues of a matrix."""
        return tf.linalg.eigvals(tensor)

    @staticmethod
    def eigvalsh(tensor: tf.Tensor) -> Tensor:
        """Returns the eigenvalues of a Real Symmetric or Hermitian matrix."""
        return tf.linalg.eigvalsh(tensor)

    @staticmethod
    def svd(tensor: tf.Tensor) -> Tensor:
        """Returns the Singular Value Decomposition of a matrix."""
        return tf.linalg.svd(tensor)

    @staticmethod
    def xlogy(x: tf.Tensor, y: tf.Tensor) -> Tensor:
        """Returns 0 if ``x == 0,`` and ``x * log(y)`` otherwise, elementwise."""
        return tf.math.xlogy(x, y)

    @staticmethod
    def eigh(tensor: tf.Tensor) -> Tensor:
        """Returns the eigenvalues and eigenvectors of a matrix."""
        return tf.linalg.eigh(tensor)

    def sqrtm(self, tensor: tf.Tensor, rtol=1e-05, atol=1e-08) -> Tensor:
        """Returns the matrix square root of a square matrix, such that ``sqrt(A) @ sqrt(A) = A``."""
        # The sqrtm function has issues with matrices that are close to zero, hence we branch
        if np.allclose(tensor, 0, rtol=rtol, atol=atol):
            return self.zeros_like(tensor)
        return tf.linalg.sqrtm(tensor)

    @staticmethod
    def boolean_mask(tensor: tf.Tensor, mask: tf.Tensor) -> Tensor:
        """Returns a tensor based on the truth value of the boolean mask."""
        return tf.boolean_mask(tensor, mask)

    @staticmethod
    def custom_gradient(func, *args, **kwargs):
        """Decorator to define a function with a custom gradient."""
        return tf.custom_gradient(func, *args, **kwargs)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Extras (not in the Interface)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    @tf.custom_gradient
    def getitem(tensor, *, key):
        """A differentiable pure equivalent of numpy's ``value = tensor[key]``."""
        # NOTE(review): defined without ``self``/@staticmethod — presumably used as
        # a plain function through the class; confirm callers never bind it.
        value = np.array(tensor)[key]

        def grad(dy):
            # Gradient is dy scattered back into a zero tensor at ``key``.
            dL_dtensor = np.zeros_like(tensor)
            dL_dtensor[key] = dy
            return dL_dtensor

        return value, grad

    @tf.custom_gradient
    def setitem(tensor, value, *, key):
        """A differentiable pure equivalent of numpy's ``tensor[key] = value``."""
        _tensor = np.array(tensor)
        value = np.array(value)
        _tensor[key] = value

        def grad(dy):
            # d(result)/d(tensor): pass dy through everywhere except the overwritten slots.
            dL_dtensor = np.array(dy)
            dL_dtensor[key] = 0.0
            # unbroadcasting the gradient
            implicit_broadcast = list(range(_tensor.ndim - value.ndim))
            explicit_broadcast = [
                _tensor.ndim - value.ndim + j for j in range(value.ndim) if value.shape[j] == 1
            ]
            dL_dvalue = np.sum(
                np.array(dy)[key], axis=tuple(implicit_broadcast + explicit_broadcast)
            )
            dL_dvalue = np.expand_dims(
                dL_dvalue, [i - len(implicit_broadcast) for i in explicit_broadcast]
            )
            return dL_dtensor, dL_dvalue

        return _tensor, grad
| 22,806 | 35.785484 | 133 | py |
MrMustard-develop | MrMustard-develop/tests/test_math/test_interface.py | # Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :class:`Math`.
"""
import numpy as np
import pytest
from mrmustard import settings
from mrmustard.math import Math
try:
import torch
torch_available = True
except ImportError:
torch_available = False
def test_backend_redirection_tf():
    """Math must dispatch to the tensorflow backend when settings select it."""
    dispatcher = Math()
    settings.BACKEND = "tensorflow"
    backend_module = dispatcher._MathInterface__instance.__module__
    assert backend_module == "mrmustard.math.tensorflow"
@pytest.mark.skipif(not torch_available, reason="Test only works if Torch is installed")
def test_backend_redirection_torch():
    """Math must dispatch to the torch backend when settings select it."""
    dispatcher = Math()
    settings.BACKEND = "torch"
    backend_module = dispatcher._MathInterface__instance.__module__
    assert backend_module == "mrmustard.math.torch"
def test_error_for_wrong_backend():
    """Selecting an unknown backend name must raise ValueError."""
    saved_backend = settings.BACKEND
    with pytest.raises(ValueError) as excinfo:
        settings.BACKEND = "unexisting_backend"
    assert excinfo.value.args[0] == "Backend must be either 'tensorflow' or 'torch'"
    # restore the previous backend so later tests are unaffected
    settings.BACKEND = saved_backend
def test_hash_tensor():
    """Hashing the same tensor repeatedly must yield identical values."""
    math = Math()
    tensor = math.astensor([1, 2, 3])
    # BUG FIX: the original `np.allclose(*[...three hashes...])` passed the third
    # hash as np.allclose's `rtol` argument, so only two hashes were compared.
    hashes = [math.hash_tensor(tensor) for _ in range(3)]
    assert all(h == hashes[0] for h in hashes)
| 2,035 | 29.848485 | 95 | py |
MrMustard-develop | MrMustard-develop/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os, sys, re

# Make the package root, local sphinx extensions, and doc dir importable.
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("_ext"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(".")), "doc"))

# -- Project information -----------------------------------------------------
project = "Mr Mustard"
copyright = "2022, Xanadu Quantum Technologies"
author = "Filippo Miatto"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
import mrmustard as mm

release = mm.__version__

# The short X.Y version (leading "major.minor" of the release string).
version = re.match(r"^(\d+\.\d+)", release).expand(r"\1")

# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.inheritance_diagram",
    "sphinx.ext.viewcode",
    "sphinxcontrib.bibtex",
    "edit_on_github",
    "sphinx_autodoc_typehints",
    "sphinx.ext.intersphinx",
    "sphinx_automodapi.automodapi",
    "sphinx_copybutton",
    "m2r2",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]

# The master toctree document.
master_doc = "index"

autosummary_generate = True
autosummary_imported_members = False
automodapi_toctreedirnm = "code/api"
automodsumm_inherited_members = True

mathjax_path = (
    "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"
)

bibtex_bibfiles = ["references.bib"]

# -- Options for HTML output -------------------------------------------------
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
html_sidebars = {
    "**": [
        "searchbox.html",
        "globaltoc.html",
    ]
}

# Output file base name for HTML help builder.
htmlhelp_basename = "MrMustarddoc"

edit_on_github_project = "XanaduAI/MrMustard"
edit_on_github_branch = "master/doc"

# the order in which autodoc lists the documented members
autodoc_member_order = "bysource"

# mock non-installed imports
autodoc_mock_imports = ["torch"]

# inheritance_diagram graphviz attributes
inheritance_node_attrs = dict(color="lightskyblue1", style="filled")

# -- Xanadu theme ---------------------------------------------------------
html_theme = "xanadu"
html_theme_options = {
    "navbar_name": "Mr Mustard",
    "navbar_logo_path": "_static/mm_logo.png",
    "navbar_right_links": [
        {
            "name": "GitHub",
            "href": "https://github.com/XanaduAI/MrMustard",
            "icon": "fab fa-github",
        }
    ],
    "extra_copyrights": [
        "TensorFlow, the TensorFlow logo, and any related marks are trademarks " "of Google Inc."
    ],
    "google_analytics_tracking_id": "UA-116279123-2",
    "prev_next_button_colour": "#b79226",
    "prev_next_button_hover_colour": "#d7b348",
    "toc_marker_colour": "#b79226",
    "table_header_background_colour": "#ffdce5",
    "border_colour": "#b79226",
    "text_accent_colour": "#b79226",
}
| 4,913 | 30.299363 | 97 | py |
cGAN-KD | cGAN-KD-main/UTKFace/baseline_cnn.py | print("\n===================================================================================================")
import os
import argparse
import shutil
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import random
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
import csv
from tqdm import tqdm
import gc
import h5py
### import my stuffs ###
from opts import cnn_opts
from models import *
from utils import *
from train_cnn import train_cnn, test_cnn
#######################################################################################
''' Settings '''
#######################################################################################
args = cnn_opts()
print(args)

#-------------------------------
# seeds — fix all RNGs (python, torch, numpy) and force deterministic cudnn
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)

#-------------------------------
# CNN settings
## lr decay scheme: epochs at which to decay, encoded as "e1_e2_..." on the CLI
lr_decay_epochs = (args.lr_decay_epochs).split("_")
lr_decay_epochs = [int(epoch) for epoch in lr_decay_epochs]

#-------------------------------
# output folders: one directory per fake-data configuration, or 'vanilla'
# when training on real data only.
if args.fake_data_path!="None" and args.nfake>0:
    fake_data_name = args.fake_data_path.split("/")[-1]
    output_directory = os.path.join(args.root_path, 'output/CNN/{}_useNfake_{}'.format(fake_data_name, args.nfake))
    cnn_info = '{}_lr_{}_decay_{}_finetune_{}'.format(args.cnn_name, args.lr_base, args.weight_decay, args.finetune)
else:
    output_directory = os.path.join(args.root_path, 'output/CNN/vanilla')
    cnn_info = '{}_lr_{}_decay_{}'.format(args.cnn_name, args.lr_base, args.weight_decay)
os.makedirs(output_directory, exist_ok=True)

#-------------------------------
# some functions
def fn_norm_labels(labels):
    """Normalize raw age labels into [0, 1] by dividing by the global ``args.max_label``."""
    scale = args.max_label
    return labels / scale
def fn_denorm_labels(labels):
    """Map normalized labels in [0, 1] back to integer ages (scale by ``args.max_label``).

    Accepts a numpy array, a torch tensor, or a python scalar and returns the
    matching integer-typed container.
    """
    scaled = labels * args.max_label
    if torch.is_tensor(scaled):
        return scaled.type(torch.int)
    if isinstance(scaled, np.ndarray):
        return scaled.astype(int)
    return int(scaled)
#######################################################################################
''' Data loader '''
#######################################################################################
print('\n Loading real data...')
hf = h5py.File(os.path.join(args.data_path, 'UTKFace_64x64_prop_0.8.h5'), 'r')
images_train = hf['images_train'][:]
labels_train = hf['labels_train'][:]
images_test = hf['images_test'][:]
labels_test = hf['labels_test'][:]
hf.close()

## unique labels
unique_labels = np.sort(np.array(list(set(labels_train))))

## for each age, take no more than args.max_num_img_per_label images
image_num_threshold = args.max_num_img_per_label
print("\n Original training set has {} images; For each age, take no more than {} images>>>".format(len(images_train), image_num_threshold))
sel_indx = []
for i in tqdm(range(len(unique_labels))):
    indx_i = np.where(labels_train == unique_labels[i])[0]
    if len(indx_i)>image_num_threshold:
        # randomly subsample over-represented ages
        np.random.shuffle(indx_i)
        indx_i = indx_i[0:image_num_threshold]
    sel_indx.append(indx_i)
sel_indx = np.concatenate(sel_indx, axis=0)
images_train = images_train[sel_indx]
labels_train = labels_train[sel_indx]
print("\r {} training images left.".format(len(images_train)))

## normalize to [0,1]
labels_train = fn_norm_labels(labels_train)
labels_test = fn_norm_labels(labels_test)

## number of real images
nreal = len(labels_train)
assert len(labels_train) == len(images_train)

## load fake data if needed; fake samples are appended to the real training set
if args.fake_data_path != 'None':
    print("\n Start loading fake data: {}...".format(args.fake_data_path))
    hf = h5py.File(args.fake_data_path, 'r')
    fake_images = hf['fake_images'][:]
    fake_labels = hf['fake_labels'][:]
    hf.close()
    print('\n Fake images: {}, min {}, max {}.'.format(fake_images.shape, fake_images.min(), fake_images.max()))
    print('\n Fake labels: {}, min {}, max {}.'.format(fake_labels.shape, fake_labels.min(), fake_labels.max()))
    # fake images are expected in the raw [0, 255] range, not [0, 1]
    assert np.max(fake_images)>1 and np.min(fake_images)>=0

    # ## take no more than args.nfake_per_label imgs for each label
    # indx_fake = []
    # for i in range(len(unique_labels)):
    #     label_i = unique_labels[i]
    #     indx_i = np.where(fake_labels==label_i)[0]
    #     np.random.shuffle(indx_i)
    #     if args.nfake_per_label<len(indx_i):
    #         indx_i = indx_i[0:args.nfake_per_label]
    #     indx_fake.append(indx_i)
    # ###end for i
    # indx_fake = np.concatenate(indx_fake)

    # take a random subset of args.nfake fake samples
    indx_fake = np.arange(len(fake_labels))
    np.random.shuffle(indx_fake)
    indx_fake = indx_fake[:args.nfake]
    fake_images = fake_images[indx_fake]
    fake_labels = fake_labels[indx_fake]

    ### visualize data distribution: per-age histogram of the selected fake samples
    unique_labels_unnorm = np.arange(1,int(args.max_label)+1)
    frequencies = []
    for i in range(len(unique_labels_unnorm)):
        indx_i = np.where(fake_labels==unique_labels_unnorm[i])[0]
        frequencies.append(len(indx_i))
    frequencies = np.array(frequencies).astype(int)
    width = 0.8
    x = np.arange(1,int(args.max_label)+1)
    # plot data in grouped manner of bar type
    fig, ax = plt.subplots(1,1, figsize=(6,4))
    ax.grid(color='lightgrey', linestyle='--', zorder=0)
    ax.bar(unique_labels_unnorm, frequencies, width, align='center', color='tab:green', zorder=3)
    ax.set_xlabel("Age")
    ax.set_ylabel("Frequency")
    plt.tight_layout()
    plt.savefig(os.path.join(args.root_path, "{}_UseNFake_{}_data_dist.pdf".format(fake_data_name, args.nfake)))
    plt.close()
    print('\n Frequence of ages: MIN={}, MEAN={}, MAX={}.'.format(np.min(frequencies),np.mean(frequencies),np.max(frequencies)))

    assert len(fake_images)==len(fake_labels)
    fake_labels = fn_norm_labels(fake_labels)

    ## combine fake and real
    images_train = np.concatenate((images_train, fake_images), axis=0)
    labels_train = np.concatenate((labels_train, fake_labels))
## if

## data loader for the training set and test set
# trainset = IMGs_dataset(images_train, labels_train, normalize=True)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size_train, shuffle=True, num_workers=args.num_workers)
testset = IMGs_dataset(images_test, labels_test, normalize=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size_test, shuffle=False, num_workers=args.num_workers)

## info of training set and test set
print("\n Training set: {}x{}x{}x{}; Testing set: {}x{}x{}x{}.".format(images_train.shape[0], images_train.shape[1], images_train.shape[2], images_train.shape[3], images_test.shape[0], images_test.shape[1], images_test.shape[2], images_test.shape[3]))
#######################################################################################
''' CNN Training '''
#######################################################################################
### model initialization
net = cnn_dict[args.cnn_name]()
num_parameters = count_parameters(net)

### start training
if args.finetune:
    filename_ckpt = os.path.join(output_directory, 'ckpt_{}_epoch_{}_finetune_True_last.pth'.format(args.cnn_name, args.epochs))
    ## load pre-trained model
    checkpoint = torch.load(args.init_model_path)
    net.load_state_dict(checkpoint['net_state_dict'])
else:
    filename_ckpt = os.path.join(output_directory, 'ckpt_{}_epoch_{}_last.pth'.format(args.cnn_name, args.epochs))
print('\n' + filename_ckpt)

# training: skipped entirely when a final checkpoint already exists
if not os.path.isfile(filename_ckpt):
    print("\n Start training the {} >>>".format(args.cnn_name))

    path_to_ckpt_in_train = output_directory + '/ckpts_in_train/{}'.format(cnn_info)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)

    train_cnn(net=net, net_name=args.cnn_name, train_images=images_train, train_labels=labels_train, testloader=testloader, epochs=args.epochs, resume_epoch=args.resume_epoch, save_freq=args.save_freq, batch_size=args.batch_size_train, lr_base=args.lr_base, lr_decay_factor=args.lr_decay_factor, lr_decay_epochs=lr_decay_epochs, weight_decay=args.weight_decay, path_to_ckpt = path_to_ckpt_in_train, fn_denorm_labels=fn_denorm_labels)

    # store model
    torch.save({
        'net_state_dict': net.state_dict(),
    }, filename_ckpt)
    print("\n End training CNN.")
else:
    print("\n Loading pre-trained {}.".format(args.cnn_name))
    checkpoint = torch.load(filename_ckpt)
    net.load_state_dict(checkpoint['net_state_dict'])
#end if

# testing: MAE on the held-out test set, reported in unnormalized (age) units
test_mae = test_cnn(net, testloader, fn_denorm_labels=fn_denorm_labels, verbose=True)
print("\n Test MAE {}.".format(test_mae))

# append run configuration and result to a per-configuration log file
test_results_logging_fullpath = output_directory + '/test_results_{}_MAE_{:.3f}.txt'.format(cnn_info, test_mae)
if not os.path.isfile(test_results_logging_fullpath):
    test_results_logging_file = open(test_results_logging_fullpath, "w")
    test_results_logging_file.close()
with open(test_results_logging_fullpath, 'a') as test_results_logging_file:
    test_results_logging_file.write("\n===================================================================================================")
    test_results_logging_file.write("\n {}; num paras: {}; seed: {} \n".format(cnn_info, num_parameters, args.seed))
    print(args, file=test_results_logging_file)
    test_results_logging_file.write("\n Test MAE {}.".format(test_mae))

print("\n===================================================================================================")
| 9,981 | 37.099237 | 433 | py |
cGAN-KD | cGAN-KD-main/UTKFace/eval_metrics.py | """
Compute
Inception Score (IS),
Frechet Inception Discrepency (FID), ref "https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py"
Maximum Mean Discrepancy (MMD)
for a set of fake images
use numpy array
Xr: high-level features for real images; nr by d array
Yr: labels for real images
Xg: high-level features for fake images; ng by d array
Yg: labels for fake images
IMGSr: real images
IMGSg: fake images
"""
import os
import gc
import numpy as np
# from numpy import linalg as LA
from scipy import linalg
import torch
import torch.nn as nn
from scipy.stats import entropy
from torch.nn import functional as F
from torchvision.utils import save_image
from utils import SimpleProgressBar, IMGs_dataset
##############################################################################
# FID scores
##############################################################################
# compute FID based on extracted features
def FID(Xr, Xg, eps=1e-10):
    '''
    Compute the Frechet Inception Distance between two feature sets.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Xr  : (nr, d) array of features extracted from real images.
    Xg  : (ng, d) array of features extracted from fake images.
    eps : value added to the covariance diagonals when the matrix square
          root of the product is numerically singular.
    Returns a real scalar FID score.
    '''
    # sample means
    MUr = np.mean(Xr, axis = 0)
    MUg = np.mean(Xg, axis = 0)
    mean_diff = MUr - MUg
    # sample covariances (np.cov expects variables in rows, hence transpose)
    SIGMAr = np.cov(Xr.transpose())
    SIGMAg = np.cov(Xg.transpose())
    # Product might be almost singular
    covmean, _ = linalg.sqrtm(SIGMAr.dot(SIGMAg), disp=False) #square root of a matrix
    covmean = covmean.real
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(SIGMAr.shape[0]) * eps
        # BUGFIX: the retry previously used sqrtm's default disp=True (which
        # prints diagnostics and returns only the root) and never took the
        # real part, so a complex FID could be returned. Use disp=False and
        # discard any numerical imaginary component, as in the first attempt.
        covmean, _ = linalg.sqrtm((SIGMAr + offset).dot(SIGMAg + offset), disp=False)
        covmean = covmean.real
    # fid score
    fid_score = mean_diff.dot(mean_diff) + np.trace(SIGMAr + SIGMAg - 2*covmean)
    return fid_score
##test
#Xr = np.random.rand(10000,1000)
#Xg = np.random.rand(10000,1000)
#print(FID(Xr, Xg))
# compute FID from raw images
def cal_FID(PreNetFID, IMGSr, IMGSg, batch_size = 500, resize = None):
    '''
    Compute FID from raw images.

    PreNetFID : feature-extractor network; called as PreNetFID(img) and must
                return a (batch, d) feature tensor.
    IMGSr     : real images, numpy array of shape (nr, nc, img_size, img_size).
    IMGSg     : fake images, same layout as IMGSr.
    batch_size: feature-extraction batch size (capped at min(nr, ng)).
    resize    : if None, do not resize; if resize = (H,W), resize images to 3 x H x W
    Returns the scalar FID between the two image sets.
    '''
    PreNetFID.eval()
    nr = IMGSr.shape[0]
    ng = IMGSg.shape[0]
    nc = IMGSr.shape[1] #IMGSr is nrxNCxIMG_SIExIMG_SIZE
    img_size = IMGSr.shape[2]
    if batch_size > min(nr, ng):
        batch_size = min(nr, ng)
        # print("FID: reduce batch size to {}".format(batch_size))
    # probe one image to learn the length of the extracted feature vector
    with torch.no_grad():
        test_img = torch.from_numpy(IMGSr[0].reshape((1,nc,img_size,img_size))).type(torch.float).cuda()
        if resize is not None:
            test_img = nn.functional.interpolate(test_img, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
        test_features = PreNetFID(test_img)
        d = test_features.shape[1] #length of extracted features
    Xr = np.zeros((nr, d))
    Xg = np.zeros((ng, d))
    with torch.no_grad():
        tmp = 0
        pb1 = SimpleProgressBar()
        # BUGFIX: iterate until every image is consumed. The old
        # range(nr//batch_size) loop dropped the final partial batch, leaving
        # all-zero rows in Xr/Xg and skewing the FID whenever nr or ng was not
        # a multiple of batch_size.
        while tmp < nr:
            imgr_tensor = torch.from_numpy(IMGSr[tmp:(tmp+batch_size)]).type(torch.float).cuda()
            if resize is not None:
                imgr_tensor = nn.functional.interpolate(imgr_tensor, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
            Xr_tmp = PreNetFID(imgr_tensor)
            Xr[tmp:(tmp+batch_size)] = Xr_tmp.detach().cpu().numpy()
            tmp += len(imgr_tensor)
            # BUGFIX: min(max(p,100),100) always displayed 100%; show the true
            # percentage of processed real images instead.
            pb1.update(min(tmp/nr*100, 100))
            del Xr_tmp,imgr_tensor; gc.collect()
            torch.cuda.empty_cache()
        tmp = 0
        pb2 = SimpleProgressBar()
        while tmp < ng:
            imgg_tensor = torch.from_numpy(IMGSg[tmp:(tmp+batch_size)]).type(torch.float).cuda()
            if resize is not None:
                imgg_tensor = nn.functional.interpolate(imgg_tensor, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
            Xg_tmp = PreNetFID(imgg_tensor)
            Xg[tmp:(tmp+batch_size)] = Xg_tmp.detach().cpu().numpy()
            tmp += len(imgg_tensor)
            pb2.update(min(tmp/ng*100, 100))
            del Xg_tmp,imgg_tensor; gc.collect()
            torch.cuda.empty_cache()
    fid_score = FID(Xr, Xg, eps=1e-6)
    return fid_score
##############################################################################
# label_score
# difference between assigned label and predicted label
##############################################################################
def cal_labelscore(PreNet, images, labels_assi, min_label_before_shift, max_label_after_shift, batch_size = 500, resize = None, num_workers=0):
    '''
    Label Score: absolute difference between assigned and predicted labels.

    PreNet: pre-trained CNN; called as PreNet(img) and expected to return
        (predicted_labels, features) — only the first output is used
    images: fake images, (n, nc, img_size, img_size)
    labels_assi: assigned (conditioning) labels, normalized; flattened to 1-D
    min_label_before_shift / max_label_after_shift: constants used to map the
        normalized labels/predictions back to the original label scale below
    resize: if None, do not resize; if resize = (H,W), resize images to 3 x H x W
        NOTE(review): `resize` is accepted but never used in this function —
        confirm whether resizing was intended here as in cal_FID.
    Returns (mean, std) of |predicted - assigned| on the de-normalized scale.
    '''
    PreNet.eval()
    # assume images are nxncximg_sizeximg_size
    n = images.shape[0]
    nc = images.shape[1] #number of channels
    img_size = images.shape[2]
    labels_assi = labels_assi.reshape(-1)
    eval_trainset = IMGs_dataset(images, labels_assi, normalize=False)
    eval_dataloader = torch.utils.data.DataLoader(eval_trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # over-allocate by one batch so the last (possibly partial) batch fits
    labels_pred = np.zeros(n+batch_size)
    nimgs_got = 0
    pb = SimpleProgressBar()
    for batch_idx, (batch_images, batch_labels) in enumerate(eval_dataloader):
        batch_images = batch_images.type(torch.float).cuda()
        batch_labels = batch_labels.type(torch.float).cuda()
        batch_size_curr = len(batch_labels)
        batch_labels_pred, _ = PreNet(batch_images)
        labels_pred[nimgs_got:(nimgs_got+batch_size_curr)] = batch_labels_pred.detach().cpu().numpy().reshape(-1)
        nimgs_got += batch_size_curr
        pb.update((float(nimgs_got)/n)*100)
        del batch_images; gc.collect()
        torch.cuda.empty_cache()
    #end for batch_idx
    # trim the over-allocation, then de-normalize both predictions and targets
    labels_pred = labels_pred[0:n]
    labels_pred = (labels_pred*max_label_after_shift)-np.abs(min_label_before_shift)
    labels_assi = (labels_assi*max_label_after_shift)-np.abs(min_label_before_shift)
    ls_mean = np.mean(np.abs(labels_pred-labels_assi))
    ls_std = np.std(np.abs(labels_pred-labels_assi))
    return ls_mean, ls_std
| 6,666 | 33.365979 | 143 | py |
cGAN-KD | cGAN-KD-main/UTKFace/train_net_for_label_embed.py |
import torch
import torch.nn as nn
from torchvision.utils import save_image
import numpy as np
import os
import timeit
from PIL import Image
### horizontally flip images
def hflip_images(batch_images):
    """Randomly mirror a batch of NCHW images left-right, in place.

    Each image is flipped along its width axis (axis 3) with probability
    0.5; the mutated input array is also returned for convenience.
    """
    draws = np.random.uniform(0, 1, len(batch_images))
    flip_mask = draws > 0.5
    batch_images[flip_mask] = batch_images[flip_mask][:, :, :, ::-1]
    return batch_images
# def hflip_images(batch_images):
# uniform_threshold = np.random.uniform(0,1,len(batch_images))
# indx_gt = np.where(uniform_threshold>0.5)[0]
# batch_images[indx_gt] = torch.flip(batch_images[indx_gt], dims=[3])
# return batch_images
## normalize images
def normalize_images(batch_images):
    """Map pixel values from [0, 255] to [-1, 1] (returns a new array)."""
    scaled = batch_images / 255.0
    return (scaled - 0.5) / 0.5
#-------------------------------------------------------------
def train_net_embed(net, train_images, train_labels, test_loader=None, epochs=200, resume_epoch = 0, save_freq=40, batch_size=128, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=[150, 180, 210], weight_decay=1e-4, path_to_ckpt = None, fn_denorm_labels=None):
    '''
    Train the x->y embedding CNN (image -> normalized label regression, MSE loss).

    train_images: unnormalized images (pixel values in (1, 255], asserted below)
    train_labels: normalized labels (in [0, 1], asserted below)
    test_loader: optional test DataLoader; when given, test MAE is computed each
        epoch via test_cnn and returned as a third value
    resume_epoch: if >0 and path_to_ckpt is set, resume from that epoch's checkpoint
    save_freq: checkpoint frequency in epochs (also saves at the final epoch)
    path_to_ckpt: directory for intra-training checkpoints; None disables them
    fn_denorm_labels: passed through to test_cnn to map labels back to original scale
    Returns (net_on_cpu, train_mse_per_epoch) or
            (net_on_cpu, train_mse_per_epoch, test_mae_per_epoch).
    NOTE: lr_decay_epochs uses a mutable default list; it is only read, never
    mutated, so this is harmless here.
    '''
    assert train_images.max()>1 and train_images.max()<=255.0 and train_images.min()>=0
    assert train_labels.min()>=0 and train_labels.max()<=1.0
    # NOTE: unique_train_labels is currently unused (it served a label-balanced
    # sampling scheme that has been disabled in favor of uniform sampling).
    unique_train_labels = np.sort(np.array(list(set(train_labels)))) ##sorted unique labels
    indx_all = np.arange(len(train_labels))
    ''' learning rate decay '''
    def adjust_learning_rate_1(optimizer, epoch):
        """Step-decay: multiply lr_base by lr_decay_factor once per passed milestone."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    net = net.cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr = lr_base, momentum= 0.9, weight_decay=weight_decay)
    # resume training; load checkpoint (model, optimizer and RNG state)
    if path_to_ckpt is not None and resume_epoch>0:
        save_file = path_to_ckpt + "/embed_x2y_checkpoint_epoch_{}.pth".format(resume_epoch)
        checkpoint = torch.load(save_file)
        net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if
    train_mse_all = []
    test_mae_all = []
    start_tmp = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        net.train()
        train_loss = 0
        adjust_learning_rate_1(optimizer, epoch)
        for _ in range(len(train_labels)//batch_size):
            # draw a uniformly random (with replacement) mini-batch of indices
            batch_train_indx = np.random.choice(indx_all, size=batch_size, replace=True).reshape(-1) #for cnn training only
            ### get some real images for training
            batch_train_images = train_images[batch_train_indx]
            batch_train_images = hflip_images(batch_train_images) ## randomly flip real images
            batch_train_images = normalize_images(batch_train_images) ## normalize real images
            batch_train_images = torch.from_numpy(batch_train_images).type(torch.float).cuda()
            assert batch_train_images.max().item()<=1.0
            batch_train_labels = train_labels[batch_train_indx]
            batch_train_labels = torch.from_numpy(batch_train_labels).type(torch.float).view(-1,1).cuda()
            #Forward pass (net returns (prediction, features); features unused here)
            outputs, _ = net(batch_train_images)
            loss = criterion(outputs, batch_train_labels)
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / (len(train_labels)//batch_size)
        train_mse_all.append(train_loss)
        if test_loader is None:
            print('Train net_x2y for embedding: [epoch %d/%d] train_loss:%f Time:%.4f' % (epoch+1, epochs, train_loss, timeit.default_timer()-start_tmp))
        else:
            test_mae = test_cnn(net, test_loader, fn_denorm_labels=fn_denorm_labels, verbose=False)
            test_mae_all.append(test_mae)
            print('Train net_x2y for label embedding: [epoch %d/%d] train_loss:%f test_MAE:%f Time:%.4f' % (epoch+1, epochs, train_loss, test_mae, timeit.default_timer()-start_tmp))
        #save checkpoint (every save_freq epochs and at the very end)
        if path_to_ckpt is not None and (((epoch+1) % save_freq == 0) or (epoch+1==epochs)):
            save_file = path_to_ckpt + "/embed_x2y_checkpoint_epoch_{}.pth".format(epoch+1)
            torch.save({
                    'epoch': epoch,
                    'net_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch
    net = net.cpu()
    if test_loader is None:
        return net, train_mse_all
    else:
        return net, train_mse_all, test_mae_all
def test_cnn(net, testloader, fn_denorm_labels=None, verbose=False):
    '''
    Evaluate a regression CNN on a test loader and return its MAE.

    net: CNN returning (prediction, features); only predictions are used
    testloader: DataLoader yielding (images, normalized_labels)
    fn_denorm_labels: maps normalized labels/predictions back to the original
        scale before the error is computed (must not be None)
    verbose: print the resulting MAE
    '''
    net = net.cuda()
    net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        abs_diff_avg = 0
        total = 0
        for _, (images, labels) in enumerate(testloader):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.float).view(-1).cpu().numpy()
            outputs, _ = net(images)
            outputs = outputs.view(-1).cpu().numpy()
            # de-normalize before measuring the error
            labels = fn_denorm_labels(labels)
            outputs = fn_denorm_labels(outputs)
            abs_diff_avg += np.sum(np.abs(labels-outputs)) ##compute MAE not MSE!!!
            total += len(labels)
    test_mae = abs_diff_avg/total
    if verbose:
        print('\n Test MAE: {}.'.format(test_mae))
    return test_mae
###################################################################################
class label_dataset(torch.utils.data.Dataset):
    """Minimal Dataset wrapping a 1-D collection of labels (no images)."""

    def __init__(self, labels):
        super().__init__()
        self.labels = labels
        self.n_samples = len(labels)

    def __getitem__(self, index):
        return self.labels[index]

    def __len__(self):
        return self.n_samples
def train_net_y2h(unique_train_labels_norm, net_y2h, net_embed, epochs=500, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=[150, 250, 350], weight_decay=1e-4, batch_size=128):
    '''
    Train net_y2h: map a scalar label y into the embedding space h.

    net_y2h is trained so that h2y(y2h(y)) reconstructs y (MSE), where h2y is
    the frozen label-decoding head of the pre-trained embedding net.

    unique_train_labels_norm: an array of normalized unique labels (in [0,1],
        asserted below)
    net_y2h: the y->h network to train
    net_embed: pre-trained x->y embedding net; must be wrapped in DataParallel
        (its .module.h2y head is used, frozen, as the decoder)
    Returns the trained net_y2h (still on GPU).
    NOTE: lr_decay_epochs uses a mutable default list; it is only read here.
    '''
    ''' learning rate decay '''
    def adjust_learning_rate_2(optimizer, epoch):
        """Step-decay: multiply lr_base by lr_decay_factor once per passed milestone."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    assert np.max(unique_train_labels_norm)<=1 and np.min(unique_train_labels_norm)>=0
    trainset = label_dataset(unique_train_labels_norm)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
    net_embed = net_embed.cuda()
    net_y2h = net_y2h.cuda()
    net_embed.eval()
    net_h2y=net_embed.module.h2y #convert embedding labels to original labels
    optimizer_y2h = torch.optim.SGD(net_y2h.parameters(), lr = lr_base, momentum= 0.9, weight_decay=weight_decay)
    start_tmp = timeit.default_timer()
    for epoch in range(epochs):
        net_y2h.train()
        train_loss = 0
        adjust_learning_rate_2(optimizer_y2h, epoch)
        for _, batch_labels in enumerate(trainloader):
            batch_labels = batch_labels.type(torch.float).view(-1,1).cuda()
            # generate noises which will be added to labels (Gaussian, sd=0.2)
            batch_size_curr = len(batch_labels)
            batch_gamma = np.random.normal(0, 0.2, batch_size_curr)
            batch_gamma = torch.from_numpy(batch_gamma).view(-1,1).type(torch.float).cuda()
            # add noise to labels, then clamp back into the valid range [0,1]
            batch_labels_noise = torch.clamp(batch_labels+batch_gamma, 0.0, 1.0)
            #Forward pass: encode to h, decode back to y; train on reconstruction
            batch_hiddens_noise = net_y2h(batch_labels_noise)
            batch_rec_labels_noise = net_h2y(batch_hiddens_noise)
            loss = nn.MSELoss()(batch_rec_labels_noise, batch_labels_noise)
            #backward pass (only net_y2h's parameters are in the optimizer)
            optimizer_y2h.zero_grad()
            loss.backward()
            optimizer_y2h.step()
            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / len(trainloader)
        print('\n Train net_y2h: [epoch %d/%d] train_loss:%f Time:%.4f' % (epoch+1, epochs, train_loss, timeit.default_timer()-start_tmp))
    #end for epoch
    return net_y2h
| 10,789 | 39.111524 | 262 | py |
cGAN-KD | cGAN-KD-main/UTKFace/DiffAugment_pytorch.py | # Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738
import torch
import torch.nn.functional as F
def DiffAugment(x, policy='', channels_first=True):
    """Apply differentiable augmentations to a batch of images.

    policy is a comma-separated list of keys into AUGMENT_FNS (e.g.
    'color,translation,cutout'); an empty policy returns x untouched.
    channels_first=False inputs are NHWC and are converted to NCHW for the
    augmentations and back afterwards.
    """
    if not policy:
        return x
    if not channels_first:
        x = x.permute(0, 3, 1, 2)  # NHWC -> NCHW
    for name in policy.split(','):
        for aug_fn in AUGMENT_FNS[name]:
            x = aug_fn(x)
    if not channels_first:
        x = x.permute(0, 2, 3, 1)  # back to NHWC
    return x.contiguous()
def rand_brightness(x):
    """Add a per-image brightness offset drawn from U(-0.5, 0.5)."""
    shift = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + shift
def rand_saturation(x):
    """Scale each image's deviation from its channel mean by a factor in U(0, 2)."""
    channel_mean = x.mean(dim=1, keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - channel_mean) * factor + channel_mean
def rand_contrast(x):
    """Scale each image's deviation from its overall mean by a factor in U(0.5, 1.5)."""
    overall_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - overall_mean) * factor + overall_mean
def rand_translation(x, ratio=0.125):
    """Randomly translate each image by up to ratio*H / ratio*W pixels,
    padding the exposed border with zeros. x is NCHW."""
    # maximum shift in each spatial direction (rounded to nearest int)
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # one integer shift per image, in [-shift, +shift]
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    # full index grid over (batch, height, width)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    # shift the grid; +1 and the clamp bounds account for the 1-pixel zero
    # padding added below, so out-of-range indices land on the zero border
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    # gather the translated pixels (NHWC during the gather, then back to NCHW)
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x
def rand_cutout(x, ratio=0.5):
    """Zero out one randomly-placed rectangle of size ~(ratio*H, ratio*W)
    per image (the rectangle is clipped at the image borders). x is NCHW."""
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # random rectangle centers; the parity term keeps the center range valid
    # for even/odd cutout sizes
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    # index grid over (batch, cutout_h, cutout_w)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    # center the rectangle on the offset and clamp to the image bounds
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    # build a per-image mask that is 0 inside the rectangle, 1 elsewhere
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)  # broadcast the mask over channels
    return x
# Mapping from a policy token (as used in DiffAugment's comma-separated
# ``policy`` string) to the ordered list of augmentations applied for it.
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
| 3,025 | 38.298701 | 110 | py |
cGAN-KD | cGAN-KD-main/UTKFace/generate_synthetic_data.py | print("\n===================================================================================================")
import argparse
import copy
import gc
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib as mpl
import h5py
import os
import random
from tqdm import tqdm, trange
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_image
import timeit
from PIL import Image
### import my stuffs ###
from opts import gen_synth_data_opts
from utils import SimpleProgressBar, IMGs_dataset, IMGs_dataset_v2, PlotLoss, count_parameters
from models import *
from train_ccgan import train_ccgan, SampCcGAN_given_labels
from train_net_for_label_embed import train_net_embed, train_net_y2h
from train_sparseAE import train_sparseAE
from train_cdre import train_cdre
from eval_metrics import cal_FID, cal_labelscore
#######################################################################################
''' Settings '''
#######################################################################################
args = gen_synth_data_opts()
print(args)
if args.subsampling:
subsampling_method = "cDR-RS_presae_epochs_{}_DR_{}_epochs_{}_lambda_{:.3f}".format(args.dre_presae_epochs, args.dre_net, args.dre_epochs, args.dre_lambda)
else:
subsampling_method = "None"
## filter??
if args.filter:
subsampling_method = subsampling_method + "_filter_{}_perc_{:.2f}".format(args.samp_filter_precnn_net, args.samp_filter_mae_percentile_threshold)
else:
subsampling_method = subsampling_method + "_filter_None"
## adjust labels??
subsampling_method = subsampling_method + "_adjust_{}".format(args.adjust)
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
#-------------------------------
# output folders
output_directory = os.path.join(args.root_path, 'output')
os.makedirs(output_directory, exist_ok=True)
## folders for CcGAN and cDRE and fake data
save_models_folder = os.path.join(output_directory, 'CcGAN/saved_models')
os.makedirs(save_models_folder, exist_ok=True)
save_images_folder = os.path.join(output_directory, 'CcGAN/saved_images')
os.makedirs(save_images_folder, exist_ok=True)
fake_data_folder = os.path.join(output_directory, 'fake_data')
os.makedirs(fake_data_folder, exist_ok=True)
#-------------------------------
# some functions
def fn_norm_labels(labels):
    '''
    Normalize raw labels into [0, 1] by dividing by args.max_label.

    labels: unnormalized labels
    '''
    return labels/args.max_label
def fn_denorm_labels(labels):
    '''
    Map normalized labels in [0, 1] back to integer labels (scale by
    args.max_label and truncate to int).

    labels: normalized labels; numpy array, torch tensor, or python scalar —
    the returned value matches the input kind (int ndarray / int tensor / int)
    '''
    if isinstance(labels, np.ndarray):
        return (labels*args.max_label).astype(int)
    elif torch.is_tensor(labels):
        return (labels*args.max_label).type(torch.int)
    else:
        return int(labels*args.max_label)
#######################################################################################
''' Data loader '''
#######################################################################################
print('\n Loading real data...')
hf = h5py.File(os.path.join(args.data_path, 'UTKFace_64x64_prop_0.8.h5'), 'r')
images_train = hf['images_train'][:]
labels_train = hf['labels_train'][:]
images_test = hf['images_test'][:]
labels_test = hf['labels_test'][:]
hf.close()
## for each age, take no more than args.max_num_img_per_label images
image_num_threshold = args.max_num_img_per_label
print("\n Original training set has {} images; For each age, take no more than {} images>>>".format(len(images_train), image_num_threshold))
unique_labels_tmp = np.sort(np.array(list(set(labels_train))))
sel_indx = []
for i in tqdm(range(len(unique_labels_tmp))):
indx_i = np.where(labels_train == unique_labels_tmp[i])[0]
if len(indx_i)>image_num_threshold:
np.random.shuffle(indx_i)
indx_i = indx_i[0:image_num_threshold]
sel_indx.append(indx_i)
sel_indx = np.concatenate(sel_indx, axis=0)
images_train = images_train[sel_indx]
labels_train = labels_train[sel_indx]
print("\r {} training images left.".format(len(images_train)))
# unique normalized training labels
## normalize training labels to [0,1]
labels_train_norm = fn_norm_labels(labels_train)
unique_labels_train_norm = np.sort(np.array(list(set(labels_train_norm))))
labels_test_norm = fn_norm_labels(labels_test)
# counts_unique_labels = [] #distribution of training images
# for i in range(len(unique_labels_train_norm)):
# indx_i = np.where(labels_train_norm == unique_labels_train_norm[i])[0]
# counts_unique_labels.append(len(indx_i))
# print(counts_unique_labels)
# ### visualize real data distribution
# unique_labels_unnorm = np.arange(1,int(args.max_label)+1)
# frequencies = []
# for i in range(len(unique_labels_unnorm)):
# indx_i = np.where(labels_train==unique_labels_unnorm[i])[0]
# frequencies.append(len(indx_i))
# frequencies = np.array(frequencies).astype(int)
# width = 0.8
# x = np.arange(1,int(args.max_label)+1)
# # plot data in grouped manner of bar type
# fig, ax = plt.subplots(1,1, figsize=(6,4))
# ax.grid(color='lightgrey', linestyle='--', zorder=0)
# ax.bar(x, frequencies, width, align='center', color='tab:green', zorder=3)
# ax.set_xlabel("Age")
# ax.set_ylabel("Frequency")
# plt.tight_layout()
# plt.savefig(os.path.join(fake_data_folder, "utkface_real_images_data_dist.pdf"))
# plt.close()
## set sigma and kappa/nu in CcGAN
if args.gan_kernel_sigma<0:
std_label = np.std(labels_train_norm)
args.gan_kernel_sigma = 1.06*std_label*(len(labels_train_norm))**(-1/5)
print("\n Use rule-of-thumb formula to compute kernel_sigma >>>")
print("\n The std of {} normalied training labels is {} so the kernel sigma is {}".format(len(labels_train_norm), std_label, args.gan_kernel_sigma))
##end if
if args.gan_kappa<0:
n_unique = len(unique_labels_train_norm)
diff_list = []
for i in range(1,n_unique):
diff_list.append(unique_labels_train_norm[i] - unique_labels_train_norm[i-1])
kappa_base = np.abs(args.gan_kappa)*np.max(np.array(diff_list))
if args.gan_threshold_type=="hard":
args.gan_kappa = kappa_base
else:
args.gan_kappa = 1/kappa_base**2
## end if
if args.dre_kappa<0:
n_unique = len(unique_labels_train_norm)
diff_list = []
for i in range(1,n_unique):
diff_list.append(unique_labels_train_norm[i] - unique_labels_train_norm[i-1])
kappa_base = np.abs(args.dre_kappa)*np.max(np.array(diff_list))
args.dre_kappa = kappa_base
assert args.dre_kappa>=0
#######################################################################################
''' Pre-trained CNN for label embedding '''
#######################################################################################
net_embed_x2y_filename_ckpt = save_models_folder + '/ckpt_embed_{}_epoch_{}_seed_{}.pth'.format(args.gan_embed_x2y_net_name, args.gan_embed_x2y_epoch, args.seed)
print(net_embed_x2y_filename_ckpt)
net_embed_y2h_filename_ckpt = save_models_folder + '/ckpt_embed_y2h_epoch_{}_seed_{}.pth'.format(args.gan_embed_y2h_epoch, args.seed)
print(net_embed_y2h_filename_ckpt)
testset_embed_x2y = IMGs_dataset(images_test, labels_test_norm, normalize=True)
testloader_embed_x2y = torch.utils.data.DataLoader(testset_embed_x2y, batch_size=100, shuffle=False, num_workers=args.num_workers)
if args.gan_embed_x2y_net_name == "ResNet18":
net_embed_x2y = ResNet18_embed(dim_embed=args.gan_dim_embed)
elif args.gan_embed_x2y_net_name == "ResNet34":
net_embed_x2y = ResNet34_embed(dim_embed=args.gan_dim_embed)
elif args.gan_embed_x2y_net_name == "ResNet50":
net_embed_x2y = ResNet50_embed(dim_embed=args.gan_dim_embed)
else:
raise Exception("Wrong embedding net name!")
net_embed_x2y = net_embed_x2y.cuda()
net_embed_x2y = nn.DataParallel(net_embed_x2y)
net_embed_y2h = model_y2h(dim_embed=args.gan_dim_embed)
net_embed_y2h = net_embed_y2h.cuda()
net_embed_y2h = nn.DataParallel(net_embed_y2h)
## (1). Train net_embed first: x2h+h2y
if not os.path.isfile(net_embed_x2y_filename_ckpt):
print("\n Start training CNN for label embedding >>>")
# lr decay epochs
net_embed_x2y_lr_decay_epochs = (args.gan_embed_x2y_lr_decay_epochs).split("_")
net_embed_x2y_lr_decay_epochs = [int(epoch) for epoch in net_embed_x2y_lr_decay_epochs]
# ckpts in training
ckpts_in_train_net_embed_x2y = os.path.join(save_models_folder, 'ckpts_in_train_embed_x2y_{}'.format(args.gan_embed_x2y_net_name))
os.makedirs(ckpts_in_train_net_embed_x2y, exist_ok=True)
net_test_mae_file_fullpath = os.path.join(ckpts_in_train_net_embed_x2y, 'test_mae_embed_{}_epoch_{}_seed_{}.png'.format(args.gan_embed_x2y_net_name, args.gan_embed_x2y_epoch, args.seed))
# training function
net_embed_x2y, train_mse_all, test_mae_all = train_net_embed(net=net_embed_x2y, train_images=images_train, train_labels=labels_train_norm, test_loader=testloader_embed_x2y, epochs=args.gan_embed_x2y_epoch, resume_epoch=args.gan_embed_x2y_resume_epoch, save_freq=40, batch_size=args.gan_embed_x2y_batch_size, lr_base=args.gan_embed_x2y_lr_base, lr_decay_factor=args.gan_embed_x2y_lr_decay_factor, lr_decay_epochs=net_embed_x2y_lr_decay_epochs, weight_decay=1e-4, path_to_ckpt = ckpts_in_train_net_embed_x2y, fn_denorm_labels=fn_denorm_labels)
PlotLoss(loss=test_mae_all, filename=net_test_mae_file_fullpath)
# save model
torch.save({
'net_state_dict': net_embed_x2y.state_dict(),
}, net_embed_x2y_filename_ckpt)
else:
print("\n net_embed ckpt already exists")
print("\n Loading...")
checkpoint = torch.load(net_embed_x2y_filename_ckpt)
net_embed_x2y.load_state_dict(checkpoint['net_state_dict'])
#end not os.path.isfile
## (2). Train y2h
#train a net which maps a label back to the embedding space
if not os.path.isfile(net_embed_y2h_filename_ckpt):
print("\n Start training net_embed_y2h >>>")
# lr decay epochs
net_embed_y2h_lr_decay_epochs = (args.gan_embed_y2h_lr_decay_epochs).split("_")
net_embed_y2h_lr_decay_epochs = [int(epoch) for epoch in net_embed_y2h_lr_decay_epochs]
# training function
net_embed_y2h = train_net_y2h(unique_train_labels_norm=unique_labels_train_norm, net_y2h=net_embed_y2h, net_embed=net_embed_x2y, epochs=args.gan_embed_y2h_epoch, lr_base=args.gan_embed_y2h_lr_base, lr_decay_factor=args.gan_embed_y2h_lr_decay_factor, lr_decay_epochs=net_embed_y2h_lr_decay_epochs, weight_decay=1e-4, batch_size=args.gan_embed_y2h_batch_size)
# save model
torch.save({
'net_state_dict': net_embed_y2h.state_dict(),
}, net_embed_y2h_filename_ckpt)
else:
print("\n net_embed_y2h ckpt already exists")
print("\n Loading...")
checkpoint = torch.load(net_embed_y2h_filename_ckpt)
net_embed_y2h.load_state_dict(checkpoint['net_state_dict'])
#end not os.path.isfile
##some simple test after the embedding nets training
indx_tmp = np.arange(len(unique_labels_train_norm))
np.random.shuffle(indx_tmp)
indx_tmp = indx_tmp[:10]
labels_tmp = unique_labels_train_norm[indx_tmp].reshape(-1,1)
labels_tmp = torch.from_numpy(labels_tmp).type(torch.float).cuda()
epsilons_tmp = np.random.normal(0, 0.2, len(labels_tmp))
epsilons_tmp = torch.from_numpy(epsilons_tmp).view(-1,1).type(torch.float).cuda()
labels_tmp = torch.clamp(labels_tmp+epsilons_tmp, 0.0, 1.0)
net_embed_x2y.eval()
net_embed_h2y = net_embed_x2y.module.h2y
net_embed_y2h.eval()
with torch.no_grad():
labels_rec_tmp = net_embed_h2y(net_embed_y2h(labels_tmp)).cpu().numpy().reshape(-1,1)
results = np.concatenate((labels_tmp.cpu().numpy(), labels_rec_tmp), axis=1)
print("\n labels vs reconstructed labels")
print(results)
#######################################################################################
''' GAN training '''
#######################################################################################
print("\n Start CcGAN training: {}, Sigma is {:.3f}, Kappa is {:.3f}".format(args.gan_threshold_type, args.gan_kernel_sigma, args.gan_kappa))
path_to_ckpt_ccgan = os.path.join(save_models_folder, 'ckpt_{}_loss_{}_niters_{}_seed_{}_{}_sigma{:.3f}_kappa{:.3f}.pth'.format(args.gan_arch, args.gan_loss_type, args.gan_niters, args.seed, args.gan_threshold_type, args.gan_kernel_sigma, args.gan_kappa))
print(path_to_ckpt_ccgan)
start = timeit.default_timer()
if not os.path.isfile(path_to_ckpt_ccgan):
## images generated during training
images_in_train_ccgan = os.path.join(save_images_folder, 'images_in_train_{}'.format(args.gan_arch))
os.makedirs(images_in_train_ccgan, exist_ok=True)
# ckpts in training
ckpts_in_train_ccgan = os.path.join(save_models_folder, 'ckpts_in_train_{}'.format(args.gan_arch))
os.makedirs(ckpts_in_train_ccgan, exist_ok=True)
# init models
if args.gan_arch == 'SNGAN':
netG = SNGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
netD = SNGAN_Discriminator(disc_ch=args.gan_disc_ch, dim_embed=args.gan_dim_embed).cuda()
elif args.gan_arch == 'SAGAN':
netG = SAGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
netD = SAGAN_Discriminator(disc_ch=args.gan_disc_ch, dim_embed=args.gan_dim_embed).cuda()
else:
raise Exception('Wrong CcGAN name!')
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
# training function
netG, netD = train_ccgan(kernel_sigma=args.gan_kernel_sigma, kappa=args.gan_kappa, train_images=images_train, train_labels=labels_train_norm, netG=netG, netD=netD, net_y2h = net_embed_y2h, save_images_folder = images_in_train_ccgan, path_to_ckpt = ckpts_in_train_ccgan, clip_label=False)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, path_to_ckpt_ccgan)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(path_to_ckpt_ccgan)
if args.gan_arch == 'SNGAN':
netG = SNGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
elif args.gan_arch == 'SAGAN':
netG = SAGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
else:
raise Exception('Wrong CcGAN name!')
netG = nn.DataParallel(netG)
netG.load_state_dict(checkpoint['netG_state_dict'])
## end if
stop = timeit.default_timer()
print("CcGAN training finished; Time elapses: {}s".format(stop - start))
def fn_sampleGAN_given_label(nfake, given_label, netG=netG, net_y2h=net_embed_y2h, batch_size = 100, to_numpy=True, denorm=True, verbose=False):
    """Draw `nfake` samples from the conditional generator at a single label.

    `given_label` is a normalized label in [0,1]; the returned labels stay
    normalized. When `denorm` is True the images are mapped from the
    generator's [-1,1] range back to uint8 [0,255].
    """
    assert 0 <= given_label <= 1.0
    netG = netG.cuda()
    net_y2h = net_y2h.cuda()
    netG.eval()
    net_y2h.eval()
    batch_size = min(batch_size, nfake)
    collected = []
    with torch.no_grad():
        bar = SimpleProgressBar() if verbose else None
        num_got = 0
        while num_got < nfake:
            # condition every sample in the batch on the same label
            y = np.ones(batch_size) * given_label
            y = torch.from_numpy(y).type(torch.float).view(-1, 1).cuda()
            z = torch.randn(batch_size, args.gan_dim_g, dtype=torch.float).cuda()
            batch = netG(z, net_y2h(y))
            if denorm:
                batch = ((batch * 0.5 + 0.5) * 255.0).type(torch.uint8)
            collected.append(batch.cpu())
            num_got += len(batch)
            if bar is not None:
                bar.update(min(float(num_got) / nfake, 1) * 100)
    fake_images = torch.cat(collected, dim=0)
    fake_labels = torch.ones(nfake) * given_label  # use assigned label for every sample
    if to_numpy:
        fake_images = fake_images.numpy()
        fake_labels = fake_labels.numpy()
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    return fake_images[0:nfake], fake_labels[0:nfake]
#######################################################################################
''' cDRE training '''
#######################################################################################
if args.subsampling:
    ##############################################
    ''' Pre-trained CNN for feature extraction '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n Pre-trained CNN for feature extraction")
    # filename
    filename_presae_ckpt = save_models_folder + '/ckpt_PreSAEForDRE_epoch_{}_sparsity_{:.3f}_regre_{:.3f}_seed_{}.pth'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.seed)
    print('\n ' + filename_presae_ckpt)
    # training
    if not os.path.isfile(filename_presae_ckpt):
        save_sae_images_InTrain_folder = save_images_folder + '/PreSAEForDRE_reconstImages_sparsity_{:.3f}_regre_{:.3f}_InTrain_{}'.format(args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.seed)
        os.makedirs(save_sae_images_InTrain_folder, exist_ok=True)
        # initialize net
        # the sparse-AE bottleneck width equals the flattened image size
        dre_presae_encoder_net = encoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_decoder_net = decoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_predict_net = decoder_predict(dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_encoder_net = nn.DataParallel(dre_presae_encoder_net)
        dre_presae_decoder_net = nn.DataParallel(dre_presae_decoder_net)
        dre_presae_predict_net = nn.DataParallel(dre_presae_predict_net)
        count_parameters(dre_presae_encoder_net)
        count_parameters(dre_presae_decoder_net)
        count_parameters(dre_presae_predict_net)
        print("\n Start training sparseAE model for feature extraction in the DRE >>>")
        dre_presae_encoder_net, dre_presae_decoder_net, dre_presae_predict_net = train_sparseAE(unique_labels=unique_labels_train_norm, train_images=images_train, train_labels=labels_train_norm, net_encoder=dre_presae_encoder_net, net_decoder=dre_presae_decoder_net, net_predict=dre_presae_predict_net, save_sae_images_folder=save_sae_images_InTrain_folder, path_to_ckpt=save_models_folder)
        # store model
        # only encoder + predictor are used downstream; the decoder is deliberately not saved
        torch.save({
            'encoder_net_state_dict': dre_presae_encoder_net.state_dict(),
            'predict_net_state_dict': dre_presae_predict_net.state_dict(),
            # 'decoder_net_state_dict': dre_presae_decoder_net.state_dict(),
        }, filename_presae_ckpt)
        print("\n End training CNN.")
    else:
        print("\n Loading pre-trained sparseAE for feature extraction in DRE.")
        dre_presae_encoder_net = encoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_predict_net = decoder_predict(dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_encoder_net = nn.DataParallel(dre_presae_encoder_net)
        dre_presae_predict_net = nn.DataParallel(dre_presae_predict_net)
        checkpoint = torch.load(filename_presae_ckpt)
        dre_presae_encoder_net.load_state_dict(checkpoint['encoder_net_state_dict'])
        dre_presae_predict_net.load_state_dict(checkpoint['predict_net_state_dict'])
    #end if
    ##############################################
    ''' cDRE Training '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n cDRE training")
    ### dr model filename
    drefile_fullpath = save_models_folder + "/ckpt_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_lambda_{:.3f}_kappa_{:.3f}_epochs_{}_seed_{}.pth".format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_lambda, args.dre_kappa, args.dre_epochs, args.seed)
    print('\n' + drefile_fullpath)
    path_to_ckpt_in_train = save_models_folder + '/ckpt_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_lambda_{:.3f}_kappa_{:.3f}_seed_{}'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_lambda, args.dre_kappa, args.seed)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)
    dre_loss_file_fullpath = save_models_folder + '/train_loss_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_epochs_{}_lambda_{:.3f}_kappa_{:.3f}_seed_{}.png'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_epochs, args.dre_lambda, args.dre_kappa, args.seed)
    ### dre training
    if args.dre_net in ["MLP3","MLP5"]:
        dre_net = cDR_MLP(args.dre_net, p_dropout=0.5, init_in_dim = args.num_channels*args.img_size*args.img_size, dim_embed = args.gan_dim_embed)
    else:
        raise Exception('Wrong DR name!')
    num_parameters_DR = count_parameters(dre_net)
    dre_net = nn.DataParallel(dre_net)
    #if DR model exists, then load the pretrained model; otherwise, start training the model.
    if not os.path.isfile(drefile_fullpath):
        print("\n Begin Training conditional DR in Feature Space: >>>")
        dre_net, avg_train_loss = train_cdre(kappa=args.dre_kappa, unique_labels=unique_labels_train_norm, train_images=images_train, train_labels=labels_train_norm, dre_net=dre_net, dre_precnn_net=dre_presae_encoder_net, netG=netG, net_y2h=net_embed_y2h, path_to_ckpt=path_to_ckpt_in_train)
        # save model
        torch.save({
            'net_state_dict': dre_net.state_dict(),
        }, drefile_fullpath)
        PlotLoss(avg_train_loss, dre_loss_file_fullpath)
    else:
        # if already trained, load pre-trained DR model
        checkpoint_dre_net = torch.load(drefile_fullpath)
        dre_net.load_state_dict(checkpoint_dre_net['net_state_dict'])
    ##end if not
    # Compute density ratio: function for computing a bunch of images in a numpy array
def comp_cond_density_ratio(imgs, labels, dre_precnn_net=dre_presae_encoder_net, dre_net=dre_net, net_y2h=net_embed_y2h, batch_size=args.samp_batch_size):
assert imgs.max()>1
assert labels.min()>=0 and labels.max()<=1.0
dre_precnn_net = dre_precnn_net.cuda()
dre_net = dre_net.cuda()
net_y2h = net_y2h.cuda()
dre_precnn_net.eval()
dre_net.eval()
net_y2h.eval()
#imgs: a torch tensor
n_imgs = len(imgs)
if batch_size>n_imgs:
batch_size = n_imgs
assert imgs.max().item()>1.0 ##make sure all images are not normalized
assert labels.max()<=1.0 and labels.min()>=0 ##make sure all labels are normalized to [0,1]
##make sure the last iteration has enough samples
imgs = torch.cat((imgs, imgs[0:batch_size]), dim=0)
labels = torch.cat((labels, labels[0:batch_size]), dim=0)
density_ratios = []
# print("\n Begin computing density ratio for images >>")
with torch.no_grad():
n_imgs_got = 0
while n_imgs_got < n_imgs:
batch_images = imgs[n_imgs_got:(n_imgs_got+batch_size)]
batch_images = (batch_images/255.0-0.5)/0.5 ## normalize
batch_labels = labels[n_imgs_got:(n_imgs_got+batch_size)]
batch_images = batch_images.type(torch.float).cuda()
batch_labels = batch_labels.type(torch.float).view(-1,1).cuda()
batch_labels = net_y2h(batch_labels)
batch_features = dre_precnn_net(batch_images)
batch_ratios = dre_net(batch_features, batch_labels)
density_ratios.append(batch_ratios.cpu().detach())
n_imgs_got += batch_size
### while n_imgs_got
density_ratios = torch.cat(density_ratios)
density_ratios = density_ratios[0:n_imgs].numpy()
return density_ratios
# Enhanced sampler based on the trained DR model
# Rejection Sampling:"Discriminator Rejection Sampling"; based on https://github.com/shinseung428/DRS_Tensorflow/blob/master/config.py
def fn_enhancedSampler_given_label(nfake, given_label, batch_size=args.samp_batch_size, verbose=True):
''' given_label is normalized '''
assert 0<=given_label<=1.0
## Burn-in Stage
n_burnin = args.samp_burnin_size
burnin_imgs, burnin_labels = fn_sampleGAN_given_label(n_burnin, given_label, batch_size = batch_size, to_numpy=False, denorm=True)
burnin_densityratios = comp_cond_density_ratio(burnin_imgs, burnin_labels)
# print((burnin_densityratios.min(),np.median(burnin_densityratios),burnin_densityratios.max()))
M_bar = np.max(burnin_densityratios)
del burnin_imgs, burnin_densityratios; gc.collect()
## Rejection sampling
enhanced_imgs = []
if verbose:
pb = SimpleProgressBar()
# pbar = tqdm(total=nfake)
num_imgs = 0
while num_imgs < nfake:
batch_imgs, batch_labels = fn_sampleGAN_given_label(batch_size, given_label, batch_size = batch_size, to_numpy=False, denorm=True)
batch_ratios = comp_cond_density_ratio(batch_imgs, batch_labels)
batch_imgs = batch_imgs.numpy() #convert to numpy array
M_bar = np.max([M_bar, np.max(batch_ratios)])
#threshold
batch_p = batch_ratios/M_bar
batch_psi = np.random.uniform(size=batch_size).reshape(-1,1)
indx_accept = np.where(batch_psi<=batch_p)[0]
if len(indx_accept)>0:
enhanced_imgs.append(batch_imgs[indx_accept])
num_imgs+=len(indx_accept)
del batch_imgs, batch_ratios; gc.collect()
if verbose:
pb.update(np.min([float(num_imgs)*100/nfake,100]))
# pbar.update(len(indx_accept))
# pbar.close()
enhanced_imgs = np.concatenate(enhanced_imgs, axis=0)
enhanced_imgs = enhanced_imgs[0:nfake]
return enhanced_imgs, given_label*np.ones(nfake)
#######################################################################################
''' Sampling '''
#######################################################################################
#--------------------------------------------------------------------------------------
''' Synthetic Data Generation '''
print('\n Start sampling ...')
fake_data_h5file_fullpath = os.path.join(fake_data_folder, args.unfiltered_fake_dataset_filename)
if os.path.isfile(fake_data_h5file_fullpath) and (args.filter or args.adjust):
    print("\n Loading exiting unfiltered fake data >>>")
    hf = h5py.File(fake_data_h5file_fullpath, 'r')
    fake_images = hf['fake_images'][:]
    fake_labels = hf['fake_labels'][:] #unnormalized
    hf.close()
else:
    # choose the set of normalized target labels to condition on
    if args.samp_num_fake_labels>0:
        target_labels_norm = np.linspace(0.0, 1.0, args.samp_num_fake_labels)
    else:
        # fall back to the distinct labels seen in training, normalized to [0,1]
        target_labels_norm = np.sort(np.array(list(set(labels_train))))
        target_labels_norm = fn_norm_labels(target_labels_norm)
    assert target_labels_norm.min()>=0 and target_labels_norm.max()<=1
    if args.subsampling:
        print("\n Generating {} fake images for each of {} distinct labels with subsampling: {}.".format(args.samp_nfake_per_label, len(target_labels_norm), subsampling_method))
        fake_images = []
        fake_labels = []
        for i in trange(len(target_labels_norm)):
            fake_images_i, fake_labels_i = fn_enhancedSampler_given_label(args.samp_nfake_per_label, target_labels_norm[i], batch_size=args.samp_batch_size, verbose=False)
            ### denormalize labels
            fake_labels_i = fn_denorm_labels(fake_labels_i)
            ### append
            fake_images.append(fake_images_i)
            fake_labels.append(fake_labels_i)
        ##end for i
    else:
        print("\n Generating {} fake images for each of {} distinct labels without subsampling.".format(args.samp_nfake_per_label, len(target_labels_norm)))
        fake_images = []
        fake_labels = []
        for i in trange(len(target_labels_norm)):
            fake_images_i, fake_labels_i = fn_sampleGAN_given_label(nfake=args.samp_nfake_per_label, given_label=target_labels_norm[i], batch_size=args.samp_batch_size, verbose=False)
            ### denormalize labels
            fake_labels_i = fn_denorm_labels(fake_labels_i)
            ### append
            fake_images.append(fake_images_i)
            fake_labels.append(fake_labels_i)
        ##end for i
    fake_images = np.concatenate(fake_images, axis=0)
    fake_labels = np.concatenate(fake_labels, axis=0)
    assert len(fake_images) == args.samp_nfake_per_label*len(target_labels_norm)
    assert len(fake_labels) == args.samp_nfake_per_label*len(target_labels_norm)
    assert fake_images.max()>1
    assert fake_labels.max()>1
    ##end if
##end if os
#--------------------------------------------------------------------------------------
''' Filtered and Adjusted by a pre-trained CNN '''
if args.filter or args.adjust:
    print("\n -----------------------------------------------------------------------------------------")
    print("\n Start Filtering Synthetic Data >>>")
    ## dataset
    assert fake_labels.max()>1
    dataset_filtering = IMGs_dataset(fake_images, fake_labels, normalize=True)
    dataloader_filtering = torch.utils.data.DataLoader(dataset_filtering, batch_size=args.samp_filter_batch_size, shuffle=False, num_workers=args.num_workers)
    ## load pre-trained cnn
    filter_precnn_net = cnn_dict[args.samp_filter_precnn_net]().cuda()
    checkpoint = torch.load(args.samp_filter_precnn_net_ckpt_path)
    filter_precnn_net.load_state_dict(checkpoint['net_state_dict'])
    ## evaluate on fake data
    fake_mae_loss = []
    fake_labels_pred = []
    filter_precnn_net.eval()
    pbar = tqdm(total=len(fake_images))
    with torch.no_grad():
        total = 0
        loss_all = 0
        for batch_idx, (images, labels) in enumerate(dataloader_filtering):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.float) #unnormalized label
            labels_pred = filter_precnn_net(images)
            labels_pred = torch.clip(labels_pred, 0, 1)
            labels_pred = fn_denorm_labels(labels_pred.cpu()) #denormalize
            labels = labels.view(-1)
            labels_pred = labels_pred.view(-1)
            # per-image absolute error between CNN prediction and assigned label
            loss = torch.abs(labels_pred-labels)
            loss_all += loss.sum().item()
            total += labels.size(0)
            fake_labels_pred.append(labels_pred.numpy())
            fake_mae_loss.append(loss.numpy())
            pbar.update(len(images))
    print('\n Test MAE of {} on the {} fake images: {}.'.format(args.samp_filter_precnn_net, len(fake_images), loss_all / total))
    fake_mae_loss = np.concatenate(fake_mae_loss, axis=0)
    fake_labels_pred = np.concatenate(fake_labels_pred, axis=0)
    # keep only samples whose per-image MAE is below the chosen quantile cutoff
    mae_cutoff_point = np.quantile(fake_mae_loss, q=args.samp_filter_mae_percentile_threshold)
    indx_sel = np.where(fake_mae_loss<mae_cutoff_point)[0]
    fake_images = fake_images[indx_sel]
    if args.adjust:
        fake_labels = fake_labels_pred[indx_sel] #adjust the labels of fake data by using the pre-trained CNN
    else:
        fake_labels = fake_labels[indx_sel]
    ## histogram of MAEs
    fig = plt.figure()
    ax = plt.subplot(111)
    n, bins, patches = plt.hist(fake_mae_loss, 100, density=False, facecolor='g', alpha=0.75)
    plt.axvline(x=mae_cutoff_point, c='grey')
    plt.xlabel('MAE')
    plt.ylabel('Frequency')
    plt.title('Histogram of MAE')
    plt.grid(True)
    #plt.show()
    plt.savefig(os.path.join(fake_data_folder, 'histogram_of_fake_data_MAE_with_subsampling_{}_MAEFilter_{}.png'.format(subsampling_method, args.samp_filter_mae_percentile_threshold)))
#--------------------------------------------------------------------------------------
''' Dump synthetic data to h5 file '''
dump_fake_images_filename = os.path.join(fake_data_folder, 'utkface_fake_images_{}_{}_NFakePerLabel_{}_seed_{}.h5'.format(args.gan_arch, subsampling_method, args.samp_nfake_per_label, args.seed))
print(dump_fake_images_filename)
if not os.path.isfile(dump_fake_images_filename):
    with h5py.File(dump_fake_images_filename, "w") as f:
        f.create_dataset('fake_images', data = fake_images, dtype='uint8', compression="gzip", compression_opts=6)
        f.create_dataset('fake_labels', data = fake_labels, dtype='int')
else:
    # an existing dump takes precedence over the freshly generated arrays
    print('\n Start loading generated fake data...')
    with h5py.File(dump_fake_images_filename, "r") as f:
        fake_images = f['fake_images'][:]
        fake_labels = f['fake_labels'][:]
print("\n The dim of the fake dataset: ", fake_images.shape)
print("\n The range of generated fake dataset: MIN={}, MAX={}.".format(fake_labels.min(), fake_labels.max()))
### visualize data distribution
# count how many fake images fall on each integer age in [1, max_label]
unique_labels_unnorm = np.arange(1,int(args.max_label)+1)
frequencies = []
for i in range(len(unique_labels_unnorm)):
    indx_i = np.where(fake_labels==unique_labels_unnorm[i])[0]
    frequencies.append(len(indx_i))
frequencies = np.array(frequencies).astype(int)
width = 0.8
x = np.arange(1,int(args.max_label)+1)
# plot data in grouped manner of bar type
fig, ax = plt.subplots(1,1, figsize=(6,4))
ax.grid(color='lightgrey', linestyle='--', zorder=0)
ax.bar(x, frequencies, width, align='center', color='tab:green', zorder=3)
ax.set_xlabel("Age")
ax.set_ylabel("Frequency")
plt.tight_layout()
plt.savefig(os.path.join(fake_data_folder, "utkface_fake_images_{}_{}_NFakePerLabel_{}_data_dist.pdf".format(args.gan_arch, subsampling_method, args.samp_nfake_per_label)))
plt.close()
print('\n Frequence of ages: MIN={}, MEAN={}, MAX={}.'.format(np.min(frequencies),np.mean(frequencies),np.max(frequencies)))
print("\n===================================================================================================")
| 34,527 | 45.659459 | 545 | py |
cGAN-KD | cGAN-KD-main/UTKFace/utils.py | """
Some helpful functions
"""
import numpy as np
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch.nn import functional as F
import sys
import PIL
from PIL import Image
# ### import my stuffs ###
# from models import *
# ################################################################################
# Progress Bar
class SimpleProgressBar():
    """Minimal console progress bar that redraws itself in place on stdout."""
    def __init__(self, width=50):
        self.last_x = -1     # last integer percent drawn; -1 means nothing drawn yet
        self.width = width   # number of characters in the bar body
    def update(self, x):
        assert 0 <= x <= 100  # `x`: progress in percent ( between 0 and 100)
        pct = int(x)
        if pct == self.last_x:
            return  # skip redraw when the integer percent has not changed
        self.last_x = pct
        filled = int(self.width * (x / 100.0))
        bar = '#' * filled + '.' * (self.width - filled)
        sys.stdout.write('\r%d%% [%s]' % (pct, bar))
        sys.stdout.flush()
        if x == 100:
            print('')
################################################################################
# torch dataset from numpy array
class IMGs_dataset(torch.utils.data.Dataset):
    """Torch dataset over a numpy image array, optionally paired with labels.

    When `normalize` is True, pixel values in [0,255] are mapped to [-1,1].
    """
    def __init__(self, images, labels=None, normalize=False):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception('images (' + str(len(self.images)) +') and labels ('+str(len(self.labels))+') do not have the same length!!!')
        self.normalize = normalize
    def __getitem__(self, index):
        image = self.images[index]
        if self.normalize:
            image = (image/255.0 - 0.5)/0.5  # [0,255] -> [-1,1]
        if self.labels is None:
            return image
        return (image, self.labels[index])
    def __len__(self):
        return self.n_images
class IMGs_dataset_v2(torch.utils.data.Dataset):
    """Torch dataset yielding (image, label, ground-truth label) triples.

    All three arrays must have the same length; with `normalize` True the
    images are mapped from [0,255] to [-1,1].
    """
    def __init__(self, images, labels, labels_gt, normalize=False):
        super(IMGs_dataset_v2, self).__init__()
        assert len(images) == len(labels)
        assert len(labels) == len(labels_gt)
        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        self.labels_gt = labels_gt
        self.normalize = normalize
    def __getitem__(self, index):
        img = self.images[index]
        if self.normalize:
            img = (img/255.0 - 0.5)/0.5  # [0,255] -> [-1,1]
        return (img, self.labels[index], self.labels_gt[index])
    def __len__(self):
        return self.n_images
################################################################################
def PlotLoss(loss, filename):
    """Plot a per-epoch training-loss curve and save it to `filename`.

    `loss` is a sequence of per-epoch loss values; epochs are numbered from 1
    on the x-axis. Rendering is headless (agg backend), so this works on
    machines without a display.
    """
    x_axis = np.arange(start = 1, stop = len(loss)+1)
    plt.switch_backend('agg')
    try:
        # The 'seaborn' style alias was deprecated and then removed in
        # matplotlib >= 3.8; fall back to the default style if unavailable.
        mpl.style.use('seaborn')
    except (OSError, ValueError):
        pass
    fig = plt.figure()
    ax = plt.subplot(111)
    # label the curve so plt.legend() has an artist to show (the original
    # call produced a "no handles" warning and an empty legend)
    ax.plot(x_axis, np.array(loss), label='training loss')
    plt.xlabel("epoch")
    plt.ylabel("training loss")
    plt.legend()
    #ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), shadow=True, ncol=3)
    #plt.title('Training Loss')
    plt.savefig(filename)
    plt.close(fig)  # release the figure; repeated calls otherwise accumulate open figures
################################################################################
# compute entropy of class labels; labels is a numpy array
def compute_entropy(labels, base=None):
    """Shannon entropy of a label array; natural log unless `base` is given."""
    _, counts = np.unique(labels, return_counts=True)
    probs = counts / counts.sum()
    log_base = np.e if base is None else base
    return -(probs * np.log(probs) / np.log(log_base)).sum()
def predict_class_labels(net, images, batch_size=500, verbose=False, num_workers=0):
    """Predict integer class labels for a numpy image array using `net` on GPU.

    `net` is expected to return a pair whose first element holds the class
    scores (argmax over dim 1 gives the predicted label).
    """
    net = net.cuda()
    net.eval()
    n = len(images)
    batch_size = min(batch_size, n)
    loader = torch.utils.data.DataLoader(
        IMGs_dataset(images, normalize=False),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # over-allocate one extra batch worth of slots; trimmed to n before returning
    preds = np.zeros(n + batch_size)
    with torch.no_grad():
        got = 0
        bar = SimpleProgressBar() if verbose else None
        for _, batch in enumerate(loader):
            batch = batch.type(torch.float).cuda()
            cur = len(batch)
            scores, _ = net(batch)
            _, batch_pred = torch.max(scores.data, 1)
            preds[got:(got + cur)] = batch_pred.detach().cpu().numpy().reshape(-1)
            got += cur
            if bar is not None:
                bar.update((float(got) / n) * 100)
        #end for
    return preds[0:n]
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of parameter elements in `module`, optionally printing it."""
    total = sum(p.data.nelement() for p in module.parameters())
    if verbose:
        print('Number of parameters: {}'.format(total))
    return total
| 5,139 | 29.595238 | 143 | py |
cGAN-KD | cGAN-KD-main/UTKFace/train_cdre.py | '''
Functions for Training Class-conditional Density-ratio model
'''
import torch
import torch.nn as nn
import numpy as np
import os
import timeit
import gc
from utils import *
from opts import gen_synth_data_opts
''' Settings '''
args = gen_synth_data_opts()
# some parameters in the opts
# shorthand module-level copies of the cDRE-related CLI options used below
dim_gan = args.gan_dim_g
dre_net_name = args.dre_net
dre_epochs = args.dre_epochs
dre_lr_base = args.dre_lr_base
dre_lr_decay_factor = args.dre_lr_decay_factor
# decay epochs arrive as an underscore-separated string, e.g. "80_150"
dre_lr_decay_epochs = (args.dre_lr_decay_epochs).split("_")
dre_lr_decay_epochs = [int(epoch) for epoch in dre_lr_decay_epochs]
dre_lambda = args.dre_lambda
dre_resume_epoch = args.dre_resume_epoch
dre_batch_size = args.dre_batch_size
dre_optimizer = args.dre_optimizer
dre_save_freq = args.dre_save_freq
## horizontal flip images
def hflip_images(batch_images):
    """Randomly mirror each image of a numpy NCHW batch horizontally (prob 0.5), in place."""
    coin = np.random.uniform(0, 1, len(batch_images))
    flip_idx = np.where(coin > 0.5)[0]
    # fancy indexing copies the selected rows first, so the flipped views are safe to assign back
    batch_images[flip_idx] = np.flip(batch_images[flip_idx], axis=3)
    return batch_images
# def hflip_images(batch_images):
# ''' for torch tensors '''
# uniform_threshold = np.random.uniform(0,1,len(batch_images))
# indx_gt = np.where(uniform_threshold>0.5)[0]
# batch_images[indx_gt] = torch.flip(batch_images[indx_gt], dims=[3])
# return batch_images
## normalize images
def normalize_images(batch_images):
    """Map images from pixel range [0, 255] to the GAN's [-1, 1] range."""
    return (batch_images / 255.0 - 0.5) / 0.5
# training function
def train_cdre(kappa, unique_labels, train_images, train_labels, dre_net, dre_precnn_net, netG, net_y2h, net_filter=None, reg_niters=10, path_to_ckpt=None):
    """Train the conditional density-ratio network `dre_net` in feature space.

    Real images come from `train_images` (unnormalized, pixel range (1,255]) with
    normalized labels `train_labels` in [0,1]; fake images are drawn from the frozen
    generator `netG`. Features are extracted by the frozen `dre_precnn_net`, labels
    embedded by `net_y2h`. When `kappa > 0` and `net_filter` is given, fake samples
    whose predicted label lies outside the +/- kappa vicinity of the real label are
    re-generated for at most `reg_niters` rounds and then dropped.

    Returns (dre_net, avg_train_loss): the trained net (moved back to CPU) and the
    list of per-epoch average losses. Checkpoints are written under `path_to_ckpt`
    every `dre_save_freq` epochs; training resumes from `dre_resume_epoch` if set.
    """
    ##data; train_images are unnormalized, train_labels are normalized
    ## unique_labels: normalized unique labels
    assert train_images.max()>1.0 and train_images.max()<=255.0
    assert train_labels.max()<=1.0 and train_labels.min()>=0
    assert 0<=kappa<=1
    indx_all = np.arange(len(train_labels))
    ''' learning rate decay '''
    def adjust_learning_rate(optimizer, epoch):
        # step decay: multiply the base lr by the decay factor once per milestone already passed
        lr = dre_lr_base
        num_decays = len(dre_lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= dre_lr_decay_epochs[decay_i]:
                lr = lr * dre_lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    #end def adjust lr
    # nets: all auxiliary networks stay frozen in eval mode; only dre_net is optimized
    dre_precnn_net = dre_precnn_net.cuda()
    netG = netG.cuda()
    net_y2h = net_y2h.cuda()
    dre_net = dre_net.cuda()
    dre_precnn_net.eval()
    netG.eval()
    net_y2h.eval()
    if net_filter is not None and kappa>1e-30: #the predicting branch of the sparse AE
        print("\n Do filtering in cDRE training with kappa {}.".format(kappa))
        net_filter = net_filter.cuda()
        net_filter.eval()
    # define optimizer
    if dre_optimizer=="SGD":
        optimizer = torch.optim.SGD(dre_net.parameters(), lr = dre_lr_base, momentum= 0.9, weight_decay=1e-4)
    else:
        optimizer = torch.optim.Adam(dre_net.parameters(), lr = dre_lr_base, betas=(0.5, 0.999), weight_decay=1e-4)
    if path_to_ckpt is not None and dre_resume_epoch>0:
        print("Loading ckpt to resume training dre_net >>>")
        ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, dre_resume_epoch)
        checkpoint = torch.load(ckpt_fullpath)
        dre_net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        #load d_loss and g_loss
        logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, dre_resume_epoch)
        if os.path.isfile(logfile_fullpath):
            avg_train_loss = list(np.load(logfile_fullpath))
        else:
            avg_train_loss = []
    else:
        avg_train_loss = []
    start_time = timeit.default_timer()
    for epoch in range(dre_resume_epoch, dre_epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss = 0
        for batch_idx in range(len(train_labels)//dre_batch_size):
            dre_net.train()
            #################################################
            ''' generate target labels '''
            # draw labels with replacement, then pick a matching real image for each drawn label
            batch_target_labels = np.random.choice(unique_labels, size=dre_batch_size, replace=True)
            batch_unique_labels, batch_unique_label_counts = np.unique(batch_target_labels, return_counts=True)
            batch_real_indx = []
            for j in range(len(batch_unique_labels)):
                indx_j = np.where(train_labels==batch_unique_labels[j])[0]
                indx_j = np.random.choice(indx_j, size=batch_unique_label_counts[j])
                batch_real_indx.append(indx_j)
            batch_real_indx = np.concatenate(batch_real_indx)
            batch_real_indx = batch_real_indx.reshape(-1)
            # batch_real_indx = np.random.choice(indx_all, size=dre_batch_size, replace=True).reshape(-1)
            #################################################
            ''' density ratios of real images '''
            ## get some real images for training
            batch_real_images = train_images[batch_real_indx]
            batch_real_images = hflip_images(batch_real_images) ## randomly flip real images
            batch_real_images = normalize_images(batch_real_images) ## normalize real images
            batch_real_images = torch.from_numpy(batch_real_images).type(torch.float).cuda()
            assert batch_real_images.max().item()<=1.0
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).view(-1,1).cuda()
            #################################################
            ''' density ratios of fake images '''
            ## generate fake labels first
            if kappa>1e-30:
                # sample each fake label uniformly from the +/- kappa vicinity of its real label
                batch_fake_labels = np.zeros(dre_batch_size)
                vicinity_start = torch.zeros(dre_batch_size).cuda()
                vicinity_end = torch.zeros(dre_batch_size).cuda()
                for j in range(dre_batch_size):
                    # start_j = max(0, batch_real_labels[j].item()-kappa)
                    # end_j = min(1, batch_real_labels[j].item()+kappa)
                    start_j = batch_real_labels[j].item()-kappa
                    end_j = batch_real_labels[j].item()+kappa
                    assert batch_real_labels[j].item()>=start_j and batch_real_labels[j].item()<=end_j
                    batch_fake_labels[j] = np.random.uniform(low=start_j, high=end_j, size=1)
                    vicinity_start[j] = start_j
                    vicinity_end[j] = end_j
                batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).view(-1,1).cuda()
                ## then, generate fake images
                ## drop fake images with predicted labels not in the vicinity
                with torch.no_grad():
                    z = torch.randn(dre_batch_size, dim_gan, dtype=torch.float).cuda()
                    batch_fake_images = netG(z, net_y2h(batch_fake_labels))
                    batch_fake_images = batch_fake_images.detach()
                    batch_fake_labels_pred = net_filter(batch_fake_images)
                    indx_drop_1 = batch_fake_labels_pred.view(-1)<vicinity_start
                    indx_drop_2 = batch_fake_labels_pred.view(-1)>vicinity_end
                    indx_drop = torch.cat((indx_drop_1.view(-1,1), indx_drop_2.view(-1,1)), dim=1)
                    indx_drop = torch.any(indx_drop, 1)
                    ## regenerate fake images whose labels are not in the vicinity; at most niter_tmp rounds
                    niter_tmp = 0
                    while indx_drop.sum().item()>0 and niter_tmp<=reg_niters:
                        batch_size_tmp = indx_drop.sum().item()
                        batch_fake_labels_tmp = batch_fake_labels[indx_drop]
                        assert len(batch_fake_labels_tmp)==batch_size_tmp
                        ##update corresponding fake images
                        z = torch.randn(batch_size_tmp, dim_gan, dtype=torch.float).cuda()
                        batch_fake_images_tmp = netG(z, net_y2h(batch_fake_labels_tmp))
                        batch_fake_images[indx_drop] = batch_fake_images_tmp
                        batch_fake_labels_pred[indx_drop] = net_filter(batch_fake_images_tmp)
                        ##update indices of dropped images
                        indx_drop_1 = batch_fake_labels_pred.view(-1)<vicinity_start
                        indx_drop_2 = batch_fake_labels_pred.view(-1)>vicinity_end
                        indx_drop = torch.cat((indx_drop_1.view(-1,1), indx_drop_2.view(-1,1)),dim=1)
                        indx_drop = torch.any(indx_drop, 1)
                        niter_tmp+=1
                        # print(niter_tmp, indx_drop.sum().item(), dre_batch_size)
                    ###end while
                    # samples still outside the vicinity after reg_niters rounds are dropped for this step
                    indx_keep = (batch_fake_labels_pred.view(-1)>=vicinity_start)*(batch_fake_labels_pred.view(-1)<=vicinity_end)
                    assert indx_keep.sum().item()>0
                    batch_fake_images = batch_fake_images[indx_keep]
                    batch_real_images = batch_real_images[indx_keep] ##if do not do subsampling for real images too, the cDRE training does not converge
                    batch_real_labels = batch_real_labels[indx_keep] ##note that, here is batch_real_labels not batch_fake_labels!!!!
            else:
                with torch.no_grad():
                    z = torch.randn(dre_batch_size, dim_gan, dtype=torch.float).cuda()
                    batch_fake_images = netG(z, net_y2h(batch_real_labels))
                    batch_fake_images = batch_fake_images.detach()
            ## extract features from real and fake images
            with torch.no_grad():
                batch_features_real = dre_precnn_net(batch_real_images)
                batch_features_real = batch_features_real.detach()
                batch_features_fake = dre_precnn_net(batch_fake_images)
                batch_features_fake = batch_features_fake.detach()
            del batch_real_images, batch_fake_images; gc.collect()
            ## density ratios for real images
            DR_real = dre_net(batch_features_real, net_y2h(batch_real_labels))
            ## density ratios for fake images
            DR_fake = dre_net(batch_features_fake, net_y2h(batch_real_labels)) ##Please note that use batch_real_labels here !!!!
            #################################################
            #Softplus loss (Softplus divergence) plus the penalty keeping E[DR_fake] near 1
            softplus_fn = torch.nn.Softplus(beta=1,threshold=20)
            sigmoid_fn = torch.nn.Sigmoid()
            SP_div = torch.mean(sigmoid_fn(DR_fake) * DR_fake) - torch.mean(softplus_fn(DR_fake)) - torch.mean(sigmoid_fn(DR_real))
            penalty = dre_lambda * (torch.mean(DR_fake) - 1)**2
            loss = SP_div + penalty
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
            print("cDRE+{}+lambda{}: [step {}/{}] [epoch {}/{}] [train loss {:.5f}] [fake batch {}/{}] [Time {:.4f}]".format(dre_net_name, dre_lambda, batch_idx+1, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, train_loss/(batch_idx+1), len(batch_features_fake), dre_batch_size, timeit.default_timer()-start_time))
            # #################################################
            # ### debugging
            # dre_net.eval()
            # with torch.no_grad():
            #     DR_real2 = dre_net(batch_features_real, net_y2h(batch_real_labels))
            #     DR_fake2 = dre_net(batch_features_fake, net_y2h(batch_fake_labels))
            #     print("[Iter {}/{}], [epoch {}/{}], Debug (train):{:.4f}/{:.4f}".format(batch_idx, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, DR_real.mean(),DR_fake.mean()))
            #     print("[Iter {}/{}], [epoch {}/{}], Debug (eval):{:.4f}/{:.4f}".format(batch_idx, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, DR_real2.mean(),DR_fake2.mean()))
        # end for batch_idx
        # print("cDRE+{}+lambda{}: [epoch {}/{}] [train loss {}] [Time {}]".format(dre_net_name, dre_lambda, epoch+1, dre_epochs, train_loss/(batch_idx+1), timeit.default_timer()-start_time))
        avg_train_loss.append(train_loss/(batch_idx+1))
        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) % dre_save_freq == 0 or (epoch+1)==dre_epochs):
            ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, epoch+1)
            torch.save({
                'epoch': epoch,
                'net_state_dict': dre_net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, ckpt_fullpath)
            # save loss
            logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, epoch+1)
            np.savez(logfile_fullpath, np.array(avg_train_loss))
    #end for epoch
    #back to memory
    dre_precnn_net = dre_precnn_net.cpu()
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    dre_net = dre_net.cpu()
    if net_filter is not None:
        net_filter = net_filter.cpu()
    return dre_net, avg_train_loss
#end for def
| 13,429 | 45.958042 | 323 | py |
cGAN-KD | cGAN-KD-main/UTKFace/test_infer_speed.py | import os
import argparse
import shutil
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
import csv
from tqdm import tqdm
import gc
import h5py
### import my stuffs ###
from models import cnn_dict
# Command-line options for the inference-speed benchmark.
parser = argparse.ArgumentParser()
parser.add_argument('--cnn_name', type=str, default='',
help='The CNN used in the classification.')
parser.add_argument('--nsamp', type=int, default=10000, help='number of images')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--img_size', type=int, default=64)
args = parser.parse_args()
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of scalar parameters in ``module``.

    Args:
        module: any ``torch.nn.Module``.
        verbose: when True, also print the count.

    Returns:
        int: the number of parameter elements (trainable or not).
    """
    # sum over a generator with numel(): no intermediate list, no .data access.
    num_parameters = sum(p.numel() for p in module.parameters())
    if verbose:
        print('Number of parameters: {}'.format(num_parameters))
    return num_parameters
# model: look up the CNN class by name and instantiate it
net = cnn_dict[args.cnn_name]()
num_parameters = count_parameters(net, verbose=False)
## randomly generate args.nsamp images
# NOTE(review): np.random.randint's `high` is exclusive, so pixel values are
# 0..254 here, not 0..255 — presumably irrelevant for a speed benchmark.
images = np.random.randint(low=0, high=255, size=args.nsamp*3*args.img_size**2).reshape((args.nsamp, 3, args.img_size, args.img_size))
print(images.shape)
class IMGs_dataset(torch.utils.data.Dataset):
    """Thin Dataset wrapper over in-memory images, optionally paired with labels.

    When ``normalize`` is True, pixel values in [0, 255] are rescaled to the
    tanh range [-1, 1] on access.
    """

    def __init__(self, images, labels=None, normalize=False):
        super(IMGs_dataset, self).__init__()
        if labels is not None and len(images) != len(labels):
            raise Exception(
                'images (' + str(len(images)) + ') and labels ('
                + str(len(labels)) + ') do not have the same length!!!')
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        self.normalize = normalize

    def __getitem__(self, index):
        image = self.images[index]
        if self.normalize:
            # [0,255] -> [0,1] -> [-1,1]
            image = (image / 255.0 - 0.5) / 0.5
        if self.labels is None:
            return image
        return (image, self.labels[index])

    def __len__(self):
        return self.n_images
# Wrap the random images in a DataLoader and time one full inference pass.
trainset = IMGs_dataset(images, None, normalize=True)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False)
net = net.cuda()
net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    start_time = timeit.default_timer()
    for _, images in enumerate(dataloader):
        images = images.type(torch.float).cuda()
        outputs = net(images)
# total wall-clock time for all batches; FPS = images per second
total_time = timeit.default_timer()-start_time
print("\n {} has {} parameters...".format(args.cnn_name, num_parameters))
print('\r Total time: {}s; Inference FPS: {}'.format(total_time, args.nsamp/total_time))
| 3,059 | 30.22449 | 143 | py |
cGAN-KD | cGAN-KD-main/UTKFace/train_cnn.py | ''' For CNN training and testing. '''
import os
import timeit
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
## horizontal flipping
def hflip_images(batch_images):
    """Randomly mirror a batch of NCHW numpy images along the width axis (in place)."""
    # Each image is flipped independently with probability 0.5.
    coin_flips = np.random.uniform(0, 1, len(batch_images))
    flip_rows = np.nonzero(coin_flips > 0.5)[0]
    batch_images[flip_rows] = batch_images[flip_rows][:, :, :, ::-1]
    return batch_images
## normalize images
def normalize_images(batch_images):
    """Map uint8-range pixel values [0, 255] to the tanh range [-1, 1]."""
    scaled = batch_images / 255.0
    return (scaled - 0.5) / 0.5
''' function for cnn training '''
def train_cnn(net, net_name, train_images, train_labels, testloader, epochs, resume_epoch=0, save_freq=40, batch_size=128, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=(150, 250), weight_decay=1e-4, path_to_ckpt=None, fn_denorm_labels=None):
    '''
    Train a regression CNN with MSE loss and step learning-rate decay.

    train_images: unnormalized images (pixel values in (1, 255])
    train_labels: labels normalized to [0,1]
    testloader: DataLoader yielding (normalized image, normalized label) batches
    lr_decay_epochs: epochs at which the lr is multiplied by lr_decay_factor.
        FIX: default changed from a mutable list to an immutable tuple (the
        classic mutable-default-argument pitfall); it is only indexed/iterated.
    fn_denorm_labels: maps normalized labels back to their original scale
        (used for the per-epoch test MAE)
    Returns the trained net (still on the GPU).
    '''
    assert train_images.max()>1 and train_images.max()<=255.0 and train_images.min()>=0
    assert train_labels.min()>=0 and train_labels.max()<=1.0

    # sorted unique labels; only needed by the (removed) label-balanced sampler
    unique_train_labels = np.sort(np.array(list(set(train_labels))))
    indx_all = np.arange(len(train_labels))

    def adjust_learning_rate(optimizer, epoch):
        """Decrease the learning rate once per passed decay milestone."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    net = net.cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=lr_base, momentum=0.9, weight_decay=weight_decay)

    # optionally resume from a checkpoint (restores net, optimizer and RNG state)
    if path_to_ckpt is not None and resume_epoch>0:
        save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, resume_epoch)
        checkpoint = torch.load(save_file)
        net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])

    train_loss_all = []
    test_mae_all = []

    start_time = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        net.train()
        train_loss = 0
        adjust_learning_rate(optimizer, epoch)
        for batch_idx in range(len(train_labels)//batch_size):
            # uniform sampling with replacement over the whole training set
            batch_train_indx = np.random.choice(indx_all, size=batch_size, replace=True).reshape(-1)

            ### get some real images for training
            batch_train_images = train_images[batch_train_indx]
            batch_train_images = hflip_images(batch_train_images) ## randomly flip real images
            batch_train_images = normalize_images(batch_train_images) ## normalize real images
            batch_train_images = torch.from_numpy(batch_train_images).type(torch.float).cuda()
            assert batch_train_images.max().item()<=1.0
            batch_train_labels = train_labels[batch_train_indx]
            batch_train_labels = torch.from_numpy(batch_train_labels).type(torch.float).view(-1,1).cuda()

            # forward pass
            outputs = net(batch_train_images)
            loss = criterion(outputs, batch_train_labels)

            # backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / (len(train_labels)//batch_size)
        train_loss_all.append(train_loss)

        # evaluate on the test set each epoch (MAE in the original label scale)
        test_mae = test_cnn(net, testloader, fn_denorm_labels=fn_denorm_labels, verbose=False)
        test_mae_all.append(test_mae)

        print('%s: [epoch %d/%d] train_loss:%.3f, test_mae:%.3f Time: %.4f' % (net_name, epoch+1, epochs, train_loss, test_mae, timeit.default_timer()-start_time))

        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) % save_freq == 0 or (epoch+1) == epochs) :
            save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, epoch+1)
            torch.save({
                    'net_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch

    return net
def test_cnn(net, testloader, fn_denorm_labels=None, verbose=False):
    """Evaluate `net` on `testloader` and return the mean absolute error.

    fn_denorm_labels maps normalized labels/predictions back to the original
    label scale before the MAE is computed; it is assumed to be non-None here.
    """
    net = net.cuda()
    net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        abs_diff_avg = 0
        total = 0
        for _, (images, labels) in enumerate(testloader):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.float).view(-1).cpu().numpy()
            outputs = net(images)
            outputs = outputs.view(-1).cpu().numpy()
            # compare in the original (denormalized) label space
            labels = fn_denorm_labels(labels)
            outputs = fn_denorm_labels(outputs)
            abs_diff_avg += np.sum(np.abs(labels-outputs))
            total += len(labels)
    test_mae = abs_diff_avg/total
    if verbose:
        print('\n Test MAE: {}.'.format(test_mae))
    return test_mae
| 6,160 | 37.748428 | 249 | py |
cGAN-KD | cGAN-KD-main/UTKFace/train_sparseAE.py |
import torch
import torch.nn as nn
from torchvision.utils import save_image
import numpy as np
import os
import timeit
from utils import SimpleProgressBar
from opts import gen_synth_data_opts
''' Settings '''
args = gen_synth_data_opts()
# some parameters in the opts
# Hyper-parameters for sparse-autoencoder pretraining, read once at import
# time and used as module-level globals by train_sparseAE below.
epochs = args.dre_presae_epochs
base_lr = args.dre_presae_lr_base
lr_decay_epochs = args.dre_presae_lr_decay_freq
lr_decay_factor = args.dre_presae_lr_decay_factor
lambda_sparsity = args.dre_presae_lambda_sparsity
lambda_regression = args.dre_presae_lambda_regression
resume_epoch = args.dre_presae_resume_epoch
# NOTE(review): "weigth_decay" typo is preserved — the name is referenced below.
weigth_decay = args.dre_presae_weight_decay
batch_size = args.dre_presae_batch_size_train
## horizontal flipping
def hflip_images(batch_images):
    """Randomly mirror a batch of NCHW numpy images along the width axis (in place)."""
    # Each image is flipped independently with probability 0.5.
    coin_flips = np.random.uniform(0, 1, len(batch_images))
    flip_rows = np.nonzero(coin_flips > 0.5)[0]
    batch_images[flip_rows] = batch_images[flip_rows][:, :, :, ::-1]
    return batch_images
## normalize images
def normalize_images(batch_images):
    """Map uint8-range pixel values [0, 255] to the tanh range [-1, 1]."""
    scaled = batch_images / 255.0
    return (scaled - 0.5) / 0.5
# decay learning rate every args.dre_lr_decay_epochs epochs
def adjust_learning_rate(epoch, epochs, optimizer, base_lr, lr_decay_epochs, lr_decay_factor):
    """Set every param group's lr to base_lr decayed once per completed decay interval."""
    lr = base_lr
    num_intervals = epochs // lr_decay_epochs
    for step in range(1, num_intervals + 1):
        if epoch >= step * lr_decay_epochs:
            lr = lr * lr_decay_factor
    for group in optimizer.param_groups:
        group['lr'] = lr
def train_sparseAE(unique_labels, train_images, train_labels, net_encoder, net_decoder, net_predict, save_sae_images_folder, path_to_ckpt=None):
    '''
    Jointly train a sparse autoencoder (encoder+decoder) and a label regressor
    on its latent features; loss = reconstruction MSE + sparsity penalty
    + lambda_regression * label MSE. Uses module-level hyper-parameters
    (epochs, base_lr, batch_size, ...) parsed at import time.

    train_images: unnormalized
    train_labels: normalized to [0,1]
    Returns (net_encoder, net_decoder, net_predict) moved back to the CPU.
    '''
    assert train_images.max()>1.0 and train_images.max()<=255.0
    assert train_labels.max()<=1.0 and train_labels.min()>=0
    # nets
    net_encoder = net_encoder.cuda()
    net_decoder = net_decoder.cuda()
    net_predict = net_predict.cuda()
    # define optimizer (one optimizer over all three nets' parameters)
    params = list(net_encoder.parameters()) + list(net_decoder.parameters()) + list(net_predict.parameters())
    optimizer = torch.optim.SGD(params, lr = base_lr, momentum= 0.9, weight_decay=weigth_decay)
    # optimizer = torch.optim.Adam(params, lr = base_lr, betas=(0, 0.999), weight_decay=weigth_decay)
    # criterion
    criterion = nn.MSELoss()
    if path_to_ckpt is not None and resume_epoch>0:
        print("Loading ckpt to resume training sparseAE >>>")
        ckpt_fullpath = path_to_ckpt + "/PreSAEForDRE_checkpoint_intrain/PreSAEForDRE_checkpoint_epoch_{}_sparsity_{:.3f}_regre_{:.3f}.pth".format(resume_epoch, lambda_sparsity, lambda_regression)
        checkpoint = torch.load(ckpt_fullpath)
        net_encoder.load_state_dict(checkpoint['net_encoder_state_dict'])
        net_decoder.load_state_dict(checkpoint['net_decoder_state_dict'])
        net_predict.load_state_dict(checkpoint['net_predict_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
        gen_iterations = checkpoint['gen_iterations']
    else:
        gen_iterations = 0
    start_time = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        adjust_learning_rate(epoch, epochs, optimizer, base_lr, lr_decay_epochs, lr_decay_factor)
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        for batch_idx in range(len(train_labels)//batch_size):
            net_encoder.train()
            net_decoder.train()
            net_predict.train()
            #################################################
            ''' generate target labels '''
            # Sample labels uniformly from the unique label set, then draw that
            # many real images per sampled label (label-balanced batch).
            batch_target_labels = np.random.choice(unique_labels, size=batch_size, replace=True)
            batch_unique_labels, batch_unique_label_counts = np.unique(batch_target_labels, return_counts=True)
            batch_real_indx = []
            for j in range(len(batch_unique_labels)):
                indx_j = np.where(train_labels==batch_unique_labels[j])[0]
                indx_j = np.random.choice(indx_j, size=batch_unique_label_counts[j])
                batch_real_indx.append(indx_j)
            batch_real_indx = np.concatenate(batch_real_indx)
            batch_real_indx = batch_real_indx.reshape(-1)
            #################################################
            ## get some real images for training
            batch_real_images = train_images[batch_real_indx]
            batch_real_images = hflip_images(batch_real_images) ## randomly flip real images
            batch_real_images = normalize_images(batch_real_images) ## normalize real images
            batch_real_images = torch.from_numpy(batch_real_images).type(torch.float).cuda()
            assert batch_real_images.max().item()<=1.0
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).cuda()
            #################################################
            ## forward pass
            batch_features = net_encoder(batch_real_images)
            batch_recons_images = net_decoder(batch_features)
            batch_pred_labels = net_predict(batch_features)
            '''
            based on https://debuggercafe.com/sparse-autoencoders-using-l1-regularization-with-pytorch/
            '''
            # loss1: reconstruction + L1-style sparsity on the latent features
            # loss2: label regression from the latent features
            loss1 = criterion(batch_recons_images, batch_real_images) + lambda_sparsity * batch_features.mean()
            loss2 = criterion(batch_pred_labels.view(-1), batch_real_labels.view(-1))
            loss = loss1 + loss2*lambda_regression
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
            train_loss1 += loss1.cpu().item()
            train_loss2 += loss2.cpu().item()
            gen_iterations += 1
            # periodically dump a grid of reconstructions for visual inspection
            if gen_iterations % 100 == 0:
                net_encoder.eval()
                net_decoder.eval()
                n_row=min(10, int(np.sqrt(batch_size)))
                with torch.no_grad():
                    batch_recons_images = net_decoder(net_encoder(batch_real_images[0:n_row**2]))
                    batch_recons_images = batch_recons_images.detach().cpu()
                save_image(batch_recons_images.data, save_sae_images_folder + '/{}.png'.format(gen_iterations), nrow=n_row, normalize=True)
            if gen_iterations % 20 == 0:
                print("\r SparseAE+sparsity{:.3f}+regre{:.3f}: [step {}] [epoch {}/{}] [train loss {:.4f}={:.4f}+{:.4f}] [Time {:.4f}]".format(lambda_sparsity, lambda_regression, gen_iterations, epoch+1, epochs, train_loss/(batch_idx+1), train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), timeit.default_timer()-start_time) )
        # end for batch_idx
        if path_to_ckpt is not None and (epoch+1) % 50 == 0:
            save_file = path_to_ckpt + "/PreSAEForDRE_checkpoint_intrain/PreSAEForDRE_checkpoint_epoch_{}_sparsity_{:.3f}_regre_{:.3f}.pth".format(epoch+1, lambda_sparsity, lambda_regression)
            os.makedirs(os.path.dirname(save_file), exist_ok=True)
            torch.save({
                    'gen_iterations': gen_iterations,
                    'net_encoder_state_dict': net_encoder.state_dict(),
                    'net_decoder_state_dict': net_decoder.state_dict(),
                    'net_predict_state_dict': net_predict.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch
    net_encoder = net_encoder.cpu()
    net_decoder = net_decoder.cpu()
    net_predict = net_predict.cpu()
    return net_encoder, net_decoder, net_predict
| 7,789 | 41.802198 | 328 | py |
cGAN-KD | cGAN-KD-main/UTKFace/train_ccgan.py | import torch
import numpy as np
import os
import timeit
from PIL import Image
from torchvision.utils import save_image
from utils import *
from opts import gen_synth_data_opts
from DiffAugment_pytorch import DiffAugment
''' Settings '''
args = gen_synth_data_opts()
# some parameters in opts
# CcGAN training hyper-parameters, read once at import time and used as
# module-level globals by train_ccgan / SampCcGAN_given_labels below.
loss_type = args.gan_loss_type
niters = args.gan_niters
resume_niters = args.gan_resume_niters
d_niters = args.gan_d_niters
dim_gan = args.gan_dim_g
lr_g = args.gan_lr_g
lr_d = args.gan_lr_d
save_niters_freq = args.gan_save_niters_freq
batch_size_disc = args.gan_batch_size_disc
batch_size_gene = args.gan_batch_size_gene
# batch_size_max is not referenced in this module's functions — TODO confirm
# whether it is used by importers before removing.
batch_size_max = max(batch_size_disc, batch_size_gene)
threshold_type = args.gan_threshold_type
nonzero_soft_weight_threshold = args.gan_nonzero_soft_weight_threshold
use_DiffAugment = args.gan_DiffAugment
policy = args.gan_DiffAugment_policy
## horizontal flip images
def hflip_images(batch_images):
    """Randomly mirror a batch of NCHW numpy images along the width axis (in place)."""
    # Each image is flipped independently with probability 0.5.
    coin_flips = np.random.uniform(0, 1, len(batch_images))
    flip_rows = np.nonzero(coin_flips > 0.5)[0]
    batch_images[flip_rows] = batch_images[flip_rows][:, :, :, ::-1]
    return batch_images
# def hflip_images(batch_images):
# uniform_threshold = np.random.uniform(0,1,len(batch_images))
# indx_gt = np.where(uniform_threshold>0.5)[0]
# batch_images[indx_gt] = torch.flip(batch_images[indx_gt], dims=[3])
# return batch_images
def train_ccgan(kernel_sigma, kappa, train_images, train_labels, netG, netD, net_y2h, save_images_folder, path_to_ckpt=None, clip_label=False):
    '''
    Train a CcGAN (continuous conditional GAN) with hard/soft vicinity weighting.

    Note that train_images are not normalized to [-1,1];
    train_labels are normalized to [0,1].
    kernel_sigma: std of the Gaussian noise added to sampled target labels.
    kappa: vicinity parameter (radius for "hard", decay rate for "soft").
    net_y2h: frozen label-embedding network (label -> conditioning vector).
    Returns the trained (netG, netD), still on the GPU.

    Fixes vs. the previous revision:
    - DataLoader iteration uses the builtin next(); the Python-2 style
      ``.next()`` method does not exist on modern PyTorch iterators.
    - unknown ``loss_type`` now raises ValueError instead of a latent NameError.
    - removed the never-read local ``batch_size_of_vicinity``.
    '''
    assert train_images.max()>1.0 and train_images.min()>=0 and train_images.max()<=255.0
    assert train_labels.min()>=0 and train_labels.max()<=1.0
    unique_train_labels = np.sort(np.array(list(set(train_labels))))

    netG = netG.cuda()
    netD = netD.cuda()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()

    optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))

    # optionally resume (restores both nets, both optimizers and RNG state)
    if path_to_ckpt is not None and resume_niters>0:
        save_file = path_to_ckpt + "/CcGAN_checkpoint_niters_{}.pth".format(resume_niters)
        checkpoint = torch.load(save_file)
        netG.load_state_dict(checkpoint['netG_state_dict'])
        netD.load_state_dict(checkpoint['netD_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
        optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if

    # printed images with labels between the 5-th quantile and 95-th quantile of training labels
    n_row=10; n_col = n_row
    z_fixed = torch.randn(n_row*n_col, dim_gan, dtype=torch.float).cuda()
    start_label = np.quantile(train_labels, 0.05)
    end_label = np.quantile(train_labels, 0.95)
    selected_labels = np.linspace(start_label, end_label, num=n_row)
    y_fixed = np.zeros(n_row*n_col)
    for i in range(n_row):
        curr_label = selected_labels[i]
        for j in range(n_col):
            y_fixed[i*n_col+j] = curr_label
    print(y_fixed)
    y_fixed = torch.from_numpy(y_fixed).type(torch.float).view(-1,1).cuda()

    start_time = timeit.default_timer()
    for niter in range(resume_niters, niters):
        for _ in range(d_niters):
            ''' Train Discriminator '''
            ## randomly draw batch_size_disc y's from unique_train_labels
            batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_disc, replace=True)
            ## add Gaussian noise; we estimate image distribution conditional on these labels
            batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_disc)
            batch_target_labels = batch_target_labels_in_dataset + batch_epsilons
            if clip_label:
                batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)

            ## find index of real images with labels in the vicinity of batch_target_labels
            ## generate labels for fake image generation; these labels are also in the vicinity of batch_target_labels
            batch_real_indx = np.zeros(batch_size_disc, dtype=int) #index of images in the data; the labels of these images are in the vicinity
            batch_fake_labels = np.zeros(batch_size_disc)

            for j in range(batch_size_disc):
                ## index for real images
                if threshold_type == "hard":
                    indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                else:
                    # reverse the weight function for SVDL
                    indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]

                ## if the max gap between two consecutive ordered unique labels is large, it is possible that len(indx_real_in_vicinity)<1
                while len(indx_real_in_vicinity)<1:
                    # resample the noisy target label until its vicinity is non-empty
                    batch_epsilons_j = np.random.normal(0, kernel_sigma, 1)
                    batch_target_labels[j] = batch_target_labels_in_dataset[j] + batch_epsilons_j
                    if clip_label:
                        batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
                    # index for real images
                    if threshold_type == "hard":
                        indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                    else:
                        # reverse the weight function for SVDL
                        indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
                #end while len(indx_real_in_vicinity)<1

                assert len(indx_real_in_vicinity)>=1
                batch_real_indx[j] = np.random.choice(indx_real_in_vicinity, size=1)[0]

                ## labels for fake images generation (uniform within the vicinity, clipped to [0,1])
                if threshold_type == "hard":
                    lb = batch_target_labels[j] - kappa
                    ub = batch_target_labels[j] + kappa
                else:
                    lb = batch_target_labels[j] - np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                    ub = batch_target_labels[j] + np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                lb = max(0.0, lb); ub = min(ub, 1.0)
                assert lb<=ub
                assert lb>=0 and ub>=0
                assert lb<=1 and ub<=1
                batch_fake_labels[j] = np.random.uniform(lb, ub, size=1)[0]
            #end for j

            ## draw the real image batch from the training set
            batch_real_images = hflip_images(train_images[batch_real_indx])
            assert batch_real_images.max()>1
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).cuda()

            ## normalize real images via a throwaway one-batch DataLoader
            trainset = IMGs_dataset(batch_real_images, labels=None, normalize=True)
            train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_disc, shuffle=False)
            train_dataloader = iter(train_dataloader)
            # FIX: use builtin next(); iterator objects no longer expose .next()
            batch_real_images = next(train_dataloader)
            assert len(batch_real_images) == batch_size_disc
            batch_real_images = batch_real_images.type(torch.float).cuda()
            assert batch_real_images.max().item()<=1

            ## generate the fake image batch
            batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).cuda()
            z = torch.randn(batch_size_disc, dim_gan, dtype=torch.float).cuda()
            batch_fake_images = netG(z, net_y2h(batch_fake_labels))

            ## target labels on gpu
            batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()

            ## weight vector (soft vicinity: Gaussian weights; hard: uniform)
            if threshold_type == "soft":
                real_weights = torch.exp(-kappa*(batch_real_labels-batch_target_labels)**2).cuda()
                fake_weights = torch.exp(-kappa*(batch_fake_labels-batch_target_labels)**2).cuda()
            else:
                real_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()
                fake_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()

            # forward pass
            if use_DiffAugment:
                real_dis_out = netD(DiffAugment(batch_real_images, policy=policy), net_y2h(batch_target_labels))
                fake_dis_out = netD(DiffAugment(batch_fake_images.detach(), policy=policy), net_y2h(batch_target_labels))
            else:
                real_dis_out = netD(batch_real_images, net_y2h(batch_target_labels))
                fake_dis_out = netD(batch_fake_images.detach(), net_y2h(batch_target_labels))

            if loss_type == "vanilla":
                real_dis_out = torch.nn.Sigmoid()(real_dis_out)
                fake_dis_out = torch.nn.Sigmoid()(fake_dis_out)
                d_loss_real = - torch.log(real_dis_out+1e-20)
                d_loss_fake = - torch.log(1-fake_dis_out+1e-20)
            elif loss_type == "hinge":
                d_loss_real = torch.nn.ReLU()(1.0 - real_dis_out)
                d_loss_fake = torch.nn.ReLU()(1.0 + fake_dis_out)
            else:
                # FIX: fail fast instead of hitting a NameError below
                raise ValueError('Unsupported loss_type: {}'.format(loss_type))
            d_loss = torch.mean(real_weights.view(-1) * d_loss_real.view(-1)) + torch.mean(fake_weights.view(-1) * d_loss_fake.view(-1))

            optimizerD.zero_grad()
            d_loss.backward()
            optimizerD.step()
        ##end D update

        ''' Train Generator '''
        netG.train()

        # generate fake images
        ## randomly draw batch_size_gene y's from unique_train_labels
        batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_gene, replace=True)
        ## add Gaussian noise; we estimate image distribution conditional on these labels
        batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_gene)
        batch_target_labels = batch_target_labels_in_dataset + batch_epsilons
        if clip_label:
            batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
        batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()

        z = torch.randn(batch_size_gene, dim_gan, dtype=torch.float).cuda()
        batch_fake_images = netG(z, net_y2h(batch_target_labels))

        # loss
        if use_DiffAugment:
            dis_out = netD(DiffAugment(batch_fake_images, policy=policy), net_y2h(batch_target_labels))
        else:
            dis_out = netD(batch_fake_images, net_y2h(batch_target_labels))
        if loss_type == "vanilla":
            dis_out = torch.nn.Sigmoid()(dis_out)
            g_loss = - torch.mean(torch.log(dis_out+1e-20))
        elif loss_type == "hinge":
            g_loss = - dis_out.mean()
        else:
            raise ValueError('Unsupported loss_type: {}'.format(loss_type))

        # backward
        optimizerG.zero_grad()
        g_loss.backward()
        optimizerG.step()

        # print loss
        if (niter+1) % 20 == 0:
            print ("CcGAN: [Iter %d/%d] [D loss: %.4f] [G loss: %.4f] [real prob: %.3f] [fake prob: %.3f] [Time: %.4f]" % (niter+1, niters, d_loss.item(), g_loss.item(), real_dis_out.mean().item(), fake_dis_out.mean().item(), timeit.default_timer()-start_time))

        # periodically dump a grid of samples at fixed (z, y)
        if (niter+1) % 100 == 0:
            netG.eval()
            with torch.no_grad():
                gen_imgs = netG(z_fixed, net_y2h(y_fixed))
                gen_imgs = gen_imgs.detach().cpu()
                save_image(gen_imgs.data, save_images_folder + '/{}.png'.format(niter+1), nrow=n_row, normalize=True)

        if path_to_ckpt is not None and ((niter+1) % save_niters_freq == 0 or (niter+1) == niters):
            save_file = path_to_ckpt + "/CcGAN_checkpoint_niters_{}.pth".format(niter+1)
            os.makedirs(os.path.dirname(save_file), exist_ok=True)
            torch.save({
                    'netG_state_dict': netG.state_dict(),
                    'netD_state_dict': netD.state_dict(),
                    'optimizerG_state_dict': optimizerG.state_dict(),
                    'optimizerD_state_dict': optimizerD.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for niter

    return netG, netD
def SampCcGAN_given_labels(netG, net_y2h, labels, batch_size = 100, to_numpy=True, verbose=True):
    '''
    Sample one fake image per requested label from a trained CcGAN generator.

    labels: a numpy array; normalized label in [0,1]
    Returns (fake_images, fake_labels) with exactly len(labels) entries;
    both nets are moved back to the CPU before returning.
    '''
    assert labels.min()>=0 and labels.max()<=1.0
    nfake = len(labels)
    if batch_size>nfake:
        batch_size=nfake
    fake_images = []
    # pad the label array so the final (possibly partial) batch is still full;
    # the extras are trimmed off below
    fake_labels = np.concatenate((labels, labels[0:batch_size]))
    netG = netG.cuda()
    netG.eval()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()
    with torch.no_grad():
        if verbose:
            pb = SimpleProgressBar()
        n_img_got = 0
        while n_img_got < nfake:
            z = torch.randn(batch_size, dim_gan, dtype=torch.float).cuda()
            y = torch.from_numpy(fake_labels[n_img_got:(n_img_got+batch_size)]).type(torch.float).view(-1,1).cuda()
            batch_fake_images = netG(z, net_y2h(y))
            fake_images.append(batch_fake_images.cpu())
            n_img_got += batch_size
            if verbose:
                pb.update(min(float(n_img_got)/nfake, 1)*100)
        ##end while
    fake_images = torch.cat(fake_images, dim=0)
    #remove extra entries
    fake_images = fake_images[0:nfake]
    fake_labels = fake_labels[0:nfake]
    if to_numpy:
        fake_images = fake_images.numpy()
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    return fake_images, fake_labels
cGAN-KD | cGAN-KD-main/UTKFace/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across ``groups`` groups."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
        batch, channels, height, width = x.size()
        grouped = x.view(batch, self.groups, channels // self.groups, height, width)
        return grouped.permute(0, 2, 1, 3, 4).reshape(batch, channels, height, width)
class SplitBlock(nn.Module):
    """Split a tensor along the channel axis at ``ratio`` of the channels."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        left = x[:, :split_at, :, :]
        right = x[:, split_at:, :, :]
        return left, right
class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit: split the channels, transform one half with
    1x1 -> depthwise 3x3 -> 1x1 convolutions, re-concatenate with the untouched
    half, then channel-shuffle.

    When ``is_last`` is True, forward also returns the pre-activation tensor
    (the concat before the final ReLU) — presumably for feature distillation;
    confirm against callers.
    """
    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        # only the second half of the split is convolved
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        preact = self.bn3(self.conv3(out))
        out = F.relu(preact)
        # out = F.relu(self.bn3(self.conv3(out)))
        preact = torch.cat([x1, preact], 1)
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        if self.is_last:
            return out, preact
        else:
            return out
class DownBlock(nn.Module):
    """ShuffleNetV2 down-sampling unit: two stride-2 branches (left: depthwise
    3x3 then 1x1; right: 1x1, depthwise 3x3, 1x1), concatenated and shuffled.
    Spatial size halves; channel count becomes ``out_channels``.
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        # left
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 backbone with a regression head (single scalar output).

    ``net_size`` selects a width configuration from the module-level
    ``configs`` dict. The stem uses a stride-2 4x4 conv (input is presumably
    64x64 — confirm against callers). Each stage's last BasicBlock is built
    with ``is_last=True`` and therefore returns a (out, preact) tuple, which
    is why forward unpacks each stage with ``out, _ = ...``.
    """
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']

        # self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
        #                        stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        # MLP regression head; the trailing ReLU clamps predictions to >= 0
        self.linear = nn.Sequential(
            nn.Linear(out_channels[3], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )

    def _make_layer(self, out_channels, num_blocks):
        # one stride-2 DownBlock followed by num_blocks BasicBlocks;
        # the final BasicBlock is marked is_last so the stage returns a tuple
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # out = F.max_pool2d(out, 3, stride=2, padding=1)
        out, _ = self.layer1(out)
        out, _ = self.layer2(out)
        out, _ = self.layer3(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Width configurations for ShuffleNetV2: per-stage output channels and
# per-stage BasicBlock counts, keyed by the net_size width multiplier.
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },

    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },

    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },

    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """Build a ShuffleNetV2 at the 1x width multiplier."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: time one forward pass on a random 64x64 batch of 4 images.
    net = ShuffleV2()

    x = torch.randn(4, 3, 64, 64)
    import time
    a = time.time()
    out = net(x)
    b = time.time()
    print(b - a)
    print(out.shape)
| 6,654 | 32.442211 | 107 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/SAGAN.py | '''
SAGAN arch
Adapted from https://github.com/voletiv/self-attention-GAN-pytorch/blob/master/sagan_models.py
'''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
from torch.nn.init import xavier_uniform_
def init_weights(m):
    """Xavier-initialize Linear/Conv2d weights and zero their biases.

    Intended for `Module.apply`; modules of any other (sub)class are left
    untouched (exact type match, deliberately not isinstance).
    """
    if type(m) not in (nn.Linear, nn.Conv2d):
        return
    xavier_uniform_(m.weight)
    if m.bias is not None:
        m.bias.data.fill_(0.)
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """2d convolution wrapped with spectral normalization."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
def snlinear(in_features, out_features, bias=True):
    """Fully-connected layer wrapped with spectral normalization."""
    fc = nn.Linear(in_features=in_features, out_features=out_features, bias=bias)
    return spectral_norm(fc)
class Self_Attn(nn.Module):
    """SAGAN self-attention layer: out = x + sigma * attention(x).

    Keys and values are computed on a 2x max-pooled copy of the input, which
    shrinks the attention matrix by a factor of 4. `sigma` starts at 0, so
    the layer is exactly the identity at initialization.
    """

    def __init__(self, in_channels):
        super(Self_Attn, self).__init__()
        self.in_channels = in_channels
        # 1x1 spectral-norm convs: query (theta), key (phi), value (g), output mix.
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax = nn.Softmax(dim=-1)
        # Learned residual gate, zero-initialized.
        self.sigma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """x: (B, C, H, W) feature map; returns a tensor of the same shape."""
        _, ch, h, w = x.size()
        # Query over all H*W positions.
        query = self.snconv1x1_theta(x).view(-1, ch // 8, h * w)
        # Key over the pooled H*W/4 positions.
        key = self.maxpool(self.snconv1x1_phi(x)).view(-1, ch // 8, h * w // 4)
        # Attention map: (B, H*W, H*W/4), softmax over the key axis.
        attn = self.softmax(torch.bmm(query.permute(0, 2, 1), key))
        # Value over the pooled positions, aggregated by the attention map.
        value = self.maxpool(self.snconv1x1_g(x)).view(-1, ch // 2, h * w // 4)
        attn_g = torch.bmm(value, attn.permute(0, 2, 1)).view(-1, ch // 2, h, w)
        attn_g = self.snconv1x1_attn(attn_g)
        # Gated residual connection.
        return x + self.sigma * attn_g
'''
Generator
'''
class ConditionalBatchNorm2d(nn.Module):
    """Batch norm whose affine parameters are predicted from an embedding.

    Output is bn(x) + gamma(y) * bn(x) + beta(y); gamma and beta are
    bias-free linear maps of the conditioning vector y, broadcast over the
    spatial dimensions.
    """

    def __init__(self, num_features, dim_embed):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, momentum=0.001, affine=False)
        self.embed_gamma = nn.Linear(dim_embed, num_features, bias=False)
        self.embed_beta = nn.Linear(dim_embed, num_features, bias=False)

    def forward(self, x, y):
        normed = self.bn(x)
        per_channel = (-1, self.num_features, 1, 1)
        gamma = self.embed_gamma(y).view(per_channel)
        beta = self.embed_beta(y).view(per_channel)
        return normed + gamma * normed + beta
class GenBlock(nn.Module):
    """Generator residual block: CBN -> ReLU -> 2x upsample -> conv, twice,
    with an upsample + 1x1-conv shortcut. Doubles the spatial resolution."""
    def __init__(self, in_channels, out_channels, dim_embed):
        super(GenBlock, self).__init__()
        self.cond_bn1 = ConditionalBatchNorm2d(in_channels, dim_embed)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.cond_bn2 = ConditionalBatchNorm2d(out_channels, dim_embed)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        # 1x1 conv on the skip path to match the output channel count.
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x, labels):
        # `labels` is the conditioning embedding consumed by the conditional BNs.
        x0 = x
        x = self.cond_bn1(x, labels)
        x = self.relu(x)
        x = F.interpolate(x, scale_factor=2, mode='nearest') # upsample
        x = self.snconv2d1(x)
        x = self.cond_bn2(x, labels)
        x = self.relu(x)
        x = self.snconv2d2(x)
        x0 = F.interpolate(x0, scale_factor=2, mode='nearest') # upsample the shortcut too
        x0 = self.snconv2d0(x0)
        out = x + x0
        return out
class SAGAN_Generator(nn.Module):
    """SAGAN generator: noise z + label embedding -> 64x64 image in [-1, 1],
    with self-attention at the 32x32 resolution."""
    def __init__(self, z_dim=256, nc=3, gene_ch=128, dim_embed=128):
        super(SAGAN_Generator, self).__init__()
        self.z_dim = z_dim
        self.gene_ch = gene_ch
        self.snlinear0 = snlinear(in_features=z_dim, out_features=gene_ch*16*4*4)
        self.block1 = GenBlock(gene_ch*16, gene_ch*8, dim_embed)
        self.block2 = GenBlock(gene_ch*8, gene_ch*4, dim_embed)
        self.block3 = GenBlock(gene_ch*4, gene_ch*2, dim_embed)
        self.self_attn = Self_Attn(gene_ch*2)  # attention at 32x32
        self.block4 = GenBlock(gene_ch*2, gene_ch, dim_embed)
        self.bn = nn.BatchNorm2d(gene_ch, eps=1e-5, momentum=0.0001, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=gene_ch, out_channels=nc, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        # Weight init
        self.apply(init_weights)
    def forward(self, z, labels):
        # z: (n, z_dim) noise; labels: (n, dim_embed) conditioning embedding.
        # n x z_dim
        out = self.snlinear0(z)            # project to 4x4 feature map
        out = out.view(-1, self.gene_ch*16, 4, 4) # 4 x 4
        out = self.block1(out, labels)    # 8 x 8
        out = self.block2(out, labels)    # 16 x 16
        out = self.block3(out, labels)    # 32 x 32
        out = self.self_attn(out)         # 32 x 32
        out = self.block4(out, labels)    # 64 x 64
        out = self.bn(out)
        out = self.relu(out)
        out = self.snconv2d1(out)
        out = self.tanh(out)              # squash to [-1, 1]
        return out
'''
Discriminator
'''
class DiscOptBlock(nn.Module):
    """Optimized first discriminator block (acts on raw pixels, so no leading
    ReLU): conv -> ReLU -> conv -> avg-pool, shortcut = avg-pool -> 1x1 conv."""
    def __init__(self, in_channels, out_channels):
        super(DiscOptBlock, self).__init__()
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        x0 = x
        x = self.snconv2d1(x)
        x = self.relu(x)
        x = self.snconv2d2(x)
        x = self.downsample(x)
        # Shortcut: pool first, then project channels.
        x0 = self.downsample(x0)
        x0 = self.snconv2d0(x0)
        out = x + x0
        return out
class DiscBlock(nn.Module):
    """Discriminator residual block: (ReLU -> conv) x2 with optional 2x
    avg-pool downsampling; the shortcut gets a 1x1 conv only when the
    channel counts differ."""
    def __init__(self, in_channels, out_channels):
        super(DiscBlock, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        self.ch_mismatch = False
        if in_channels != out_channels:
            self.ch_mismatch = True
            # 1x1 projection on the skip path, created only when needed.
            self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x, downsample=True):
        x0 = x
        x = self.relu(x)
        x = self.snconv2d1(x)
        x = self.relu(x)
        x = self.snconv2d2(x)
        if downsample:
            x = self.downsample(x)
        # Shortcut path. BUG FIX: `snconv2d0` only exists when ch_mismatch is
        # True, but the original applied it whenever `downsample` was True as
        # well — raising AttributeError for a downsampling block with equal
        # channel counts. Apply the projection only when it exists; a pure
        # downsample shortcut is just the avg-pool.
        if self.ch_mismatch:
            x0 = self.snconv2d0(x0)
        if downsample:
            x0 = self.downsample(x0)
        out = x + x0
        return out
class SAGAN_Discriminator(nn.Module):
    """Projection discriminator for 64x64 inputs: a stack of residual blocks
    with self-attention at 32x32, scored as linear(features) plus the inner
    product of the features with an embedded label."""
    def __init__(self, nc=3, disc_ch=128, dim_embed=128):
        super(SAGAN_Discriminator, self).__init__()
        self.disc_ch = disc_ch
        self.opt_block1 = DiscOptBlock(nc, disc_ch)
        self.self_attn = Self_Attn(disc_ch)
        self.block1 = DiscBlock(disc_ch, disc_ch*2)
        self.block2 = DiscBlock(disc_ch*2, disc_ch*4)
        self.block3 = DiscBlock(disc_ch*4, disc_ch*8)
        self.block4 = DiscBlock(disc_ch*8, disc_ch*16)
        self.relu = nn.ReLU(inplace=True)
        self.snlinear1 = snlinear(in_features=disc_ch*16*4*4, out_features=1)
        # Maps a label embedding into the flattened feature space for the
        # projection term.
        self.sn_embedding1 = snlinear(dim_embed, disc_ch*16*4*4, bias=False)
        # Weight init
        self.apply(init_weights)
        xavier_uniform_(self.sn_embedding1.weight)
    def forward(self, x, labels):
        # 64x64 input
        out = self.opt_block1(x) # 32 x 32
        out = self.self_attn(out) # 32 x 32
        out = self.block1(out) # 16 x 16
        out = self.block2(out) # 8 x 8
        out = self.block3(out) # 4 x 4
        out = self.block4(out, downsample=False) # 4 x 4
        out = self.relu(out) # n x disc_ch*16 x 4 x 4
        out = out.view(-1, self.disc_ch*16*4*4) # n x (disc_ch*16*4*4)
        output1 = torch.squeeze(self.snlinear1(out)) # n
        # Projection term: <features, embed(label)>
        h_labels = self.sn_embedding1(labels) # n x (disc_ch*16*4*4)
        proj = torch.mul(out, h_labels) # n x (disc_ch*16*4*4)
        output2 = torch.sum(proj, dim=[1]) # n
        # Out
        output = output1 + output2 # n
        return output
# Smoke test: run a random batch through G then D on GPU (requires CUDA).
if __name__ == "__main__":
    netG = SAGAN_Generator(z_dim=256, gene_ch=128, dim_embed=128).cuda()
    netD = SAGAN_Discriminator(disc_ch=128, dim_embed=128).cuda()
    netG = nn.DataParallel(netG)
    netD = nn.DataParallel(netD)
    N=8
    z = torch.randn(N, 256).cuda()
    y = torch.randn(N, 128).cuda()
    x = netG(z,y)
    o = netD(x,y)
    print(x.size())
    print(o.size())
| 10,076 | 33.748276 | 129 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def drop_connect(x, drop_ratio):
    """DropConnect / stochastic depth: zero out whole samples, in place.

    Each sample in the batch survives with probability 1 - drop_ratio, and
    survivors are rescaled by 1/keep_ratio so the expectation is unchanged.
    Mutates and returns x.
    """
    keep_prob = 1.0 - drop_ratio
    keep_mask = torch.empty((x.shape[0], 1, 1, 1), dtype=x.dtype,
                            device=x.device).bernoulli_(keep_prob)
    return x.div_(keep_prob).mul_(keep_mask)
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish: per-channel gating computed
    from globally pooled features via two 1x1 convolutions.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)

    def forward(self, x):
        # Squeeze to 1x1, excite through the bottleneck, then gate the input.
        squeezed = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(swish(self.se1(squeezed))).sigmoid()
        return x * gate
class Block(nn.Module):
    '''MBConv block: expansion + depthwise + pointwise + squeeze-excitation,
    with an identity skip (and optional drop_connect) when shapes match.'''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        # Expansion (1x1 conv widening channels by expand_ratio)
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv (padding keeps spatial size for k=3 or k=5)
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE layers (bottleneck sized from the *input* channel count)
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)
        # Output (1x1 projection back down, no activation)
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
    def forward(self, x):
        # Skip the expansion conv when expand_ratio == 1 (it would be 1x1 identity-sized).
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # Stochastic depth only while training.
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet backbone with a 3-layer MLP regression head (scalar,
    nonnegative output).

    `cfg` supplies per-stage lists: 'expansion', 'out_channels', 'num_blocks',
    'kernel_size', 'stride', plus scalars 'dropout_rate' and
    'drop_connect_rate'.
    """
    def __init__(self, cfg):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Sequential(
            nn.Linear(cfg['out_channels'][-1], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_layers(self, in_channels):
        """Stack MBConv blocks; drop_connect rate grows linearly with depth."""
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                # BUG FIX: `b` was never incremented, so drop_rate stayed 0
                # for every block and stochastic depth was dead code. The
                # intended schedule scales the rate by block_index / blocks.
                b += 1
        return nn.Sequential(*layers)
    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """EfficientNet-B0: seven MBConv stages with the standard B0 settings."""
    return EfficientNet({
        'num_blocks': [1, 2, 2, 3, 3, 4, 1],
        'expansion': [1, 6, 6, 6, 6, 6, 6],
        'out_channels': [16, 24, 40, 80, 112, 192, 320],
        'kernel_size': [3, 3, 5, 3, 5, 5, 3],
        'stride': [1, 2, 2, 2, 1, 2, 1],
        'dropout_rate': 0.2,
        'drop_connect_rate': 0.2,
    })
def test():
    """Smoke test: forward a random 64x64 batch through EfficientNet-B0."""
    net = EfficientNetB0()
    x = torch.randn(2, 3, 64, 64)
    y = net(x)
    print(y.shape)
if __name__ == '__main__':
    test()
cGAN-KD | cGAN-KD-main/UTKFace/models/ResNet_embed.py | '''
ResNet-based model to map an image from pixel space to a features space.
Need to be pretrained on the dataset.
if isometric_map = True, there is an extra step (elf.classifier_1 = nn.Linear(512, 32*32*3)) to increase the dimension of the feature map from 512 to 32*32*3. This selection is for desity-ratio estimation in feature space.
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3          # number of image channels
IMG_SIZE = 64   # expected input resolution
DIM_EMBED = 128 # default dimension of the shared feature/label embedding
#------------------------------------------------------------------------------
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style) with post-add ReLU."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return F.relu(branch + self.shortcut(x))
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 style); the
    last conv widens channels by `expansion`."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Projection shortcut only when shape changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet_embed(nn.Module):
    """ResNet backbone mapping a 64x64 image to (scalar prediction, embedding).

    `forward` returns (out, features): `out` is a nonnegative scalar
    regression and `features` the dim_embed-dimensional embedding used for
    label conditioning elsewhere in the project.
    """
    def __init__(self, block, num_blocks, nc=NC, dim_embed=DIM_EMBED):
        super(ResNet_embed, self).__init__()
        self.in_planes = 64
        self.main = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),  # h=h
            # nn.Conv2d(nc, 64, kernel_size=4, stride=2, padding=1, bias=False),  # h=h/2
            nn.BatchNorm2d(64),
            nn.ReLU(),
            # self._make_layer(block, 64, num_blocks[0], stride=1),  # h=h
            self._make_layer(block, 64, num_blocks[0], stride=2),  # h=h/2 32
            self._make_layer(block, 128, num_blocks[1], stride=2),  # h=h/2 16
            self._make_layer(block, 256, num_blocks[2], stride=2),  # h=h/2 8
            self._make_layer(block, 512, num_blocks[3], stride=2),  # h=h/2 4
            # nn.AvgPool2d(kernel_size=4)
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.x2h_res = nn.Sequential(
            # BUG FIX: the backbone emits 512*block.expansion channels, not
            # always 512 — with Bottleneck (expansion=4) the old hard-coded
            # nn.Linear(512, 512) crashed ResNet50_embed. Identical for
            # BasicBlock (expansion=1), so existing checkpoints still load.
            nn.Linear(512*block.expansion, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, dim_embed),
            nn.BatchNorm1d(dim_embed),
            nn.ReLU(),
        )
        self.h2y = nn.Sequential(
            nn.Linear(dim_embed, 1),
            nn.ReLU()
        )
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` residual blocks; only the first one strides."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        features = self.main(x)
        features = features.view(features.size(0), -1)
        features = self.x2h_res(features)
        out = self.h2y(features)
        return out, features
def ResNet18_embed(dim_embed=DIM_EMBED):
    """ResNet-18 backbone embedding network."""
    return ResNet_embed(BasicBlock, [2,2,2,2], dim_embed=dim_embed)
def ResNet34_embed(dim_embed=DIM_EMBED):
    """ResNet-34 backbone embedding network."""
    return ResNet_embed(BasicBlock, [3,4,6,3], dim_embed=dim_embed)
def ResNet50_embed(dim_embed=DIM_EMBED):
    """ResNet-50 (bottleneck) backbone embedding network."""
    return ResNet_embed(Bottleneck, [3,4,6,3], dim_embed=dim_embed)
#------------------------------------------------------------------------------
# map labels to the embedding space
class model_y2h(nn.Module):
    """MLP that lifts a scalar label y into the dim_embed embedding space
    (GroupNorm instead of BatchNorm so it works with any batch size)."""
    def __init__(self, dim_embed=DIM_EMBED):
        super(model_y2h, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(1, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            nn.ReLU()
        )
    def forward(self, y):
        # NOTE(review): the +1e-8 presumably keeps labels strictly positive
        # before embedding — confirm against the training pipeline.
        y = y.view(-1, 1) +1e-8
        # y = torch.exp(y.view(-1, 1))
        return self.main(y)
# Smoke test (requires CUDA): forward a random batch and print shapes.
if __name__ == "__main__":
    net = ResNet34_embed(dim_embed=128).cuda()
    x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()
    out, features = net(x)
    print(out.size())
    print(features.size())
    net_y2h = model_y2h()
cGAN-KD | cGAN-KD-main/UTKFace/models/autoencoder_extract.py | import torch
from torch import nn
class encoder_extract(nn.Module):
    """Convolutional encoder: 64x64x3 image -> flat bottleneck feature of
    size `dim_bottleneck` (four 2x-downsampling conv stages, then a Linear)."""
    def __init__(self, dim_bottleneck=64*64*3, ch=32):
        super(encoder_extract, self).__init__()
        self.ch = ch
        self.dim_bottleneck = dim_bottleneck
        self.conv = nn.Sequential(
            nn.Conv2d(3, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 32
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 16
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch*2, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=h/2; 8
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch*4, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=h/2; 4
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*4, kernel_size=3, stride=1, padding=1), #h=h; 4
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
        )
        self.fc = nn.Sequential(
            nn.Linear(ch*4*4*4, dim_bottleneck),
            nn.ReLU()
        )
    def forward(self, x):
        feature = self.conv(x)
        feature = feature.view(-1, self.ch*4*4*4)  # flatten (ch*4) x 4 x 4
        feature = self.fc(feature)
        return feature
class decoder_extract(nn.Module):
    """Deconvolutional decoder: bottleneck feature -> 64x64x3 image in
    [-1, 1] (four 2x-upsampling transpose-conv stages, tanh output)."""
    def __init__(self, dim_bottleneck=64*64*3, ch=32):
        super(decoder_extract, self).__init__()
        self.ch = ch
        self.dim_bottleneck = dim_bottleneck
        self.fc = nn.Sequential(
            nn.Linear(dim_bottleneck, ch*4*4*4),
            nn.BatchNorm1d(ch*4*4*4),
            nn.ReLU(True)
        )
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=2h; 8
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*2, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=2h; 16
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 32
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 64
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, 3, kernel_size=1, stride=1, padding=0), #h=h
            nn.Tanh()
        )
    def forward(self, feature):
        feature = self.fc(feature)
        feature = feature.view(-1, self.ch*4, 4, 4)  # reshape to (ch*4) x 4 x 4
        out = self.deconv(feature)
        return out
class decoder_predict(nn.Module):
    """MLP head that regresses a nonnegative scalar from a bottleneck feature."""

    def __init__(self, dim_bottleneck=64*64*3):
        super(decoder_predict, self).__init__()
        self.dim_bottleneck = dim_bottleneck
        stages = [
            nn.Linear(self.dim_bottleneck, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.ReLU(),
        ]
        self.predict = nn.Sequential(*stages)

    def forward(self, feature):
        """feature: (n, dim_bottleneck) -> (n, 1) nonnegative prediction."""
        return self.predict(feature)
if __name__=="__main__":
    # Smoke test (requires CUDA): encode, decode, and predict on a random
    # batch, then report parameter counts for each sub-network.
    net_encoder = encoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
    net_decoder = decoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
    net_predict = decoder_predict(dim_bottleneck=64*64*3).cuda()
    net_encoder = nn.DataParallel(net_encoder)
    net_decoder = nn.DataParallel(net_decoder)
    net_predict = nn.DataParallel(net_predict)
    x = torch.randn(10, 3, 64,64).cuda()
    f = net_encoder(x)
    xh = net_decoder(f)
    yh = net_predict(f)
    print(f.size())
    print(xh.size())
    print(yh.size())
    def get_parameter_number(net):
        """Return total and trainable parameter counts of `net`."""
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}
    print(get_parameter_number(net_encoder))
    print(get_parameter_number(net_decoder))
    print(get_parameter_number(net_predict))
| 5,073 | 30.320988 | 89 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (BN follows it)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block; when is_last it also returns the
    pre-activation sum (used for distillation-style hooks)."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        preact = out  # pre-ReLU activation, exposed when is_last
        out = F.relu(out)
        if self.is_last:
            return out, preact
        else:
            return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; when is_last it also
    returns the pre-activation sum."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        preact = out  # pre-ReLU activation, exposed when is_last
        out = F.relu(out)
        if self.is_last:
            return out, preact
        else:
            return out
class ResNet(nn.Module):
    """CIFAR-style ResNet regressor: three residual stages plus a 3-layer MLP
    head producing one nonnegative scalar per image.

    `depth` must be 6n+2 (basicblock) or 9n+2 (bottleneck); `num_filters`
    gives the stem width and the three stage widths.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, stride=1, padding=1, bias=False) #h=h
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n, stride=2)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Sequential(
            nn.Linear(num_filters[3] * block.expansion, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        # He init for convs; unit-gain init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first one strides/projects."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        # Each stage's last block returns (out, preact); preact is unused here.
        out, _ = self.layer1(out)  # 32x32
        out, _ = self.layer2(out)  # 16x16
        out, _ = self.layer3(out)  # 8x8
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
# Factory presets: resnetD builds depth D with the standard CIFAR channel
# plan [16, 16, 32, 64]; the "x4" variants widen every stage 4x.
def resnet8(**kwargs):
    return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet14(**kwargs):
    return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet20(**kwargs):
    return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet32(**kwargs):
    return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet44(**kwargs):
    return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet56(**kwargs):
    return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet110(**kwargs):
    return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet8x4(**kwargs):
    return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs)
def resnet32x4(**kwargs):
    return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs)
# Smoke test: forward a random 64x64 batch through resnet8x4.
if __name__ == '__main__':
    import torch
    x = torch.randn(2, 3, 64, 64)
    net = resnet8x4()
    out = net(x)
    print(out.size())
| 6,698 | 29.175676 | 116 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
from torch.autograd import Variable
# Layer plans: integers are conv output channels, 'M' inserts a 2x2 max-pool.
cfg = {
    'VGG8': [64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M'],
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
NC=3  # input image channels
class vgg(nn.Module):
    """VGG-style regressor: conv feature stack per `cfg[vgg_name]` plus a
    3-layer MLP head producing one nonnegative scalar per 64x64 image."""
    def __init__(self, vgg_name):
        super(vgg, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.fc = nn.Sequential(
            # 4*4*128 == 512*2*2: 512 channels at 2x2 after five pools on a
            # 64x64 input (the expression is misleading but numerically right).
            nn.Linear(4*4*128, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
    def _make_layers(self, cfg):
        """Translate a layer plan into Conv-BN-ReLU / MaxPool modules."""
        layers = []
        in_channels = NC
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]  # no-op pool kept for checkpoint layout
        return nn.Sequential(*layers)
# Factory helpers, one per configuration key in `cfg`.
def vgg8():
    model = vgg('VGG8')
    return model
def vgg11():
    model = vgg('VGG11')
    return model
def vgg13():
    model = vgg('VGG13')
    return model
def vgg16():
    model = vgg('VGG16')
    return model
def vgg19():
    model = vgg('VGG19')
    return model
# Smoke test (requires CUDA): forward a random batch through VGG8.
if __name__ == "__main__":
    net = vgg8().cuda()
    net = nn.DataParallel(net)
    x = torch.randn(4,3,64,64)
    print(net(x).size())
| 2,119 | 24.853659 | 117 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/shufflenetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle (ShuffleNet): interleave channels across groups so
    grouped convolutions can exchange information."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap group/channel axes -> [N,C,H,W]
        n, c, h, w = x.size()
        g = self.groups
        grouped = x.view(n, g, c // g, h, w)
        return grouped.permute(0, 2, 1, 3, 4).reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> channel shuffle -> depthwise 3x3 ->
    grouped 1x1; stride-2 units concatenate an avg-pooled shortcut instead
    of adding it."""
    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes/4)
        # The stem (24 channels) is not grouped, per the paper.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Stride-2: concatenate along channels; stride-1: residual add.
        preact = torch.cat([out, res], 1) if self.stride == 2 else out+res
        out = F.relu(preact)
        # out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out+res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet v1 regressor for 64x64 inputs: grouped-conv stages plus a
    3-layer MLP head producing one nonnegative scalar per image."""
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) #original
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Sequential(
            nn.Linear(out_planes[2], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; the first unit strides and concatenates its
        shortcut, so it only produces out_planes - in_planes new channels."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)
    def forward(self, x, is_feat=False, preact=False):
        # is_feat/preact are accepted for API compatibility but unused here.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ShuffleV1(**kwargs):
    """Build the standard g=3 ShuffleNet-v1 (stage widths 240/480/960)."""
    config = dict(
        out_planes=[240, 480, 960],
        num_blocks=[4, 8, 4],
        groups=3,
    )
    return ShuffleNet(config, **kwargs)
# Smoke test: time one forward pass on a random 64x64 batch.
if __name__ == '__main__':
    x = torch.randn(2, 3, 64, 64)
    net = ShuffleV1()
    import time
    a = time.time()
    # NOTE(review): is_feat/preact are accepted but ignored by ShuffleNet.forward.
    out = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    print(out.shape)
| 4,440 | 33.968504 | 190 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/SNGAN.py | '''
https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
chainer: https://github.com/pfnet-research/sngan_projection
'''
# ResNet generator and discriminator
import torch
from torch import nn
import torch.nn.functional as F
# from spectral_normalization import SpectralNorm
import numpy as np
from torch.nn.utils import spectral_norm
class ConditionalBatchNorm2d(nn.Module):
    """Conditional batch norm: normalize x without affine parameters, then apply
    a per-channel scale/shift predicted linearly from the condition embedding y.
    """

    def __init__(self, num_features, dim_embed):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        self.embed_gamma = nn.Linear(dim_embed, num_features, bias=False)
        self.embed_beta = nn.Linear(dim_embed, num_features, bias=False)

    def forward(self, x, y):
        normalized = self.bn(x)
        # Broadcast the predicted per-channel modulation over H and W.
        scale = self.embed_gamma(y).view(-1, self.num_features, 1, 1)
        shift = self.embed_beta(y).view(-1, self.num_features, 1, 1)
        # Equivalent to normalized * (1 + scale) + shift.
        return normalized + normalized * scale + shift
class ResBlockGenerator(nn.Module):
    """Upsampling residual block for the generator.

    Has two execution paths sharing the same conv weights:
      - conditional (y given): conditional BN modulated by the embedding y;
      - unconditional (y is None): plain BN via the prebuilt ``self.model``.
    Both paths upsample by 2x and add a 1x1-conv shortcut.
    """
    def __init__(self, in_channels, out_channels, dim_embed, bias=True):
        super(ResBlockGenerator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=bias)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=bias)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        self.condbn1 = ConditionalBatchNorm2d(in_channels, dim_embed)
        self.condbn2 = ConditionalBatchNorm2d(out_channels, dim_embed)
        self.relu = nn.ReLU()
        self.upsample = nn.Upsample(scale_factor=2)
        # unconditional case — reuses conv1/conv2 with ordinary BatchNorm
        self.model = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            self.conv1,
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            self.conv2
        )
        # shortcut: upsample then 1x1 conv to match channel count
        self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=bias) #h=h
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        self.bypass = nn.Sequential(
            nn.Upsample(scale_factor=2),
            self.bypass_conv,
        )
    def forward(self, x, y):
        if y is not None:
            # conditional path: CBN -> ReLU -> upsample -> conv, twice
            out = self.condbn1(x, y)
            out = self.relu(out)
            out = self.upsample(out)
            out = self.conv1(out)
            out = self.condbn2(out, y)
            out = self.relu(out)
            out = self.conv2(out)
            out = out + self.bypass(x)
        else:
            out = self.model(x) + self.bypass(x)
        return out
class ResBlockDiscriminator(nn.Module):
    """Spectrally-normalized residual block for the discriminator.

    stride == 1 keeps spatial size; any other stride appends an AvgPool2d
    (kernel 2) to both the residual branch and the shortcut for downsampling.
    """
    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        if stride == 1:
            self.model = nn.Sequential(
                nn.ReLU(),
                spectral_norm(self.conv1),
                nn.ReLU(),
                spectral_norm(self.conv2)
            )
        else:
            self.model = nn.Sequential(
                nn.ReLU(),
                spectral_norm(self.conv1),
                nn.ReLU(),
                spectral_norm(self.conv2),
                nn.AvgPool2d(2, stride=stride, padding=0)
            )
        # 1x1 shortcut to match the channel count (spectrally normalized too)
        self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=True)
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        if stride != 1:
            self.bypass = nn.Sequential(
                spectral_norm(self.bypass_conv),
                nn.AvgPool2d(2, stride=stride, padding=0)
            )
        else:
            self.bypass = nn.Sequential(
                spectral_norm(self.bypass_conv),
            )
    def forward(self, x):
        return self.model(x) + self.bypass(x)
# special ResBlock just for the first layer of the discriminator
class FirstResBlockDiscriminator(nn.Module):
    """First discriminator block: like ResBlockDiscriminator with stride 2, but
    without the leading ReLU (the input is a raw image) and with the shortcut
    pooling *before* its 1x1 conv.
    """
    def __init__(self, in_channels, out_channels, stride=1):
        super(FirstResBlockDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
        self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0, bias=True)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        # we don't want to apply ReLU activation to raw image before convolution transformation.
        self.model = nn.Sequential(
            spectral_norm(self.conv1),
            nn.ReLU(),
            spectral_norm(self.conv2),
            nn.AvgPool2d(2)
        )
        # NOTE(review): the `stride` argument is ignored — both branches always
        # downsample by 2 via AvgPool2d(2).
        self.bypass = nn.Sequential(
            nn.AvgPool2d(2),
            spectral_norm(self.bypass_conv),
        )
    def forward(self, x):
        return self.model(x) + self.bypass(x)
class SNGAN_Generator(nn.Module):
    """SNGAN generator: z (z_dim) + condition embedding y (dim_embed) -> 64x64 image.

    A dense layer maps z to a 4x4 feature map; four conditional residual
    blocks each double the resolution (4 -> 8 -> 16 -> 32 -> 64); a final
    BN-ReLU-Conv-Tanh maps features to `nc` image channels in [-1, 1].
    """
    def __init__(self, z_dim=256, nc=3, gene_ch=128, dim_embed=128):
        super(SNGAN_Generator, self).__init__()
        self.z_dim = z_dim
        self.dim_embed = dim_embed
        self.gene_ch = gene_ch
        self.dense = nn.Linear(self.z_dim, 4 * 4 * gene_ch*16, bias=True)
        # `self.final` is bound to the 3x3 conv first so it can be initialized...
        self.final = nn.Conv2d(gene_ch, nc, 3, stride=1, padding=1, bias=True)
        nn.init.xavier_uniform_(self.dense.weight.data, 1.)
        nn.init.xavier_uniform_(self.final.weight.data, 1.)
        self.genblock0 = ResBlockGenerator(gene_ch*16, gene_ch*8, dim_embed=dim_embed) #4--->8
        self.genblock1 = ResBlockGenerator(gene_ch*8, gene_ch*4, dim_embed=dim_embed) #8--->16
        self.genblock2 = ResBlockGenerator(gene_ch*4, gene_ch*2, dim_embed=dim_embed) #16--->32
        self.genblock3 = ResBlockGenerator(gene_ch*2, gene_ch, dim_embed=dim_embed) #32--->64
        # ...then rebound to a Sequential that wraps that same (initialized) conv.
        self.final = nn.Sequential(
            nn.BatchNorm2d(gene_ch),
            nn.ReLU(),
            self.final,
            nn.Tanh()
        )
    def forward(self, z, y): #y is embedded in the feature space
        z = z.view(z.size(0), z.size(1))
        out = self.dense(z)
        out = out.view(-1, self.gene_ch*16, 4, 4)
        out = self.genblock0(out, y)
        out = self.genblock1(out, y)
        out = self.genblock2(out, y)
        out = self.genblock3(out, y)
        out = self.final(out)
        return out
class SNGAN_Discriminator(nn.Module):
    """Projection discriminator with spectral normalization.

    Feature extractor downsamples 64 -> 4; the score is the sum of an
    unconditional linear term (linear1) and the inner product between the
    features and a linear embedding of y (the "projection" term).
    """
    def __init__(self, nc=3, disc_ch=128, dim_embed=128):
        super(SNGAN_Discriminator, self).__init__()
        self.dim_embed = dim_embed
        self.disc_ch = disc_ch
        self.discblock1 = nn.Sequential(
            FirstResBlockDiscriminator(nc, disc_ch, stride=2), #64--->32
            ResBlockDiscriminator(disc_ch, disc_ch*2, stride=2), #32--->16
            ResBlockDiscriminator(disc_ch*2, disc_ch*4, stride=2), #16--->8
        )
        self.discblock2 = ResBlockDiscriminator(disc_ch*4, disc_ch*8, stride=2) #8--->4
        self.discblock3 = nn.Sequential(
            ResBlockDiscriminator(disc_ch*8, disc_ch*16, stride=1), #4--->4;
            nn.ReLU(),
        )
        # unconditional score head
        self.linear1 = nn.Linear(disc_ch*16*4*4, 1, bias=True)
        nn.init.xavier_uniform_(self.linear1.weight.data, 1.)
        self.linear1 = spectral_norm(self.linear1)
        # condition embedding used in the projection term
        self.linear2 = nn.Linear(self.dim_embed, disc_ch*16*4*4, bias=False)
        nn.init.xavier_uniform_(self.linear2.weight.data, 1.)
        self.linear2 = spectral_norm(self.linear2)
    def forward(self, x, y):
        output = self.discblock1(x)
        output = self.discblock2(output)
        output = self.discblock3(output)
        output = output.view(-1, self.disc_ch*16*4*4)
        # projection term: <features, V y>
        output_y = torch.sum(output*self.linear2(y), 1, keepdim=True)
        output = self.linear1(output) + output_y
        return output.view(-1, 1)
# GPU smoke test: build G and D, push random noise + embeddings through both.
if __name__ == "__main__":
    netG = SNGAN_Generator(z_dim=256, gene_ch=128, dim_embed=128).cuda()
    netD = SNGAN_Discriminator(disc_ch=128, dim_embed=128).cuda()
    # netG = nn.DataParallel(netG)
    # netD = nn.DataParallel(netD)
    N=4
    z = torch.randn(N, 256).cuda()
    # y is a 128-d label embedding, not a raw label
    y = torch.randn(N, 128).cuda()
    x = netG(z,y)
    o = netD(x,y)
    print(x.size())
    print(o.size())
| 8,633 | 34.240816 | 96 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/densenet.py | '''DenseNet in PyTorch.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
NC=3
IMG_SIZE = 64
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-Conv1x1 followed by BN-ReLU-Conv3x3,
    with the new feature maps concatenated onto the input (dense connectivity).
    """

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inner = 4 * growth_rate  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inner, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inner)
        self.conv2 = nn.Conv2d(inner, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # append the growth_rate new channels to the input
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """DenseNet transition layer: BN-ReLU-1x1 conv to compress channels,
    then 2x2 average pooling to halve the spatial resolution.
    """

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    """DenseNet backbone adapted for regression: a stride-2 stem, four dense
    blocks with transitions, and a 3-layer MLP head producing one scalar.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2*growth_rate
        # self.conv1 = nn.Conv2d(NC, num_planes, kernel_size=3, padding=1, bias=False)
        # stem: stride-2 conv halves the resolution (original used stride 1)
        self.conv1 = nn.Sequential(
            nn.Conv2d(NC, num_planes, kernel_size=4, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(num_planes),
            nn.ReLU(True),
            # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        # each dense block adds nblocks[i]*growth_rate channels; each transition
        # compresses channels by `reduction` and halves the resolution
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        # regression head; final ReLU clamps predictions to >= 0
        self.linear = nn.Sequential(
            nn.Linear(num_planes, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` dense layers; channels grow by growth_rate each layer."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # assumes 64x64 input so the map is 4x4 here — TODO confirm for other sizes
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    """DenseNet-121 (blocks 6/12/24/16, growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)
def DenseNet169():
    """DenseNet-169 (blocks 6/12/32/32, growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)
def DenseNet201():
    """DenseNet-201 (blocks 6/12/48/32, growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)
def DenseNet161():
    """DenseNet-161 (blocks 6/12/36/24, growth rate 48)."""
    return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)
def test_densenet():
    """Smoke test: forward a random batch through DenseNet-121 and print the shape."""
    model = DenseNet121()
    batch = torch.randn(2, NC, IMG_SIZE, IMG_SIZE)
    prediction = model(Variable(batch))
    print(prediction.shape)
# Run the smoke test when executed as a script.
if __name__ == "__main__":
    test_densenet()
| 4,332 | 31.335821 | 96 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/resnetv2.py | '''
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3
IMG_SIZE = 64
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BN, plus an identity shortcut
    (or a 1x1-conv projection when the shape changes).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            # project the input so it matches the residual branch's shape
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch += self.shortcut(x)
        return F.relu(branch)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4), with a
    projected shortcut whenever stride or channel count changes.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            # 1x1 projection to align the shortcut with the residual branch
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        branch += self.shortcut(x)
        return F.relu(branch)
class ResNet(nn.Module):
    """ResNet backbone for scalar regression: four stages (each halving the
    resolution), global average pooling, and a 3-layer MLP head.
    """
    def __init__(self, block, num_blocks, nc=NC):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.block1 = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            self._make_layer(block, 64, num_blocks[0], stride=2),  # h=h/2 32
        )
        self.block2 = self._make_layer(block, 128, num_blocks[1], stride=2)  # h=h/2 16
        self.block3 = self._make_layer(block, 256, num_blocks[2], stride=2)  # h=h/2 8
        self.block4 = self._make_layer(block, 512, num_blocks[3], stride=2)  # h=h/2 4
        # assumes 64x64 input so the final map is 4x4 — TODO confirm for other sizes
        self.pool = nn.AvgPool2d(kernel_size=4)
        # regression head; final ReLU clamps predictions to >= 0
        linear_layers = [
            nn.Linear(512*block.expansion, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        ]
        self.linear = nn.Sequential(*linear_layers)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first uses the given stride."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        ft1 = self.block1(x)
        ft2 = self.block2(ft1)
        ft3 = self.block3(ft2)
        ft4 = self.block4(ft3)
        out = self.pool(ft4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18():
    """ResNet-18 (BasicBlock, 2-2-2-2)."""
    return ResNet(BasicBlock, [2,2,2,2])
def ResNet34():
    """ResNet-34 (BasicBlock, 3-4-6-3)."""
    return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
    """ResNet-50 (Bottleneck, 3-4-6-3)."""
    return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
    """ResNet-101 (Bottleneck, 3-4-23-3)."""
    return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
    """ResNet-152 (Bottleneck, 3-8-36-3)."""
    return ResNet(Bottleneck, [3,8,36,3])
# GPU smoke test: DataParallel ResNet-34 forward pass on a random batch.
if __name__ == "__main__":
    net = ResNet34().cuda()
    net = nn.DataParallel(net)
    x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()
    out = net(x)
    print(out.size())
| 4,623 | 30.455782 | 102 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/cDR_MLP.py | '''
Conditional Density Ration Estimation via Multilayer Perceptron
Multilayer Perceptron : trained to model density ratio in a feature space
Its input is the output of a pretrained Deep CNN, say ResNet-34
'''
import torch
import torch.nn as nn
IMG_SIZE=64
NC=3
cfg = {"MLP3": [512,256,128],
"MLP5": [1024,512,256,128,64]}
class cDR_MLP(nn.Module):
    """MLP for conditional density-ratio estimation.

    The input feature vector is concatenated with a label embedding before the
    first hidden layer; the final ReLU keeps the estimated ratio non-negative.
    """

    def __init__(self, MLP_name, p_dropout=0.5, init_in_dim = IMG_SIZE**2*NC, dim_embed = 128):
        super(cDR_MLP, self).__init__()
        self.init_in_dim = init_in_dim
        self.p_dropout=p_dropout
        self.dim_embed = dim_embed
        # hidden stack from the config table, then a scalar ReLU-clamped output
        stack = self._make_layers(cfg[MLP_name])
        stack.append(nn.Linear(cfg[MLP_name][-1], 1))
        stack.append(nn.ReLU())
        self.main = nn.Sequential(*stack)

    def _make_layers(self, layer_widths):
        blocks = []
        prev_dim = self.init_in_dim  # initial input dimension
        for width in layer_widths:
            # the first hidden layer also consumes the dim_embed label embedding
            extra = self.dim_embed if prev_dim == self.init_in_dim else 0
            blocks += [
                nn.Linear(prev_dim + extra, width),
                nn.GroupNorm(8, width),
                nn.ReLU(inplace=True),
                nn.Dropout(self.p_dropout),  # do we really need dropout?
            ]
            prev_dim = width
        return blocks

    def forward(self, x, labels):
        joint = torch.cat((labels, x), -1)
        return self.main(joint)
if __name__ == "__main__":
    # Smoke test: 5 flattened images + 128-d label embeddings -> density ratios.
    net = cDR_MLP('MLP5').cuda()
    x = torch.randn((5,IMG_SIZE**2*NC)).cuda()
    labels = torch.randn((5, 128)).cuda()
    out = net(x, labels)
    print(out.size())
    def get_parameter_number(net):
        # Count all vs. trainable parameters of the model.
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}
    print(get_parameter_number(net))
print(get_parameter_number(net)) | 2,157 | 29.394366 | 95 | py |
cGAN-KD | cGAN-KD-main/UTKFace/models/mobilenet.py | import torch
from torch import nn
# from .utils import load_state_dict_from_url
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    """Conv2d -> normalization -> ReLU6, with 'same'-style padding for odd
    kernel sizes (the MobileNetV2 building brick).
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        pad = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False),
            norm_layer(out_planes),
            nn.ReLU6(inplace=True),
        )
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: pointwise expand -> depthwise 3x3
    -> linear pointwise project, with an identity shortcut when shape allows.
    """

    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        # identity shortcut only when spatial size and channel count are preserved
        self.use_res_connect = self.stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        layers.extend([
            # depthwise 3x3
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
            # linear pointwise projection (no activation)
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        branch = self.conv(x)
        return x + branch if self.use_res_connect else branch
class MobileNetV2(nn.Module):
    def __init__(self,
                 num_classes=1,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None,
                 norm_layer=None):
        """
        MobileNet V2 main class (adapted with a 3-layer MLP regression head).
        Args:
            num_classes (int): Number of output units of the head (default 1,
                i.e. scalar regression; the trailing ReLU clamps outputs >= 0)
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t (expand ratio), c (channels), n (repeats), s (stride)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each group downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building the head. BUGFIX: the output width previously hard-coded 1
        # and ignored `num_classes`; it now honors the parameter (default 1,
        # so existing callers are unaffected).
        self.classifier = nn.Sequential(
            nn.Linear(self.last_channel, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, num_classes),
            nn.ReLU(),
        )
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MobileNetV2(**kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
                                              progress=progress)
        # BUGFIX: the torchvision checkpoint's classifier head does not match
        # our regression head, so a strict load would raise. Keep only the
        # entries whose name and shape match (the feature extractor) and load
        # them non-strictly.
        own_state = model.state_dict()
        matched = {k: v for k, v in state_dict.items()
                   if k in own_state and own_state[k].shape == v.shape}
        model.load_state_dict(matched, strict=False)
    return model
if __name__ == "__main__":
    # GPU smoke test plus a parameter count.
    net = mobilenet_v2().cuda()
    x = torch.randn(16,3,64,64).cuda()
    out = net(x)
    print(out.size())
    def count_parameters(module):
        # Total number of parameters (trainable or not).
        print('Number of parameters: {}'.format(sum([p.data.nelement() for p in module.parameters()])))
    count_parameters(net)
| 7,609 | 35.238095 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.