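"""Tests for `EMAModel` from `diffusers.training_utils`.

Covers the default per-parameter EMA implementation (`EMAModelTests`) and the
batched `foreach=True` variant (`EMAModelTestsForeach`).
"""
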
import tempfile
import unittest

import torch

from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device


# Deterministic kernels and seeds so the allclose checks below are reproducible.
enable_full_determinism()


class EMAModelTests(unittest.TestCase):
    model_id = "hf-internal-testing/tiny-stable-diffusion-pipe"
    batch_size = 1
    prompt_length = 77
    text_encoder_hidden_dim = 32
    num_in_channels = 4
    latent_height = latent_width = 64
    generator = torch.manual_seed(0)

    def get_models(self, decay=0.9999):
        unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet")
        unet = unet.to(torch_device)
        ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config)
        return unet, ema_unet

    def get_dummy_inputs(self):
        noisy_latents = torch.randn(
            self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator
        ).to(torch_device)
        timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device)
        encoder_hidden_states = torch.randn(
            self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator
        ).to(torch_device)
        return noisy_latents, timesteps, encoder_hidden_states

    def simulate_backprop(self, unet):
        # Stand-in for an optimizer step: overwrite every parameter with a
        # randomly perturbed version of itself.
        updated_state_dict = {}
        for k, param in unet.state_dict().items():
            updated_param = torch.randn_like(param) + (param * torch.randn_like(param))
            updated_state_dict.update({k: updated_param})
        unet.load_state_dict(updated_state_dict)
        return unet
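
    # For reference, EMAModel.step applies the standard exponential moving
    # average to each shadow parameter (a sketch, assuming diffusers' default
    # schedule, which ramps the effective decay up from 0 over the first steps):
    #
    #   shadow <- shadow - (1 - decay) * (shadow - param)
    #          == decay * shadow + (1 - decay) * param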

    def test_optimization_steps_updated(self):
        unet, ema_unet = self.get_models()
        # A single EMA step should bump the counter to 1.
        ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 1

        # Further steps keep incrementing it.
        for _ in range(2):
            ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 3
|
| | def test_shadow_params_not_updated(self): |
| | unet, ema_unet = self.get_models() |
| | |
| | |
| | |
| | ema_unet.step(unet.parameters()) |
| | orig_params = list(unet.parameters()) |
| | for s_param, param in zip(ema_unet.shadow_params, orig_params): |
| | assert torch.allclose(s_param, param) |
| |
|
| | |
| | |
| | for _ in range(4): |
| | ema_unet.step(unet.parameters()) |
| | for s_param, param in zip(ema_unet.shadow_params, orig_params): |
| | assert torch.allclose(s_param, param) |
| |
|
| | def test_shadow_params_updated(self): |
| | unet, ema_unet = self.get_models() |
| | |
| | |
| | |
| | unet_pseudo_updated_step_one = self.simulate_backprop(unet) |
| |
|
| | |
| | ema_unet.step(unet_pseudo_updated_step_one.parameters()) |
| |
|
| | |
| | orig_params = list(unet_pseudo_updated_step_one.parameters()) |
| | for s_param, param in zip(ema_unet.shadow_params, orig_params): |
| | assert ~torch.allclose(s_param, param) |
| |
|
| | |
| | for _ in range(4): |
| | ema_unet.step(unet.parameters()) |
| | for s_param, param in zip(ema_unet.shadow_params, orig_params): |
| | assert ~torch.allclose(s_param, param) |
| |
|
| | def test_consecutive_shadow_params_updated(self): |
| | |
| | |
| | unet, ema_unet = self.get_models() |
| |
|
| | |
| | unet_step_one = self.simulate_backprop(unet) |
| | ema_unet.step(unet_step_one.parameters()) |
| | step_one_shadow_params = ema_unet.shadow_params |
| |
|
| | |
| | unet_step_two = self.simulate_backprop(unet_step_one) |
| | ema_unet.step(unet_step_two.parameters()) |
| | step_two_shadow_params = ema_unet.shadow_params |
| |
|
| | for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): |
| | assert ~torch.allclose(step_one, step_two) |
| |
|
| | def test_zero_decay(self): |
| | |
| | |
| | |
| | unet, ema_unet = self.get_models(decay=0.0) |
| | unet_step_one = self.simulate_backprop(unet) |
| | ema_unet.step(unet_step_one.parameters()) |
| | step_one_shadow_params = ema_unet.shadow_params |
| |
|
| | unet_step_two = self.simulate_backprop(unet_step_one) |
| | ema_unet.step(unet_step_two.parameters()) |
| | step_two_shadow_params = ema_unet.shadow_params |
| |
|
| | for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): |
| | assert torch.allclose(step_one, step_two) |
| |
|
| | @skip_mps |
| | def test_serialization(self): |
| | unet, ema_unet = self.get_models() |
| | noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() |
| |
|
| | with tempfile.TemporaryDirectory() as tmpdir: |
| | ema_unet.save_pretrained(tmpdir) |
| | loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) |
| | loaded_unet = loaded_unet.to(unet.device) |
| |
|
| | |
| | output = unet(noisy_latents, timesteps, encoder_hidden_states).sample |
| | output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample |
| |
|
| | assert torch.allclose(output, output_loaded, atol=1e-4) |
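
        # EMAModel also exposes copy_to(parameters) to write the averaged
        # weights back into a live model, and store()/restore() to swap them in
        # temporarily (e.g. for validation); those paths are not exercised here.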


class EMAModelTestsForeach(EMAModelTests):
    # Rerun every test above with the batched `foreach=True` code path, which
    # updates all shadow params at once instead of looping per parameter.
    def get_models(self, decay=0.9999):
        unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet")
        unet = unet.to(torch_device)
        ema_unet = EMAModel(
            unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config, foreach=True
        )
        return unet, ema_unet