Update app.py
Browse files
app.py
CHANGED
|
@@ -881,7 +881,7 @@ num_inference_steps = 50 #@param # Number of denoising steps
|
|
| 881 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 882 |
generator = torch.manual_seed(0) # Seed generator to create the inital latent noise
|
| 883 |
batch_size = 1
|
| 884 |
-
|
| 885 |
|
| 886 |
# Prep text
|
| 887 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
@@ -936,7 +936,7 @@ for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps))
|
|
| 936 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 937 |
|
| 938 |
# Calculate loss
|
| 939 |
-
loss = orange_loss(denoised_images) * 200
|
| 940 |
|
| 941 |
# Occasionally print it out
|
| 942 |
if i%10==0:
|
|
@@ -963,7 +963,7 @@ num_inference_steps = 50 #@param # Number of denoising steps
|
|
| 963 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 964 |
generator = torch.manual_seed(77) # Seed generator to create the inital latent noise
|
| 965 |
batch_size = 1
|
| 966 |
-
|
| 967 |
|
| 968 |
# Prep text
|
| 969 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
@@ -1018,7 +1018,7 @@ for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps))
|
|
| 1018 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 1019 |
|
| 1020 |
# Calculate loss
|
| 1021 |
-
loss = orange_loss(denoised_images) * 200
|
| 1022 |
|
| 1023 |
# Occasionally print it out
|
| 1024 |
if i%10==0:
|
|
@@ -1045,7 +1045,7 @@ num_inference_steps = 50 #@param # Number of denoising steps
|
|
| 1045 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 1046 |
generator = torch.manual_seed(42) # Seed generator to create the inital latent noise
|
| 1047 |
batch_size = 1
|
| 1048 |
-
|
| 1049 |
|
| 1050 |
# Prep text
|
| 1051 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
@@ -1100,7 +1100,7 @@ for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps))
|
|
| 1100 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 1101 |
|
| 1102 |
# Calculate loss
|
| 1103 |
-
loss = orange_loss(denoised_images) * 200
|
| 1104 |
|
| 1105 |
# Occasionally print it out
|
| 1106 |
if i%10==0:
|
|
|
|
| 881 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 882 |
generator = torch.manual_seed(0) # Seed generator to create the inital latent noise
|
| 883 |
batch_size = 1
|
| 884 |
+
orange_loss_scale = 200 #@param
|
| 885 |
|
| 886 |
# Prep text
|
| 887 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
|
|
| 936 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 937 |
|
| 938 |
# Calculate loss
|
| 939 |
+
loss = orange_loss(denoised_images) * orange_loss_scale
|
| 940 |
|
| 941 |
# Occasionally print it out
|
| 942 |
if i%10==0:
|
|
|
|
| 963 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 964 |
generator = torch.manual_seed(77) # Seed generator to create the inital latent noise
|
| 965 |
batch_size = 1
|
| 966 |
+
orange_loss_scale = 200 #@param
|
| 967 |
|
| 968 |
# Prep text
|
| 969 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
|
|
| 1018 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 1019 |
|
| 1020 |
# Calculate loss
|
| 1021 |
+
loss = orange_loss(denoised_images) * orange_loss_scale
|
| 1022 |
|
| 1023 |
# Occasionally print it out
|
| 1024 |
if i%10==0:
|
|
|
|
| 1045 |
guidance_scale = 8 #@param # Scale for classifier-free guidance
|
| 1046 |
generator = torch.manual_seed(42) # Seed generator to create the inital latent noise
|
| 1047 |
batch_size = 1
|
| 1048 |
+
orange_loss_scale = 200 #@param
|
| 1049 |
|
| 1050 |
# Prep text
|
| 1051 |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
|
|
| 1100 |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
|
| 1101 |
|
| 1102 |
# Calculate loss
|
| 1103 |
+
loss = orange_loss(denoised_images) * orange_loss_scale
|
| 1104 |
|
| 1105 |
# Occasionally print it out
|
| 1106 |
if i%10==0:
|