Update app.py
Browse files
app.py
CHANGED
|
@@ -118,7 +118,7 @@ thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
|
|
| 118 |
@torch.no_grad()
|
| 119 |
@spaces.GPU
|
| 120 |
def sample_then_run(net):
|
| 121 |
-
|
| 122 |
# get mean and standard deviation for each principal component
|
| 123 |
m = torch.mean(proj, 0)
|
| 124 |
standev = torch.std(proj, 0)
|
|
@@ -142,6 +142,7 @@ def sample_then_run(net):
|
|
| 142 |
@torch.no_grad()
|
| 143 |
@spaces.GPU()
|
| 144 |
def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
|
|
|
|
| 145 |
mean.to(device)
|
| 146 |
std.to(device)
|
| 147 |
v.to(device)
|
|
@@ -206,12 +207,11 @@ def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
|
|
| 206 |
@torch.no_grad()
|
| 207 |
@spaces.GPU()
|
| 208 |
def edit_inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
|
|
|
|
| 209 |
mean.to(device)
|
| 210 |
std.to(device)
|
| 211 |
v.to(device)
|
| 212 |
- print(young.device)
|
| 213 |
young.to(device)
|
| 214 |
- print(young.device)
|
| 215 |
pointy.to(device)
|
| 216 |
wavy.to(device)
|
| 217 |
thick.to(device)
|
|
@@ -234,9 +234,6 @@ def edit_inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, see
|
|
| 234 |
pcs_original = weights.shape[1]
|
| 235 |
pcs_edits = young.shape[1]
|
| 236 |
padding = torch.zeros((1,pcs_original-pcs_edits)).to(device)
|
| 237 |
- print(device)
|
| 238 |
- print(padding.device)
|
| 239 |
- print(young.device)
|
| 240 |
young_pad = torch.cat((young, padding), 1)
|
| 241 |
pointy_pad = torch.cat((pointy, padding), 1)
|
| 242 |
wavy_pad = torch.cat((wavy, padding), 1)
|
|
|
|
| 118 |
@torch.no_grad()
|
| 119 |
@spaces.GPU
|
| 120 |
def sample_then_run(net):
|
| 121 |
+ device = "cuda"
|
| 122 |
# get mean and standard deviation for each principal component
|
| 123 |
m = torch.mean(proj, 0)
|
| 124 |
standev = torch.std(proj, 0)
|
|
|
|
| 142 |
@torch.no_grad()
|
| 143 |
@spaces.GPU()
|
| 144 |
def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
|
| 145 |
+ device = "cuda"
|
| 146 |
mean.to(device)
|
| 147 |
std.to(device)
|
| 148 |
v.to(device)
|
|
|
|
| 207 |
@torch.no_grad()
|
| 208 |
@spaces.GPU()
|
| 209 |
def edit_inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
|
| 210 |
+ device = "cuda"
|
| 211 |
mean.to(device)
|
| 212 |
std.to(device)
|
| 213 |
v.to(device)
|
|
|
|
| 214 |
young.to(device)
|
|
|
|
| 215 |
pointy.to(device)
|
| 216 |
wavy.to(device)
|
| 217 |
thick.to(device)
|
|
|
|
| 234 |
pcs_original = weights.shape[1]
|
| 235 |
pcs_edits = young.shape[1]
|
| 236 |
padding = torch.zeros((1,pcs_original-pcs_edits)).to(device)
|
|
|
|
|
|
|
|
|
|
| 237 |
young_pad = torch.cat((young, padding), 1)
|
| 238 |
pointy_pad = torch.cat((pointy, padding), 1)
|
| 239 |
wavy_pad = torch.cat((wavy, padding), 1)
|