fix: move model and StableDiffusion pipe to CUDA inside the generate_image function
Browse files
app.py
CHANGED
|
@@ -59,7 +59,6 @@ model = get_model(args, ingr_vocab_size, instrs_vocab_size)
|
|
| 59 |
model.load_state_dict(torch.load(
|
| 60 |
hf_hub_download(REPO_ID, 'data/modelbest.ckpt', token=HF_TOKEN), map_location=map_loc)
|
| 61 |
)
|
| 62 |
-
model = model.to(device)
|
| 63 |
model.eval()
|
| 64 |
model.ingrs_only = False
|
| 65 |
model.recipe_only = False
|
|
@@ -78,12 +77,13 @@ temperature = 1.0
|
|
| 78 |
numgens = 1
|
| 79 |
|
| 80 |
# StableDiffusion
|
| 81 |
-
pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
|
| 82 |
|
| 83 |
@spaces.GPU
|
| 84 |
def generate_image(input_img):
|
| 85 |
|
| 86 |
# Inverse Cooking
|
|
|
|
| 87 |
image_tensor = transform(input_img).unsqueeze(0).to(device)
|
| 88 |
|
| 89 |
for i in range(numgens):
|
|
@@ -107,6 +107,7 @@ def generate_image(input_img):
|
|
| 107 |
# {"prompt": prompt, "ingredients": ingredients, "ingr_ids": ingr_ids}
|
| 108 |
|
| 109 |
# StableDiffusion
|
|
|
|
| 110 |
new_image = pipe(prompt).images[0]
|
| 111 |
return new_image
|
| 112 |
|
|
|
|
| 59 |
model.load_state_dict(torch.load(
|
| 60 |
hf_hub_download(REPO_ID, 'data/modelbest.ckpt', token=HF_TOKEN), map_location=map_loc)
|
| 61 |
)
|
|
|
|
| 62 |
model.eval()
|
| 63 |
model.ingrs_only = False
|
| 64 |
model.recipe_only = False
|
|
|
|
| 77 |
numgens = 1
|
| 78 |
|
| 79 |
# StableDiffusion
|
| 80 |
+
pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
|
| 81 |
|
| 82 |
@spaces.GPU
|
| 83 |
def generate_image(input_img):
|
| 84 |
|
| 85 |
# Inverse Cooking
|
| 86 |
+
model = model.to(device)
|
| 87 |
image_tensor = transform(input_img).unsqueeze(0).to(device)
|
| 88 |
|
| 89 |
for i in range(numgens):
|
|
|
|
| 107 |
# {"prompt": prompt, "ingredients": ingredients, "ingr_ids": ingr_ids}
|
| 108 |
|
| 109 |
# StableDiffusion
|
| 110 |
+
pipe = pipe.to(device)
|
| 111 |
new_image = pipe(prompt).images[0]
|
| 112 |
return new_image
|
| 113 |
|