last
Browse files
app.py
CHANGED
|
@@ -133,8 +133,9 @@ num_images_per_prompt = st.slider('Image Count to be Produced:', min_value=1, ma
|
|
| 133 |
# use seed with torch generator
|
| 134 |
#torch.manual_seed(0)
|
| 135 |
# seed
|
| 136 |
-
|
| 137 |
-
|
|
|
|
| 138 |
|
| 139 |
#generator = torch.Generator(device="cuda").manual_seed(0)
|
| 140 |
run_model_button = st.button("Run Model")
|
|
@@ -143,9 +144,9 @@ run_model_button = st.button("Run Model")
|
|
| 143 |
def initialize_pipe():
|
| 144 |
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",
|
| 145 |
revision="fp16",
|
| 146 |
-
torch_dtype=torch.
|
| 147 |
safety_checker = None,
|
| 148 |
-
requires_safety_checker = False)
|
| 149 |
|
| 150 |
pipe.safety_checker = None
|
| 151 |
pipe.requires_safety_checker = False
|
|
@@ -190,10 +191,10 @@ if run_model_button == True and input_prompt is not None :
|
|
| 190 |
|
| 191 |
pipe = initialize_pipe()
|
| 192 |
|
| 193 |
-
output_height = 128
|
| 194 |
-
output_width = 128
|
| 195 |
|
| 196 |
-
x5 = pipe(image=dif_image, mask_image=inverted_image, num_inference_steps=num_inference_steps,
|
| 197 |
num_images_per_prompt=num_images_per_prompt, prompt=prompt, negative_prompt=input_negative_prompt,
|
| 198 |
height=output_height, width=output_width).images
|
| 199 |
|
|
|
|
# use seed with torch generator
#torch.manual_seed(0)
# seed

# Let the user choose a base seed so results are reproducible across runs.
seed = st.slider('Seed:', min_value=0, max_value=100, value=1)
# One generator per requested image, each offset from the base seed.
# Seeding every generator with the *same* value would give every image in
# the batch identical latents (N identical outputs); offsetting by the
# image index keeps runs reproducible while making each image distinct.
generator = [torch.Generator(device="cuda").manual_seed(seed + idx)
             for idx in range(num_images_per_prompt)]

#generator = torch.Generator(device="cuda").manual_seed(0)
run_model_button = st.button("Run Model")
|
|
|
|
| 144 |
def initialize_pipe():
|
| 145 |
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",
|
| 146 |
revision="fp16",
|
| 147 |
+
torch_dtype=torch.float16, #16 for gpu
|
| 148 |
safety_checker = None,
|
| 149 |
+
requires_safety_checker = False).to("cuda")
|
| 150 |
|
| 151 |
pipe.safety_checker = None
|
| 152 |
pipe.requires_safety_checker = False
|
|
|
|
pipe = initialize_pipe()

# NOTE: output_height/output_width were removed (previously hard-coded to
# 128), so they must no longer be passed to the pipeline — referencing the
# deleted names raised NameError. Omitting height/width lets the pipeline
# fall back to its default output resolution.
# Run the inpainting pipeline: `dif_image` is the source image,
# `inverted_image` the mask, and `generator` the per-image seeded
# torch.Generator list built above; `.images` yields the PIL results.
x5 = pipe(image=dif_image, mask_image=inverted_image,
          num_inference_steps=num_inference_steps,
          generator=generator,
          num_images_per_prompt=num_images_per_prompt,
          prompt=prompt,
          negative_prompt=input_negative_prompt).images