xywwww committed on
Commit
f2e5893
·
verified ·
1 Parent(s): 0c4b897

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -1,11 +1,17 @@
1
  import torch
2
  import random
 
3
  import gradio as gr
4
- from diffusers import StableDiffusionControlNetPipeline
5
  from annotator.util import resize_image, HWC3
 
6
 
7
  # Load the pipeline
8
- pipe = StableDiffusionControlNetPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
 
 
 
 
9
 
10
  def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold):
11
  with torch.no_grad():
@@ -13,9 +19,10 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
13
 
14
  if seed == -1:
15
  seed = random.randint(0, 65535)
16
- generator = torch.manual_seed(seed)
17
 
18
  # Generate images using the pipeline
 
19
  images = pipe(prompt=prompt + ', ' + a_prompt, num_inference_steps=ddim_steps, guidance_scale=scale, generator=generator, num_images_per_prompt=num_samples).images
20
 
21
  results = [np.array(image) for image in images]
 
1
  import torch
2
  import random
3
+ import numpy as np
4
  import gradio as gr
5
+ from pytorch_lightning import seed_everything
6
  from annotator.util import resize_image, HWC3
7
+ from diffusers import StableDiffusionControlNetPipeline
8
 
9
  # Load the pipeline
10
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
11
+ "CompVis/stable-diffusion-v1-4",
12
+ controlnet="path/to/controlnet/model",
13
+ image_encoder="path/to/image_encoder/model"
14
+ ).to("cuda")
15
 
16
  def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold):
17
  with torch.no_grad():
 
19
 
20
  if seed == -1:
21
  seed = random.randint(0, 65535)
22
+ seed_everything(seed)
23
 
24
  # Generate images using the pipeline
25
+ generator = torch.Generator("cuda").manual_seed(seed)
26
  images = pipe(prompt=prompt + ', ' + a_prompt, num_inference_steps=ddim_steps, guidance_scale=scale, generator=generator, num_images_per_prompt=num_samples).images
27
 
28
  results = [np.array(image) for image in images]