Spaces:
Runtime error
Runtime error
Upload sd_controller.py
Browse files- sd/sd_controller.py +40 -40
sd/sd_controller.py
CHANGED
|
@@ -29,46 +29,46 @@ class Controller():
|
|
| 29 |
scheduler=self.scheduler)
|
| 30 |
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
|
| 73 |
|
| 74 |
|
|
|
|
| 29 |
scheduler=self.scheduler)
|
| 30 |
|
| 31 |
|
| 32 |
+
@spaces.GPU
def get_first_result(self, img, prompt, negative_prompt,
                     controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0):
    """Run the first-stage ControlNet img2img pipeline on a sketch.

    Args:
        img: Input sketch image; fed to ``sketch_process`` which returns
            the img2img substrate and a resized control image.
        prompt: Text prompt; passed through ``prompt_preprocess`` first.
        negative_prompt: Negative text prompt for the pipeline.
        controlnet_scale: ControlNet conditioning scale (cast to float
            before being handed to the pipeline).
        strength: img2img denoising strength.
        n_steps: Number of inference steps.
        eta: DDIM eta parameter forwarded to the pipeline.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # BUG FIX: the original body called sketch_process(input_image), but
    # no name `input_image` exists in this scope — the parameter is `img`.
    # That raised a NameError on every call.
    substrate, resized_image = sketch_process(img)
    prompt = prompt_preprocess(prompt)

    result = self.first_pipe(
        image=substrate,
        control_image=resized_image,
        strength=strength,
        prompt=prompt,
        negative_prompt=negative_prompt,
        controlnet_conditioning_scale=float(controlnet_scale),
        # Fixed seed so repeated calls with the same inputs are reproducible.
        generator=torch.manual_seed(0),
        num_inference_steps=n_steps,
        eta=eta,
    )

    return result.images[0]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@spaces.GPU
def get_second_result(self, img, prompt, negative_prompt,
                      g_scale=7.5, n_steps=25,
                      adapter_scale=0.9, adapter_factor=1.0):
    """Run the second-stage adapter pipeline on a sketch image.

    Args:
        img: Input image; preprocessed by ``self.detector`` into a
            grayscale conditioning map.
        prompt: Text prompt for generation.
        negative_prompt: Negative text prompt.
        g_scale: Classifier-free guidance scale.
        n_steps: Number of inference steps.
        adapter_scale: Adapter conditioning scale.
        adapter_factor: Adapter conditioning factor.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # Turn the sketch into a single-channel ("L" mode) conditioning map.
    control_map = self.detector(
        img,
        detect_resolution=1024,
        image_resolution=1024,
        apply_filter=True,
    ).convert("L")

    # Collect all pipeline arguments in one place for readability.
    pipe_kwargs = dict(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=control_map,
        guidance_scale=g_scale,
        num_inference_steps=n_steps,
        adapter_conditioning_scale=adapter_scale,
        adapter_conditioning_factor=adapter_factor,
        # Fixed seed keeps the output deterministic across calls.
        generator=torch.manual_seed(42),
    )
    output = self.second_pipe(**pipe_kwargs)

    return output.images[0]
|
| 72 |
|
| 73 |
|
| 74 |
|