Update README.md
README.md
@@ -36,15 +36,12 @@ from diffusers.utils import load_image
 blip_diffusion_pipe= BlipDiffusionPipeline.from_pretrained('ayushtues/blipdiffusion')
 blip_diffusion_pipe.to('cuda')
 
-cond_subject =
-tgt_subject =
-text_prompt_input =
+cond_subject = "dog"
+tgt_subject = "dog"
+text_prompt_input = "swimming underwater"
 
 
 cond_image = load_image("https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg")
-num_output = 1
-
-iter_seed = 88888
 guidance_scale = 7.5
 num_inference_steps = 50
 negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"
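The variables filled in here are the inputs to the subject-driven generation call that follows in the README. As a rough sketch of how they fit together, continuing from the snippet above (the positional argument order and the `neg_prompt` keyword follow the diffusers BLIP-Diffusion docs and are assumptions, not part of this commit):

```python
# Minimal sketch of the generation step these parameters feed into.
# Argument order (prompt, reference image, source subject, target subject)
# and the `neg_prompt` keyword are taken from the diffusers BLIP-Diffusion
# example, not from this diff.
output = blip_diffusion_pipe(
    text_prompt_input,
    cond_image,
    cond_subject,
    tgt_subject,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    neg_prompt=negative_prompt,
    height=512,
    width=512,
).images
output[0].save("image.png")
```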
@@ -76,18 +73,15 @@ from controlnet_aux import CannyDetector
 blip_diffusion_pipe= BlipDiffusionControlNetPipeline.from_pretrained("ayushtues/blipdiffusion-controlnet")
 blip_diffusion_pipe.to('cuda')
 
-style_subject =
-tgt_subject =
-text_prompt =
+style_subject = "flower" # subject that defines the style
+tgt_subject = "teapot" # subject to generate.
+text_prompt = "on a marble table"
 cldm_cond_image = load_image("https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg").resize((512, 512))
 canny = CannyDetector()
 cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type='pil')
-cldm_cond_image = [cldm_cond_image ]
 
 style_image = load_image("https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg")
 
-
-num_output = 1
 guidance_scale = 7.5
 num_inference_steps = 50
 negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"
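Note that the list wrapping of `cldm_cond_image` is dropped, so the Canny map is now passed as a single PIL image. A hedged sketch of the ControlNet-conditioned call these parameters feed into, continuing from the setup above (the positional order prompt, style image, conditioning image, source subject, target subject mirrors the diffusers docs and is an assumption here):

```python
# Sketch of the ControlNet-conditioned generation call. Argument order and
# the `neg_prompt` keyword follow the diffusers BlipDiffusionControlNetPipeline
# example and are assumptions, not part of this commit.
output = blip_diffusion_pipe(
    text_prompt,
    style_image,
    cldm_cond_image,
    style_subject,
    tgt_subject,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    neg_prompt=negative_prompt,
    height=512,
    width=512,
).images
output[0].save("image.png")
```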
@@ -122,19 +116,15 @@ controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble"
 blip_diffusion_pipe.controlnet = controlnet
 blip_diffusion_pipe.to('cuda')
 
-style_subject =
-tgt_subject =
-text_prompt =
+style_subject = "flower" # subject that defines the style
+tgt_subject = "bag" # subject to generate.
+text_prompt = "on a table"
 cldm_cond_image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-scribble/resolve/main/images/bag.png" ).resize((512, 512))
 hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
 cldm_cond_image = hed(cldm_cond_image)
-cldm_cond_image = [cldm_cond_image ]
 
 style_image = load_image("https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg")
 
-
-num_output = 1
-iter_seed = 88888
 guidance_scale = 7.5
 num_inference_steps = 50
 negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"
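This hunk also drops the unused `iter_seed` variable. If reproducible outputs are wanted, the usual diffusers pattern is to seed an explicit generator instead; a sketch under that assumption, continuing from the scribble setup above (the `generator` keyword and the call's argument order come from the standard diffusers pipeline interface, not from this diff):

```python
import torch

# Sketch: keep the old seed value by seeding a torch.Generator explicitly.
# The `generator` keyword and the argument order are assumptions based on
# the standard diffusers pipeline interface.
generator = torch.Generator(device="cuda").manual_seed(88888)
output = blip_diffusion_pipe(
    text_prompt,
    style_image,
    cldm_cond_image,
    style_subject,
    tgt_subject,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    neg_prompt=negative_prompt,
    generator=generator,
    height=512,
    width=512,
).images
output[0].save("image.png")
```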