profaker committed on
Commit
3581316
·
verified ·
1 Parent(s): 4f12f76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -8
app.py CHANGED
@@ -4,7 +4,7 @@ import diffusers
4
  from diffusers.models import AutoencoderKL
5
 
6
  vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
7
- pipeline = diffusers.DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", vae=vae).to("cuda")
8
 
9
  def read_content(file_path: str) -> str:
10
  """read the content of target file
@@ -15,7 +15,18 @@ def read_content(file_path: str) -> str:
15
  return content
16
 
17
 
18
- def predict(prompt, negative_prompt, guidance_scale, num_inference_steps, scheduler, lora, lora_weight):
 
 
 
 
 
 
 
 
 
 
 
19
  scheduler_class_name = scheduler.split("-")[0]
20
  add_kwargs = {}
21
  if len(scheduler.split("-")) > 1:
@@ -23,13 +34,17 @@ def predict(prompt, negative_prompt, guidance_scale, num_inference_steps, schedu
23
  if len(scheduler.split("-")) > 2:
24
  add_kwargs["algorithm_type"] = "sde-dpmsolver++"
25
  scheduler = getattr(diffusers, scheduler_class_name)
 
26
  pipeline.scheduler = scheduler.from_pretrained("emilianJR/epiCRealism", subfolder="scheduler", **add_kwargs)
 
27
  if lora == "add_detail":
28
  lora = "profaker/add_detail_lora"
29
  if lora == "nursing_job":
30
  lora = "profaker/Nursing_job_lora"
31
  if lora == "nsfw_POV":
32
  lora = "profaker/NSFW_POV_lora"
 
 
33
  if lora == "None":
34
  images = pipeline(
35
  prompt=prompt,
@@ -45,6 +60,7 @@ def predict(prompt, negative_prompt, guidance_scale, num_inference_steps, schedu
45
  return images
46
 
47
  pipeline.load_lora_weights(lora)
 
48
  images = pipeline(
49
  prompt=prompt,
50
  negative_prompt=negative_prompt,
@@ -52,6 +68,7 @@ def predict(prompt, negative_prompt, guidance_scale, num_inference_steps, schedu
52
  guidance_scale=guidance_scale,
53
  cross_attention_kwargs={"scale": lora_weight}
54
  ).images[0]
 
55
  print("Prompt", prompt)
56
  print("Negative", negative_prompt)
57
  print("Steps", num_inference_steps)
@@ -103,6 +120,9 @@ with image_blocks as demo:
103
  with gr.Row(equal_height=True):
104
  negative_prompt = gr.Textbox(label="negative_prompt", placeholder="Your negative prompt",
105
  info="what you don't want to see in the image")
 
 
 
106
  with gr.Row(equal_height=True):
107
  schedulers = ["DEISMultistepScheduler", "HeunDiscreteScheduler", "EulerDiscreteScheduler",
108
  "DPMSolverMultistepScheduler", "DPMSolverMultistepScheduler-Karras",
@@ -110,18 +130,18 @@ with image_blocks as demo:
110
  scheduler = gr.Dropdown(label="Schedulers", choices=schedulers,
111
  value="DPMSolverMultistepScheduler-Karras")
112
  with gr.Row(equal_height=True):
113
- lora = ['None','add_detail', 'nursing_job', 'nsfw_POV']
114
- lora = gr.Dropdown(label='Lora', choices=lora, value="None")
115
- lora_weight = [-1, -0.5, 0, 0.5, 1]
116
- lora_weight = gr.Dropdown(label="Lora Weights", choices=lora_weight, value=0.5)
117
  with gr.Row(equal_height=True):
118
  btn = gr.Button("Generate", elem_id="run_button")
119
 
120
  with gr.Column():
121
  image_out = gr.Image(label="Output", elem_id="output-img", height=512, width=512)
122
- btn.click(fn=predict, inputs=[prompt, negative_prompt, guidance_scale, steps, scheduler, lora, lora_weight],
123
  outputs=[image_out], api_name='run')
124
- prompt.submit(fn=predict, inputs=[prompt, negative_prompt, guidance_scale, steps, scheduler, lora, lora_weight],
125
  outputs=[image_out])
126
 
127
  image_blocks.queue(max_size=25, api_open=True).launch(show_api=True)
 
4
  from diffusers.models import AutoencoderKL
5
 
6
  vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
7
+
8
 
9
  def read_content(file_path: str) -> str:
10
  """read the content of target file
 
15
  return content
16
 
17
 
18
+ def predict(prompt, negative_prompt, guidance_scale, num_inference_steps,model, scheduler, lora, lora_weight):
19
+ pipeline = diffusers.DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", vae=vae).to("cuda")
20
+ pipeline.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
21
+ if model == "Realistic_V5.1":
22
+ pipeline = diffusers.DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", vae=vae).to("cuda")
23
+ if model == "Realistic_V5.0":
24
+ pipeline = diffusers.DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.0_noVAE", vae=vae).to("cuda")
25
+ pipeline.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
26
+ if model == "EpicRealism":
27
+ pipeline = diffusers.DiffusionPipeline.from_pretrained("emilianJR/epiCRealism", vae=vae).to("cuda")
28
+ pipeline.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
29
+
30
  scheduler_class_name = scheduler.split("-")[0]
31
  add_kwargs = {}
32
  if len(scheduler.split("-")) > 1:
 
34
  if len(scheduler.split("-")) > 2:
35
  add_kwargs["algorithm_type"] = "sde-dpmsolver++"
36
  scheduler = getattr(diffusers, scheduler_class_name)
37
+
38
  pipeline.scheduler = scheduler.from_pretrained("emilianJR/epiCRealism", subfolder="scheduler", **add_kwargs)
39
+
40
  if lora == "add_detail":
41
  lora = "profaker/add_detail_lora"
42
  if lora == "nursing_job":
43
  lora = "profaker/Nursing_job_lora"
44
  if lora == "nsfw_POV":
45
  lora = "profaker/NSFW_POV_lora"
46
+ if lora == "nayanthara":
47
+ lora = "profaker/Naya_lora"
48
  if lora == "None":
49
  images = pipeline(
50
  prompt=prompt,
 
60
  return images
61
 
62
  pipeline.load_lora_weights(lora)
63
+
64
  images = pipeline(
65
  prompt=prompt,
66
  negative_prompt=negative_prompt,
 
68
  guidance_scale=guidance_scale,
69
  cross_attention_kwargs={"scale": lora_weight}
70
  ).images[0]
71
+
72
  print("Prompt", prompt)
73
  print("Negative", negative_prompt)
74
  print("Steps", num_inference_steps)
 
120
  with gr.Row(equal_height=True):
121
  negative_prompt = gr.Textbox(label="negative_prompt", placeholder="Your negative prompt",
122
  info="what you don't want to see in the image")
123
+ with gr.Row(equal_height=True):
124
+ models = ['Realistic_V6.0','Realistic_V5.1','Realistic_V5.0','EpicRealism']
125
+ model = gr.Dropdown(label="Models",choices=models,value="Realistic_V6.0")
126
  with gr.Row(equal_height=True):
127
  schedulers = ["DEISMultistepScheduler", "HeunDiscreteScheduler", "EulerDiscreteScheduler",
128
  "DPMSolverMultistepScheduler", "DPMSolverMultistepScheduler-Karras",
 
130
  scheduler = gr.Dropdown(label="Schedulers", choices=schedulers,
131
  value="DPMSolverMultistepScheduler-Karras")
132
  with gr.Row(equal_height=True):
133
+ loras = ['None','add_detail', 'nursing_job', 'nsfw_POV','nayanthara']
134
+ lora = gr.Dropdown(label='Lora', choices=loras, value="None")
135
+ lora_weights = [-1, -0.5, 0, 0.5, 1]
136
+ lora_weight = gr.Dropdown(label="Lora Weights", choices=lora_weights, value=0.5)
137
  with gr.Row(equal_height=True):
138
  btn = gr.Button("Generate", elem_id="run_button")
139
 
140
  with gr.Column():
141
  image_out = gr.Image(label="Output", elem_id="output-img", height=512, width=512)
142
+ btn.click(fn=predict, inputs=[prompt, negative_prompt, guidance_scale, steps, model,scheduler, lora, lora_weight],
143
  outputs=[image_out], api_name='run')
144
+ prompt.submit(fn=predict, inputs=[prompt, negative_prompt, guidance_scale, steps, model,scheduler, lora, lora_weight],
145
  outputs=[image_out])
146
 
147
  image_blocks.queue(max_size=25, api_open=True).launch(show_api=True)