akiortagem committed on
Commit
bac7c86
·
verified ·
1 Parent(s): 432ea72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -20
app.py CHANGED
@@ -39,29 +39,43 @@ def generate_images(
39
  seed: int,
40
  num_images: int,
41
  ):
42
- # Seed handling (maps to KSampler seed/randomize)
 
 
 
 
 
 
 
 
 
 
43
  if seed < 0:
44
- generator = torch.Generator(device=device)
45
- seed_value = torch.randint(0, 2**63 - 1, (1,), device=device).item()
46
- generator = generator.manual_seed(seed_value)
47
  else:
48
- generator = torch.Generator(device=device).manual_seed(seed)
49
-
50
- images = pipe(
51
- prompt=positive,
52
- negative_prompt=negative or None,
53
- width=width,
54
- height=height,
55
- num_inference_steps=steps,
56
- guidance_scale=cfg,
57
- num_images_per_prompt=num_images,
58
- generator=generator,
59
- ).images
60
-
61
- # Return list of PIL images (maps to SaveImage → output images)
 
 
 
 
 
62
  return images
63
 
64
 
 
65
  # ---------------------------------------------------------------------
66
  # Gradio UI (inputs correspond to Comfy node widgets_values)
67
  # ---------------------------------------------------------------------
@@ -72,12 +86,12 @@ with gr.Blocks() as demo:
72
  with gr.Column():
73
  positive = gr.Textbox(
74
  label="Positive Prompt",
75
- value="Positive Prompts", # from CLIP Text Encode (Positive Prompt)
76
  lines=5,
77
  )
78
  negative = gr.Textbox(
79
  label="Negative Prompt",
80
- value="Negative Prompts", # from CLIP Text Encode (Negative Prompt)
81
  lines=4,
82
  )
83
 
@@ -131,8 +145,12 @@ with gr.Blocks() as demo:
131
  show_label=True,
132
  columns=3,
133
  height=768,
 
 
 
134
  )
135
 
 
136
  run_btn.click(
137
  fn=generate_images,
138
  inputs=[positive, negative, width, height, steps, cfg, seed, num_images],
 
39
  seed: int,
40
  num_images: int,
41
  ):
42
+ run_device = "cuda" if torch.cuda.is_available() else "cpu"
43
+ pipe.to(run_device)
44
+
45
+ num_images = int(num_images)
46
+ width = int(width)
47
+ height = int(height)
48
+ steps = int(steps)
49
+
50
+ images = []
51
+
52
+ # Base seed logic; if seed < 0, random base, else deterministic series
53
  if seed < 0:
54
+ base_seed = torch.randint(0, 2**63 - 1, (1,), device=run_device).item()
 
 
55
  else:
56
+ base_seed = int(seed)
57
+
58
+ for i in range(num_images):
59
+ this_seed = base_seed if seed < 0 else base_seed + i
60
+ generator = torch.Generator(device=run_device).manual_seed(this_seed)
61
+
62
+ out = pipe(
63
+ prompt=positive,
64
+ negative_prompt=negative or None,
65
+ width=width,
66
+ height=height,
67
+ num_inference_steps=steps,
68
+ guidance_scale=float(cfg),
69
+ num_images_per_prompt=1, # always 1 for AOTI safety
70
+ generator=generator,
71
+ ).images[0]
72
+
73
+ images.append(out)
74
+
75
  return images
76
 
77
 
78
+
79
  # ---------------------------------------------------------------------
80
  # Gradio UI (inputs correspond to Comfy node widgets_values)
81
  # ---------------------------------------------------------------------
 
86
  with gr.Column():
87
  positive = gr.Textbox(
88
  label="Positive Prompt",
89
+ value="masterpiece, best quality, extremely detailed, high resolution.", # from CLIP Text Encode (Positive Prompt)
90
  lines=5,
91
  )
92
  negative = gr.Textbox(
93
  label="Negative Prompt",
94
+ value="watermark, blurry, ugly, bad anatomy", # from CLIP Text Encode (Negative Prompt)
95
  lines=4,
96
  )
97
 
 
145
  show_label=True,
146
  columns=3,
147
  height=768,
148
+ object_fit="contain", # keep full image visible in cell
149
+ preview=False, # do not start in zoomed preview mode
150
+ allow_preview=True, # still allow zoom when clicked
151
  )
152
 
153
+
154
  run_btn.click(
155
  fn=generate_images,
156
  inputs=[positive, negative, width, height, steps, cfg, seed, num_images],