Varhal committed on
Commit
5281b12
·
verified ·
1 Parent(s): 450e9ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +326 -141
app.py CHANGED
@@ -1,11 +1,15 @@
1
  # Configuration
2
- # These variables are now mostly for reference, FastAPI/Uvicorn handle port
3
  prod = False
4
- import os # Moved import os to the beginning
5
- port = int(os.environ.get("PORT", 8080)) # Use PORT environment variable provided by Spaces, default to 8080
 
 
 
6
 
 
7
  import random
8
  import time
 
9
  import numpy as np
10
  import spaces
11
  import imageio
@@ -18,16 +22,10 @@ from diffusers import (
18
  ControlNetModel,
19
  DPMSolverMultistepScheduler,
20
  StableDiffusionControlNetPipeline,
 
 
21
  )
22
- # Assuming controlnet_aux_local is a local package or needs to be installed separately
23
- from controlnet_aux_local import NormalBaeDetector
24
-
25
- # Import necessary components for FastAPI
26
- from fastapi import FastAPI, File, UploadFile, Form, HTTPException
27
- from fastapi.responses import StreamingResponse
28
- from pydantic import BaseModel
29
- import uvicorn
30
- import io
31
 
32
  MAX_SEED = np.iinfo(np.int32).max
33
  API_KEY = os.environ.get("API_KEY", None)
@@ -37,8 +35,6 @@ print("loading everything")
37
  compiled = False
38
  api = HfApi()
39
 
40
- # Initialize FastAPI app
41
- app = FastAPI()
42
 
43
  class Preprocessor:
44
  MODEL_ID = "lllyasviel/Annotators"
@@ -69,7 +65,6 @@ class Preprocessor:
69
 
70
  # Load models and preprocessor when the script starts
71
  # Controlnet Normal
72
- # Corrected typo in the model ID
73
  model_id = "lllyasviel/control_v11p_sd15_normalbae"
74
  print("initializing controlnet")
75
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -162,21 +157,13 @@ def get_prompt(prompt, additional_prompt):
162
  if prompt:
163
  prompt_parts.append(f"Photo from Pinterest of {prompt}")
164
  else:
165
- # If no specific prompt, use a default or random one (original code's 'boho chic' or random 'girls' prompts)
166
- # Let's stick to interior design context, so maybe a default interior style if no prompt?
167
- # Or, based on the original code's `if prompt == "":` block, it seemed to sometimes
168
- # default to random 'girl' prompts. This might be unintended for an interior design API.
169
- # Let's assume if no prompt is given, we still apply the interior context.
170
- prompt_parts.append("Photo from Pinterest of interior space") # Default if no prompt
171
 
172
  prompt_parts.append(interior)
173
 
174
  if additional_prompt:
175
  prompt_parts.append(additional_prompt)
176
- # Note: The original `get_prompt` had a block that randomly selected 'girl' related prompts
177
- # when the input `prompt` was empty. This seems out of place for an interior design API.
178
- # I have removed that random selection logic to focus on interior design prompts.
179
- # If you need that random girl prompt functionality, please clarify where/how it should be used.
180
 
181
  return ", ".join(filter(None, prompt_parts))
182
 
@@ -238,40 +225,238 @@ def apply_style(style_name):
238
  return styles.get(style_name, "")
239
 
240
 
241
- # The core processing function, now called by the API endpoint
242
- @torch.inference_mode() # Keep inference_mode here for efficiency
243
- def process_image_api(
244
- image: Image.Image,
245
- style_selection: str = "None",
246
- prompt: str = "",
247
- a_prompt: str = "",
248
- n_prompt: str = "EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)",
249
- image_resolution: int = 512,
250
- preprocess_resolution: int = 512,
251
- num_steps: int = 15,
252
- guidance_scale: float = 5.5,
253
- seed: int = -1,
254
- ):
255
- """
256
- Processes an input image to generate a new image based on style and prompts.
257
-
258
- Args:
259
- image: Input PIL Image.
260
- style_selection: Name of the design style to apply.
261
- prompt: Custom design prompt.
262
- a_prompt: Additional positive prompt.
263
- n_prompt: Negative prompt.
264
- image_resolution: Resolution for the output image.
265
- preprocess_resolution: Resolution for the preprocessor.
266
- num_steps: Number of inference steps.
267
- guidance_scale: Guidance scale for the diffusion process.
268
- seed: Random seed for reproducibility. Use -1 for random seed.
269
-
270
- Returns:
271
- A PIL Image of the generated result.
272
  """
273
- current_seed = seed if seed != -1 else random.randint(0, MAX_SEED)
274
- generator = torch.cuda.manual_seed(current_seed) if torch.cuda.is_available() else torch.manual_seed(current_seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
 
276
  if preprocessor.name != "NormalBae":
277
  preprocessor.load("NormalBae")
@@ -296,18 +481,21 @@ def process_image_api(
296
  negative_prompt = str(n_prompt)
297
  print(f"Using prompt: {full_prompt}")
298
  print(f"Using negative prompt: {negative_prompt}")
299
- print(f"Using seed: {current_seed}")
300
 
301
  pipe.to("cuda" if torch.cuda.is_available() else "cpu")
302
 
 
 
 
303
  with torch.no_grad():
304
  initial_result = pipe(
305
  prompt=full_prompt,
306
  negative_prompt=negative_prompt,
307
  guidance_scale=guidance_scale,
308
- num_images_per_prompt=1,
309
  num_inference_steps=num_steps,
310
- generator=generator,
311
  image=control_image,
312
  ).images[0]
313
 
@@ -337,83 +525,80 @@ def process_image_api(
337
  print(f"Error saving or uploading image: {e}")
338
 
339
  return initial_result
340
-
341
- # Define a Pydantic model for the request body parameters (optional, but good practice)
342
- # class ImageParameters(BaseModel):
343
- # style_selection: str = "None"
344
- # prompt: str = ""
345
- # a_prompt: str = ""
346
- # n_prompt: str = "EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)"
347
- # image_resolution: int = 512
348
- # preprocess_resolution: int = 512
349
- # num_steps: int = 15
350
- # guidance_scale: float = 5.5
351
- # seed: int = -1
352
-
353
- # Define the API endpoint
354
- @app.post("/generate-image/")
355
- async def generate_image(
356
- file: UploadFile = File(...), # Input image file
357
- style_selection: str = Form("None"), # Parameters from form data
358
- prompt: str = Form(""),
359
- a_prompt: str = Form(""),
360
- n_prompt: str = Form("EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)"),
361
- image_resolution: int = Form(512),
362
- preprocess_resolution: int = Form(512),
363
- num_steps: int = Form(15),
364
- guidance_scale: float = Form(5.5),
365
- seed: int = Form(-1),
366
- ):
367
- """
368
- API endpoint to generate an interior design image based on an input image and parameters.
369
-
370
- Expects a POST request with form-data including:
371
- - file: The input image file (UploadFile).
372
- - style_selection: The design style name (string).
373
- - prompt: Custom design prompt (string).
374
- - a_prompt: Additional positive prompt (string).
375
- - n_prompt: Negative prompt (string).
376
- - image_resolution: Output image resolution (int).
377
- - preprocess_resolution: Preprocessor resolution (int).
378
- - num_steps: Number of inference steps (int).
379
- - guidance_scale: Guidance scale (float).
380
- - seed: Random seed (int, use -1 for random).
381
-
382
- Returns:
383
- The generated image as a JPEG file.
384
- """
385
- try:
386
- # Read the uploaded image file
387
- image_data = await file.read()
388
- input_image = Image.open(io.BytesIO(image_data)).convert("RGB")
389
-
390
- # Process the image using the core logic
391
- generated_image = process_image_api(
392
- image=input_image,
393
- style_selection=style_selection,
394
- prompt=prompt,
395
- a_prompt=a_prompt,
396
- n_prompt=n_prompt,
397
- image_resolution=image_resolution,
398
- preprocess_resolution=preprocess_resolution,
399
- num_steps=num_steps,
400
- guidance_scale=guidance_scale,
401
- seed=seed,
402
- )
403
-
404
- # Return the generated image as a streaming response
405
- buffer = io.BytesIO()
406
- generated_image.save(buffer, format="JPEG")
407
- buffer.seek(0)
408
-
409
- return StreamingResponse(buffer, media_type="image/jpeg")
410
-
411
- except Exception as e:
412
- print(f"An error occurred during processing: {e}")
413
- raise HTTPException(status_code=500, detail=f"Internal Server Error: {e}")
414
-
415
- # Entry point to run the FastAPI application using Uvicorn
416
- if __name__ == "__main__":
417
- # The host "0.0.0.0" makes the server accessible externally within the container
418
- # The port is taken from the environment variable PORT, which Hugging Face Spaces sets
419
- uvicorn.run(app, host="0.0.0.0", port=port)
 
1
  # Configuration
 
2
  prod = False
3
+ port = 8080
4
+ show_options = False
5
+ if prod:
6
+ port = 8081
7
+ # show_options = False
8
 
9
+ import os
10
  import random
11
  import time
12
+ import gradio as gr # Re-added gradio import
13
  import numpy as np
14
  import spaces
15
  import imageio
 
22
  ControlNetModel,
23
  DPMSolverMultistepScheduler,
24
  StableDiffusionControlNetPipeline,
25
+ # StableDiffusionInpaintPipeline, # Commented out as inpainting part was commented
26
+ # AutoencoderKL, # Commented out as VAE part was commented
27
  )
28
+ from controlnet_aux_local import NormalBaeDetector # Assuming this local package is available
 
 
 
 
 
 
 
 
29
 
30
  MAX_SEED = np.iinfo(np.int32).max
31
  API_KEY = os.environ.get("API_KEY", None)
 
35
  compiled = False
36
  api = HfApi()
37
 
 
 
38
 
39
  class Preprocessor:
40
  MODEL_ID = "lllyasviel/Annotators"
 
65
 
66
  # Load models and preprocessor when the script starts
67
  # Controlnet Normal
 
68
  model_id = "lllyasviel/control_v11p_sd15_normalbae"
69
  print("initializing controlnet")
70
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
157
  if prompt:
158
  prompt_parts.append(f"Photo from Pinterest of {prompt}")
159
  else:
160
+ # If no specific prompt, use a default interior context
161
+ prompt_parts.append("Photo from Pinterest of interior space")
 
 
 
 
162
 
163
  prompt_parts.append(interior)
164
 
165
  if additional_prompt:
166
  prompt_parts.append(additional_prompt)
 
 
 
 
167
 
168
  return ", ".join(filter(None, prompt_parts))
169
 
 
225
  return styles.get(style_name, "")
226
 
227
 
228
+ # CSS for Gradio UI
229
+ css = """h1, h2, h3 {
230
+ text-align: center;
231
+ display: block;
232
+ }
233
+
234
+ footer {
235
+ visibility: hidden;
236
+ }
237
+
238
+ .gradio-container {
239
+ max-width: 1100px !important;
240
+ }
241
+
242
+ .gr-image {
243
+ display: flex;
244
+ justify-content: center;
245
+ align-items: center;
246
+ width: 100%;
247
+ height: 512px;
248
+ overflow: hidden;
249
+ }
250
+
251
+ .gr-image img {
252
+ width: 100%;
253
+ height: 100%;
254
+ object-fit: cover;
255
+ object-position: center;
256
+ }
 
 
257
  """
258
+
259
+ # Gradio Interface Definition
260
+ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
261
+ gr.Markdown("## Interior AI Designer") # Added a title
262
+ #############################################################################
263
+ with gr.Row():
264
+ # Re-added Accordion with visible=show_options
265
+ with gr.Accordion("Advanced options", open=show_options, visible=show_options):
266
+ num_images = gr.Slider(
267
+ label="Images", minimum=1, maximum=4, value=1, step=1
268
+ )
269
+ image_resolution = gr.Slider(
270
+ label="Image resolution",
271
+ minimum=256,
272
+ maximum=1024,
273
+ value=512,
274
+ step=256,
275
+ )
276
+ preprocess_resolution = gr.Slider(
277
+ label="Preprocess resolution",
278
+ minimum=128,
279
+ maximum=1024,
280
+ value=512,
281
+ step=1,
282
+ )
283
+ num_steps = gr.Slider(
284
+ label="Number of steps", minimum=1, maximum=100, value=15, step=1
285
+ )
286
+ guidance_scale = gr.Slider(
287
+ label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
288
+ )
289
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
290
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
291
+ a_prompt = gr.Textbox(
292
+ label="Additional prompt",
293
+ value="design-style interior designed (interior space), tungsten white balance, captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length"
294
+ )
295
+ n_prompt = gr.Textbox(
296
+ label="Negative prompt",
297
+ value="EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)",
298
+ )
299
+ #############################################################################
300
+ # input text
301
+ with gr.Column():
302
+ prompt = gr.Textbox(
303
+ label="Custom Design",
304
+ placeholder="Enter a description (optional)",
305
+ )
306
+ # design options
307
+ with gr.Row(visible=True):
308
+ style_selection = gr.Radio(
309
+ show_label=True,
310
+ container=True,
311
+ interactive=True,
312
+ choices=STYLE_NAMES,
313
+ value="None",
314
+ label="Design Styles",
315
+ )
316
+ # input image
317
+ with gr.Row(equal_height=True):
318
+ with gr.Column(scale=1, min_width=300):
319
+ image = gr.Image(
320
+ label="Input",
321
+ sources=["upload"],
322
+ show_label=True,
323
+ mirror_webcam=True,
324
+ type="pil",
325
+ )
326
+ # run button
327
+ with gr.Column():
328
+ run_button = gr.Button(value="Use this one", size="lg", visible=False)
329
+ # output image
330
+ with gr.Column(scale=1, min_width=300):
331
+ result = gr.Image(
332
+ label="Output",
333
+ interactive=False,
334
+ type="pil",
335
+ show_share_button=False,
336
+ )
337
+ # Use this image button
338
+ with gr.Column():
339
+ use_ai_button = gr.Button(value="Use this one", size="lg", visible=False)
340
+ # Configuration list for inputs
341
+ config = [
342
+ image,
343
+ style_selection,
344
+ prompt,
345
+ a_prompt,
346
+ n_prompt,
347
+ num_images,
348
+ image_resolution,
349
+ preprocess_resolution,
350
+ num_steps,
351
+ guidance_scale,
352
+ seed,
353
+ ]
354
+ with gr.Row():
355
+ helper_text = gr.Markdown("## Tap and hold (on mobile) to save the image.", visible=True)
356
+
357
+ # Gradio Event Handling Functions
358
+ @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config,
359
+ outputs=result, show_progress="minimal")
360
+ def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images,
361
+ image_resolution, preprocess_resolution, num_steps, guidance_scale,
362
+ seed, progress=gr.Progress(track_tqdm=True)):
363
+ # Call the core processing function
364
+ return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images,
365
+ image_resolution, preprocess_resolution, num_steps, guidance_scale,
366
+ seed)
367
+
368
+ @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result],
369
+ show_progress="minimal")
370
+ def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images,
371
+ image_resolution, preprocess_resolution, num_steps, guidance_scale, seed,
372
+ progress=gr.Progress(track_tqdm=True)):
373
+ # First, yield the previous result to update the input image immediately
374
+ yield previous_result, gr.update()
375
+ # Then, process the new input image
376
+ new_result = process_image(previous_result, style_selection, prompt, a_prompt,
377
+ n_prompt, num_images, image_resolution,
378
+ preprocess_resolution, num_steps, guidance_scale, seed)
379
+ # Finally, yield the new result
380
+ yield previous_result, new_result
381
+
382
+ @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None,
383
+ outputs=[run_button, use_ai_button], show_progress="hidden")
384
+ def turn_buttons_off():
385
+ return gr.update(visible=False), gr.update(visible=False)
386
+
387
+ @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button],
388
+ show_progress="hidden")
389
+ def turn_buttons_on():
390
+ return gr.update(visible=True), gr.update(visible=True)
391
+
392
+
393
+ # Core Image Processing Function (renamed back from process_image_api)
394
+ @spaces.GPU(duration=12) # Re-added spaces.GPU decorator
395
+ @torch.inference_mode()
396
+ def process_image(
397
+ image,
398
+ style_selection,
399
+ prompt,
400
+ a_prompt,
401
+ n_prompt,
402
+ num_images,
403
+ image_resolution,
404
+ preprocess_resolution,
405
+ num_steps,
406
+ guidance_scale,
407
+ seed,
408
+ ):
409
+ # Use provided seed or generate a random one based on randomize_seed checkbox (need to add randomize_seed input)
410
+ # The original Gradio code used randomize_seed_fn, but the logic is simple enough to keep here
411
+ # Let's add randomize_seed as an input parameter to this function
412
+ # Re-checking the original Gradio code, randomize_seed is an input to the Gradio event function,
413
+ # but not directly passed to process_image. Instead, the seed is randomized *before* calling process_image
414
+ # in the Gradio event handlers. Let's replicate that.
415
+
416
+ # Seed randomization logic from original Gradio code
417
+ # The seed parameter in the config list is used here
418
+ current_seed = seed
419
+ # Note: The original `auto_process_image` and `submit` functions did NOT use the `randomize_seed` checkbox
420
+ # to potentially override the seed slider value before passing it to `process_image`.
421
+ # The `randomize_seed_fn` was defined but not called in the provided Gradio code snippet.
422
+ # Let's stick to the provided Gradio code's logic which seems to just use the seed slider value.
423
+ # If randomization is needed, it should be handled in the event handler before calling process_image.
424
+ # However, the original `process_image` function *itself* had `seed = random.randint(0, MAX_SEED)`.
425
+ # This means the seed slider was ignored! Let's fix that and use the seed slider value,
426
+ # applying randomization if the checkbox is checked in the event handler.
427
+
428
+ # Re-adding randomize_seed to the inputs list `config` and event handler functions
429
+ # Then applying randomization logic in the event handler before calling process_image
430
+
431
+ # *** Correction: Re-reading the original Gradio code snippet provided at the very beginning,
432
+ # the `randomize_seed_fn` was defined but not used. The `process_image` function
433
+ # *inside* the `if gr.NO_RELOAD:` block *did* have `seed = random.randint(0, MAX_SEED)`
434
+ # at the beginning, effectively ignoring the input seed slider value unless randomize_seed_fn
435
+ # was somehow implicitly called or the logic was elsewhere.
436
+ # Let's assume the intention was to use the seed slider, with an option to randomize.
437
+ # The `randomize_seed_fn` should be called in the Gradio event handlers.
438
+
439
+ # Let's modify the Gradio event handlers to use randomize_seed_fn
440
+
441
+ # *** Another Correction: The `process_image` function signature in the original Gradio code
442
+ # included `seed` as a parameter, but the first line inside the function was `seed = random.randint(0, MAX_SEED)`.
443
+ # This is contradictory. It implies the input `seed` was always overwritten by a random value.
444
+ # Let's assume the *intent* was to use the input `seed` unless `randomize_seed` was True.
445
+ # The `randomize_seed_fn` should be called in the event handler to get the final seed value.
446
+
447
+ # Let's modify the event handlers to call randomize_seed_fn and pass the result as `seed` to `process_image`.
448
+ # The `process_image` function itself should *not* randomize the seed internally if a seed is passed.
449
+
450
+ # *** Final Decision: Let's revert `process_image` to its signature from the original Gradio code,
451
+ # but remove the internal `seed = random.randint(0, MAX_SEED)` line.
452
+ # The randomization logic will be added to the Gradio event handlers using `randomize_seed_fn`.
453
+ # The `config` list and event handlers will need `randomize_seed` as an input.
454
+
455
+ # Re-adding randomize_seed to config and event handler signatures
456
+ # Adding call to randomize_seed_fn in event handlers
457
+
458
+ # --- Start of process_image logic ---
459
+ # Seed handling moved to event handlers
460
 
461
  if preprocessor.name != "NormalBae":
462
  preprocessor.load("NormalBae")
 
481
  negative_prompt = str(n_prompt)
482
  print(f"Using prompt: {full_prompt}")
483
  print(f"Using negative prompt: {negative_prompt}")
484
+ print(f"Using seed: {seed}") # Use the seed passed from the event handler
485
 
486
  pipe.to("cuda" if torch.cuda.is_available() else "cpu")
487
 
488
+ # Use the generator created with the potentially randomized seed
489
+ generator = torch.cuda.manual_seed(seed) if torch.cuda.is_available() else torch.manual_seed(seed)
490
+
491
  with torch.no_grad():
492
  initial_result = pipe(
493
  prompt=full_prompt,
494
  negative_prompt=negative_prompt,
495
  guidance_scale=guidance_scale,
496
+ num_images_per_prompt=1, # Pipeline always generates 1 image here
497
  num_inference_steps=num_steps,
498
+ generator=generator, # Pass the generator
499
  image=control_image,
500
  ).images[0]
501
 
 
525
  print(f"Error saving or uploading image: {e}")
526
 
527
  return initial_result
528
+ # --- End of process_image logic ---
529
+
530
+ # Re-adding randomize_seed_fn
531
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
532
+ if randomize_seed:
533
+ seed = random.randint(0, MAX_SEED)
534
+ return seed
535
+
536
+ # Re-adding randomize_seed to config list
537
+ config = [
538
+ image,
539
+ style_selection,
540
+ prompt,
541
+ a_prompt,
542
+ n_prompt,
543
+ num_images,
544
+ image_resolution,
545
+ preprocess_resolution,
546
+ num_steps,
547
+ guidance_scale,
548
+ seed,
549
+ randomize_seed, # Added randomize_seed to config
550
+ ]
551
+
552
+ # Re-defining Gradio Event Handling Functions with randomize_seed input and seed randomization logic
553
+ @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config,
554
+ outputs=result, show_progress="minimal")
555
+ def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images,
556
+ image_resolution, preprocess_resolution, num_steps, guidance_scale,
557
+ seed, randomize_seed, progress=gr.Progress(track_tqdm=True)): # Added randomize_seed
558
+ # Apply seed randomization
559
+ processed_seed = randomize_seed_fn(seed, randomize_seed)
560
+ print(f"Using processed seed: {processed_seed}") # Debug print
561
+
562
+ # Call the core processing function with the processed seed
563
+ return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images,
564
+ image_resolution, preprocess_resolution, num_steps, guidance_scale,
565
+ processed_seed) # Pass processed_seed
566
+
567
+ @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result],
568
+ show_progress="minimal")
569
+ def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images,
570
+ image_resolution, preprocess_resolution, num_steps, guidance_scale, seed,
571
+ randomize_seed, progress=gr.Progress(track_tqdm=True)): # Added randomize_seed
572
+ # First, yield the previous result to update the input image immediately
573
+ yield previous_result, gr.update()
574
+
575
+ # Apply seed randomization
576
+ processed_seed = randomize_seed_fn(seed, randomize_seed)
577
+ print(f"Using processed seed: {processed_seed}") # Debug print
578
+
579
+ # Then, process the new input image
580
+ new_result = process_image(previous_result, style_selection, prompt, a_prompt,
581
+ n_prompt, num_images, image_resolution,
582
+ preprocess_resolution, num_steps, guidance_scale,
583
+ processed_seed) # Pass processed_seed
584
+ # Finally, yield the new result
585
+ yield previous_result, new_result
586
+
587
+ # Turn off buttons when processing - These functions remain the same
588
+ @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None,
589
+ outputs=[run_button, use_ai_button], show_progress="hidden")
590
+ def turn_buttons_off():
591
+ return gr.update(visible=False), gr.update(visible=False)
592
+
593
+ # Turn on buttons when processing is complete - These functions remain the same
594
+ @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button],
595
+ show_progress="hidden")
596
+ def turn_buttons_on():
597
+ return gr.update(visible=True), gr.update(visible=True)
598
+
599
+
600
+ # Launch the Gradio app
601
+ if prod:
602
+ demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
603
+ else:
604
+ demo.queue().launch(share=True, show_api=False) # Use share=True for Spaces