TejAndrewsACC committed on
Commit
b2e7a1d
·
verified ·
1 Parent(s): 4b92e8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -43
app.py CHANGED
@@ -1,57 +1,53 @@
1
- import threading
 
2
  import time
 
3
  import os
4
- import gradio as gr
5
  from huggingface_hub import InferenceClient
6
 
7
- # Initialize Hugging Face client
8
  client = InferenceClient(
9
  provider="fal-ai",
10
  api_key=os.environ["HF_TOKEN"],
11
  )
12
 
13
- def run_edit(input_image, prompt, progress_html):
14
- stop_flag = {"stop": False}
15
-
16
- # --- background thread for smooth progress bar ---
17
- def progress_loop():
18
- current = 0.0
19
- while not stop_flag["stop"]:
20
- # Smooth progress approach
21
- current += (0.99 - current) * 0.02
22
- bar_html = f"""
23
- <div style='width: 100%; background-color: #e0e0e0; border-radius: 12px;'>
24
- <div style='width: {current*100:.1f}%; background-color: #4CAF50; height: 25px; border-radius: 12px; transition: width 0.05s linear;'></div>
25
- </div>
26
- <div style='margin-top: 5px;'>Editing Image… {int(current*100)}%</div>
27
- """
28
- progress_html.update(bar_html)
29
- time.sleep(0.05)
30
-
31
- # Finish immediately
32
- bar_html = f"""
33
- <div style='width: 100%; background-color: #e0e0e0; border-radius: 12px;'>
34
- <div style='width: 100%; background-color: #4CAF50; height: 25px; border-radius: 12px;'></div>
35
- </div>
36
- <div style='margin-top: 5px;'>Done!</div>
37
- """
38
- progress_html.update(bar_html)
39
-
40
- thread = threading.Thread(target=progress_loop)
41
- thread.start()
42
-
43
- # --- actual model call ---
44
  output = client.image_to_image(
45
  input_image,
46
  prompt=prompt,
47
  model="Qwen/Qwen-Image-Edit",
48
  )
49
 
50
- stop_flag["stop"] = True
51
- thread.join()
52
-
53
  return output
54
 
 
55
  # --- Gradio UI ---
56
  with gr.Blocks(theme="TejAndrewsACC/ACC", title="Image Edit Demo") as demo:
57
  gr.Markdown("## ACC AI Image Editor")
@@ -62,17 +58,15 @@ with gr.Blocks(theme="TejAndrewsACC/ACC", title="Image Edit Demo") as demo:
62
 
63
  prompt = gr.Textbox(
64
  label="How do you want the image to be changed?",
65
- value="" # empty by default
66
  )
67
 
68
- progress_html = gr.HTML("<div style='width:100%; height:25px; background-color:#e0e0e0; border-radius:12px;'></div>")
69
-
70
  run_btn = gr.Button("Run Edit")
71
 
72
  run_btn.click(
73
- fn=run_edit,
74
- inputs=[input_img, prompt, progress_html],
75
  outputs=[output_img],
76
  )
77
 
78
- demo.launch()
 
1
+
2
+ import gradio as gr
3
  import time
4
+ import random
5
  import os
 
6
  from huggingface_hub import InferenceClient
7
 
8
+ # Initialize client
9
  client = InferenceClient(
10
  provider="fal-ai",
11
  api_key=os.environ["HF_TOKEN"],
12
  )
13
 
14
def smooth_progress_bar(progress=None):
    """Animate a smooth, natural-looking progress curve up to 99%.

    Args:
        progress: An existing ``gr.Progress`` tracker to drive. When omitted,
            a fresh ``gr.Progress(track_tqdm=False)`` is created, preserving
            the previous behavior.
            NOTE(review): a ``gr.Progress`` constructed ad hoc like this is
            not bound to any Gradio event, so it may not render in the UI at
            all — Gradio only tracks a ``Progress`` instance that appears as
            a default parameter of the event handler. Pass that injected
            tracker in here instead; TODO confirm against the Gradio docs.

    Returns:
        The ``gr.Progress`` instance that was updated, so the caller can push
        a final 100% "Done!" update after the real work finishes.

    NOTE(review): this loop sleeps until the bar reaches 99% *before*
    returning, i.e. the animation plays to (near) completion ahead of the
    actual model call instead of alongside it, adding a few seconds of
    artificial latency to every request.
    """
    if progress is None:
        progress = gr.Progress(track_tqdm=False)

    current = 0.0
    while current < 0.99:
        # Small random increments (~0.3%-0.7%) for a natural feel.
        increment = random.uniform(0.003, 0.007)

        # Randomized tick interval between updates.
        delay = random.uniform(0.01, 0.03)

        current = min(0.99, current + increment)
        progress(current, desc=f"Editing image… {int(current*100)}%")

        time.sleep(delay)

    return progress
34
+
35
+
36
def run_edit(input_image, prompt):
    """Edit *input_image* with the Qwen image-edit model, guided by *prompt*.

    The cosmetic progress animation runs first (to ~99%), then the remote
    image-to-image call is made; the bar is snapped to 100% once the model
    returns.

    Returns:
        The edited image produced by ``client.image_to_image``.
    """
    # Cosmetic progress animation; returns the tracker for the final update.
    tracker = smooth_progress_bar()

    # Real model call via the fal-ai provider.
    edited = client.image_to_image(
        input_image,
        prompt=prompt,
        model="Qwen/Qwen-Image-Edit",
    )

    # Snap the bar to completion now that the edit is done.
    tracker(1.0, desc="Done!")

    return edited
49
 
50
+
51
  # --- Gradio UI ---
52
  with gr.Blocks(theme="TejAndrewsACC/ACC", title="Image Edit Demo") as demo:
53
  gr.Markdown("## ACC AI Image Editor")
 
58
 
59
  prompt = gr.Textbox(
60
  label="How do you want the image to be changed?",
61
+ value=""
62
  )
63
 
 
 
64
  run_btn = gr.Button("Run Edit")
65
 
66
  run_btn.click(
67
+ fn=lambda img_path, pr: run_edit(open(img_path, "rb").read(), pr),
68
+ inputs=[input_img, prompt],
69
  outputs=[output_img],
70
  )
71
 
72
+ demo.launch()