FilipeR committed on
Commit
583cfc2
·
verified ·
1 Parent(s): 649eda6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -18
app.py CHANGED
@@ -12,7 +12,7 @@ import base64
12
  import json
13
 
14
  from huggingface_hub import login
15
- from prompt_augment import PromptAugment
16
  login(token=os.environ.get('hf'))
17
 
18
 
@@ -24,23 +24,23 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
24
 
25
  # Load the model pipeline
26
  pipe = QwenImageEditPlusPipeline.from_pretrained("FireRedTeam/FireRed-Image-Edit-1.0", torch_dtype=dtype).to(device)
27
- prompt_handler = PromptAugment()
28
 
29
  # --- UI Constants and Helpers ---
30
  MAX_SEED = np.iinfo(np.int32).max
31
 
32
  # --- Main Inference Function (with hardcoded negative prompt) ---
33
- @spaces.GPU(duration=180)
34
  def infer(
35
  images,
36
  prompt,
37
- seed=42,
38
- randomize_seed=False,
39
  true_guidance_scale=1.0,
40
  num_inference_steps=50,
41
  height=None,
42
  width=None,
43
- rewrite_prompt=True,
44
  num_images_per_prompt=1,
45
  progress=gr.Progress(track_tqdm=True),
46
  ):
@@ -75,9 +75,9 @@ def infer(
75
  print(f"Calling pipeline with prompt: '{prompt}'")
76
  print(f"Negative Prompt: '{negative_prompt}'")
77
  print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
78
- if rewrite_prompt and len(pil_images) > 0:
79
  # prompt = polish_prompt(prompt, pil_images[0])
80
- prompt = prompt_handler.predict(prompt, [pil_images[0]])
81
  print(f"Rewritten Prompt: {prompt}")
82
 
83
 
@@ -96,25 +96,22 @@ def infer(
96
 
97
  return image, seed
98
 
99
- # --- Examples and UI Layout ---
100
- examples = []
101
-
102
  css = """
103
- #col-container {
104
  margin: 0 auto;
105
  max-width: 1024px;
106
  }
107
- #edit_text{margin-top: -62px !important}
108
  """
109
 
110
  def get_image_base64(image_path):
111
  with open(image_path, "rb") as img_file:
112
  return base64.b64encode(img_file.read()).decode('utf-8')
 
113
  logo_base64 = get_image_base64("logo.png")
114
 
115
  with gr.Blocks(css=css) as demo:
116
  with gr.Column(elem_id="col-container"):
117
- gr.HTML(f'<img src="data:image/png;base64,{logo_base64}" alt="Firered Logo" width="400" style="display: block; margin: 0 auto;">')
118
  gr.Markdown("[Learn more](https://github.com/FireRedTeam/FireRed-Image-Edit) about the FireRed-Image-Edit series.")
119
  with gr.Row():
120
  with gr.Column():
@@ -129,7 +126,7 @@ with gr.Blocks(css=css) as demo:
129
  placeholder="describe the edit instruction",
130
  container=False,
131
  )
132
- run_button = gr.Button("Edit!", variant="primary")
133
 
134
  with gr.Accordion("Advanced Settings", open=False):
135
  # Negative prompt UI element is removed here
@@ -167,7 +164,7 @@ with gr.Blocks(css=css) as demo:
167
  minimum=256,
168
  maximum=2048,
169
  step=8,
170
- value=None,
171
  )
172
 
173
  width = gr.Slider(
@@ -175,7 +172,7 @@ with gr.Blocks(css=css) as demo:
175
  minimum=256,
176
  maximum=2048,
177
  step=8,
178
- value=None,
179
  )
180
 
181
 
@@ -202,4 +199,4 @@ with gr.Blocks(css=css) as demo:
202
 
203
  if __name__ == "__main__":
204
  # demo.launch()
205
- demo.launch(allowed_paths=["./"])
 
12
  import json
13
 
14
  from huggingface_hub import login
15
+ # from prompt_augment import PromptAugment
16
  login(token=os.environ.get('hf'))
17
 
18
 
 
24
 
25
  # Load the model pipeline
26
  pipe = QwenImageEditPlusPipeline.from_pretrained("FireRedTeam/FireRed-Image-Edit-1.0", torch_dtype=dtype).to(device)
27
+ # prompt_handler = PromptAugment()
28
 
29
  # --- UI Constants and Helpers ---
30
  MAX_SEED = np.iinfo(np.int32).max
31
 
32
  # --- Main Inference Function (with hardcoded negative prompt) ---
33
+ @spaces.GPU()
34
  def infer(
35
  images,
36
  prompt,
37
+ seed=5555,
38
+ randomize_seed=True,
39
  true_guidance_scale=1.0,
40
  num_inference_steps=50,
41
  height=None,
42
  width=None,
43
+ rewrite_prompt=False,
44
  num_images_per_prompt=1,
45
  progress=gr.Progress(track_tqdm=True),
46
  ):
 
75
  print(f"Calling pipeline with prompt: '{prompt}'")
76
  print(f"Negative Prompt: '{negative_prompt}'")
77
  print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
78
+ if False and rewrite_prompt and len(pil_images) > 0:
79
  # prompt = polish_prompt(prompt, pil_images[0])
80
+ # prompt = prompt_handler.predict(prompt, [pil_images[0]])
81
  print(f"Rewritten Prompt: {prompt}")
82
 
83
 
 
96
 
97
  return image, seed
98
 
 
 
 
99
  css = """
100
+ #NOcol-container {
101
  margin: 0 auto;
102
  max-width: 1024px;
103
  }
 
104
  """
105
 
106
  def get_image_base64(image_path):
107
  with open(image_path, "rb") as img_file:
108
  return base64.b64encode(img_file.read()).decode('utf-8')
109
+
110
  logo_base64 = get_image_base64("logo.png")
111
 
112
  with gr.Blocks(css=css) as demo:
113
  with gr.Column(elem_id="col-container"):
114
+ gr.HTML(f'<img src="data:image/png;base64,{logo_base64}" alt="FireRedTeam Logo" width="400" />')
115
  gr.Markdown("[Learn more](https://github.com/FireRedTeam/FireRed-Image-Edit) about the FireRed-Image-Edit series.")
116
  with gr.Row():
117
  with gr.Column():
 
126
  placeholder="describe the edit instruction",
127
  container=False,
128
  )
129
+ run_button = gr.Button("Edit", variant="primary")
130
 
131
  with gr.Accordion("Advanced Settings", open=False):
132
  # Negative prompt UI element is removed here
 
164
  minimum=256,
165
  maximum=2048,
166
  step=8,
167
+ value=1024,
168
  )
169
 
170
  width = gr.Slider(
 
172
  minimum=256,
173
  maximum=2048,
174
  step=8,
175
+ value=1024,
176
  )
177
 
178
 
 
199
 
200
  if __name__ == "__main__":
201
  # demo.launch()
202
+ demo.launch(concurrency_limit=2)