Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -108,7 +108,7 @@ def reset_recalc_directions():
|
|
| 108 |
intro = """
|
| 109 |
<div style="display: flex;align-items: center;justify-content: center">
|
| 110 |
<img src="https://huggingface.co/spaces/LatentNavigation/latentnavigation-flux/resolve/main/Group 4-16.png" width="120" style="display: inline-block">
|
| 111 |
-
<h1 style="margin-left: 12px;text-align: center;margin-bottom: 7px;display: inline-block;font-size:1.
|
| 112 |
</div>
|
| 113 |
<div style="display: flex;align-items: center;justify-content: center">
|
| 114 |
<h3 style="display: inline-block;margin-left: 10px;margin-top: 6px;font-weight: 500">Exploring CLIP text space with FLUX.1 schnell 🪐</h3>
|
|
@@ -125,7 +125,7 @@ intro = """
|
|
| 125 |
css='''
|
| 126 |
#strip, #gif{min-height: 50px}
|
| 127 |
'''
|
| 128 |
-
examples = [["winter", "summer", 1.25, "a dog in the park"], ["USA suburb", "Europe", 2, "a house"], ["rotten", "super fresh", 2, "a tomato"]]
|
| 129 |
image_seq = gr.Image(label="Strip", elem_id="strip", height=50)
|
| 130 |
output_image = gr.Image(label="Gif", elem_id="gif", height=50)
|
| 131 |
post_generation_image = gr.Image(label="Generated Images")
|
|
@@ -164,19 +164,22 @@ with gr.Blocks(css=css) as demo:
|
|
| 164 |
with gr.Column(scale=2, min_width=50):
|
| 165 |
output_image.render()
|
| 166 |
|
| 167 |
-
with gr.Accordion(label="
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
|
|
|
|
|
|
|
|
|
| 180 |
|
| 181 |
examples_gradio = gr.Examples(
|
| 182 |
examples=examples,
|
|
@@ -184,7 +187,7 @@ with gr.Blocks(css=css) as demo:
|
|
| 184 |
fn=generate,
|
| 185 |
outputs=[x_concept_1, x_concept_2, avg_diff_x, output_image, image_seq, total_images, post_generation_image, post_generation_slider, seed],
|
| 186 |
cache_examples="lazy"
|
| 187 |
-
|
| 188 |
|
| 189 |
submit.click(fn=generate,
|
| 190 |
inputs=[concept_1, concept_2, x, prompt, randomize_seed, seed, recalc_directions, iterations, steps, interm_steps, guidance_scale, x_concept_1, x_concept_2, avg_diff_x, total_images],
|
|
|
|
| 108 |
intro = """
|
| 109 |
<div style="display: flex;align-items: center;justify-content: center">
|
| 110 |
<img src="https://huggingface.co/spaces/LatentNavigation/latentnavigation-flux/resolve/main/Group 4-16.png" width="120" style="display: inline-block">
|
| 111 |
+
<h1 style="margin-left: 12px;text-align: center;margin-bottom: 7px;display: inline-block;font-size:1.75em">Latent Navigation</h1>
|
| 112 |
</div>
|
| 113 |
<div style="display: flex;align-items: center;justify-content: center">
|
| 114 |
<h3 style="display: inline-block;margin-left: 10px;margin-top: 6px;font-weight: 500">Exploring CLIP text space with FLUX.1 schnell 🪐</h3>
|
|
|
|
| 125 |
css='''
|
| 126 |
#strip, #gif{min-height: 50px}
|
| 127 |
'''
|
| 128 |
+
examples = [["a dog in the park", "winter", "summer", 1.25], ["a house", "USA suburb", "Europe", 2], ["a tomato", "rotten", "super fresh", 2]]
|
| 129 |
image_seq = gr.Image(label="Strip", elem_id="strip", height=50)
|
| 130 |
output_image = gr.Image(label="Gif", elem_id="gif", height=50)
|
| 131 |
post_generation_image = gr.Image(label="Generated Images")
|
|
|
|
| 164 |
with gr.Column(scale=2, min_width=50):
|
| 165 |
output_image.render()
|
| 166 |
|
| 167 |
+
with gr.Accordion(label="Advanced options", open=False):
|
| 168 |
+
interm_steps = gr.Slider(label = "Num of intermediate images", minimum=3, value=21, maximum=65, step=2)
|
| 169 |
+
with gr.Row():
|
| 170 |
+
iterations = gr.Slider(label = "Num iterations for clip directions", minimum=0, value=200, maximum=500, step=1)
|
| 171 |
+
steps = gr.Slider(label = "Num inference steps", minimum=1, value=3, maximum=8, step=1)
|
| 172 |
+
with gr.Row():
|
| 173 |
+
guidance_scale = gr.Slider(
|
| 174 |
+
label="Guidance scale",
|
| 175 |
+
minimum=0.1,
|
| 176 |
+
maximum=10.0,
|
| 177 |
+
step=0.1,
|
| 178 |
+
value=3.5,
|
| 179 |
+
)
|
| 180 |
+
with gr.Column():
|
| 181 |
+
randomize_seed = gr.Checkbox(True, label="Randomize seed")
|
| 182 |
+
seed.render()
|
| 183 |
|
| 184 |
examples_gradio = gr.Examples(
|
| 185 |
examples=examples,
|
|
|
|
| 187 |
fn=generate,
|
| 188 |
outputs=[x_concept_1, x_concept_2, avg_diff_x, output_image, image_seq, total_images, post_generation_image, post_generation_slider, seed],
|
| 189 |
cache_examples="lazy"
|
| 190 |
+
)
|
| 191 |
|
| 192 |
submit.click(fn=generate,
|
| 193 |
inputs=[concept_1, concept_2, x, prompt, randomize_seed, seed, recalc_directions, iterations, steps, interm_steps, guidance_scale, x_concept_1, x_concept_2, avg_diff_x, total_images],
|