Update app.py
app.py CHANGED
@@ -1,33 +1,20 @@
 #!/usr/bin/env python3
 import gradio as gr
 from clip_interrogator import Config, Interrogator
-from share_btn import community_icon_html, loading_icon_html, share_js
 
-MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)']
+# MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)']
+# MODELS = ['ViT-L (best for Stable Diffusion 1.*)',]
 
 # load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14
 config = Config(clip_model_name="ViT-L-14/openai")
-ci_vitl = Interrogator(config)
-ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
-
-# load ViT-H https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K
-config.blip_model = ci_vitl.blip_model
-config.clip_model_name = "ViT-H-14/laion2b_s32b_b79k"
-ci_vith = Interrogator(config)
-ci_vith.clip_model = ci_vith.clip_model.to("cpu")
-
-
-def image_analysis(image, clip_model_name):
-    # move selected model to GPU and other model to CPU
-    if clip_model_name == MODELS[0]:
-        ci_vith.clip_model = ci_vith.clip_model.to("cpu")
-        ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
-        ci = ci_vitl
-    else:
-        ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
-        ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
-        ci = ci_vith
 
+ci = Interrogator(config)
+ci.clip_model = ci.clip_model.to("cpu")
+ci.config.blip_num_beams = 64
+ci.config.chunk_size = 2048
+ci.config.flavor_intermediate_count = 2048  # 1024
+
+def image_analysis(image):
     image = image.convert('RGB')
     image_features = ci.image_to_features(image)
 
@@ -47,20 +34,6 @@ def image_analysis(image, clip_model_name):
 
 
 def image_to_prompt(image, clip_model_name, mode):
-    # move selected model to GPU and other model to CPU
-    if clip_model_name == MODELS[0]:
-        ci_vith.clip_model = ci_vith.clip_model.to("cpu")
-        ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
-        ci = ci_vitl
-    else:
-        ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
-        ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
-        ci = ci_vith
-
-    ci.config.blip_num_beams = 64
-    ci.config.chunk_size = 2048
-    ci.config.flavor_intermediate_count = 2048 if clip_model_name == MODELS[0] else 1024
-
     image = image.convert('RGB')
     if mode == 'best':
         prompt = ci.interrogate(image)
@@ -71,7 +44,7 @@ def image_to_prompt(image, clip_model_name, mode):
     elif mode == 'negative':
         prompt = ci.interrogate_negative(image)
 
-    return prompt
+    return prompt
 
 
 TITLE = """
@@ -163,7 +136,7 @@ def analyze_tab():
     ex = gr.Examples(
         examples=examples,
         fn=image_analysis,
-        inputs=[input_image
+        inputs=[input_image],
         outputs=[medium, artist, movement, trending, flavor],
         cache_examples=True,
         run_on_click=True
@@ -179,22 +152,16 @@ with gr.Blocks(css=CSS) as block:
     with gr.Row():
         input_image = gr.Image(type='pil', elem_id="input-img")
         with gr.Column():
-            input_model = gr.Dropdown(MODELS, value=MODELS[0], label='CLIP Model')
            input_mode = gr.Radio(['best', 'fast', 'classic', 'negative'], value='best', label='Mode')
     submit_btn = gr.Button("Submit", api_name="image-to-prompt")
     output_text = gr.Textbox(label="Output", elem_id="output-txt")
 
-    with gr.Group(elem_id="share-btn-container"):
-        community_icon = gr.HTML(community_icon_html, visible=False)
-        loading_icon = gr.HTML(loading_icon_html, visible=False)
-        share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
-
     examples=[['example01.jpg', MODELS[0], 'best'], ['example02.jpg', MODELS[0], 'best']]
     ex = gr.Examples(
         examples=examples,
         fn=image_to_prompt,
         inputs=[input_image, input_model, input_mode],
-        outputs=[output_text
+        outputs=[output_text],
         cache_examples=True,
         run_on_click=True
     )
@@ -208,8 +175,8 @@ with gr.Blocks(css=CSS) as block:
     submit_btn.click(
         fn=image_to_prompt,
         inputs=[input_image, input_model, input_mode],
-        outputs=[output_text
+        outputs=[output_text]
     )
     share_button.click(None, [], [], _js=share_js)
 
-block.queue(max_size=64).launch(show_api=False)
+block.queue(max_size=64).launch(show_api=False)
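
For reference, here is a minimal sketch of the single-model pipeline the new app.py settles on, stripped of the Gradio UI. It sticks to clip_interrogator calls that appear in the diff above; 'example01.jpg' is borrowed from the examples list and stands in for any input image.

#!/usr/bin/env python3
# Minimal sketch (no UI) of the simplified single-model setup above;
# assumes the same clip_interrogator package imported in app.py.
from PIL import Image
from clip_interrogator import Config, Interrogator

# BLIP captioner + ViT-L CLIP model, matching app.py
config = Config(clip_model_name="ViT-L-14/openai")
ci = Interrogator(config)
ci.config.blip_num_beams = 64               # beam width for the BLIP caption
ci.config.chunk_size = 2048                 # CLIP batch size; lower to save VRAM
ci.config.flavor_intermediate_count = 2048  # candidate "flavor" terms to rank

image = Image.open("example01.jpg").convert("RGB")
print("prompt:  ", ci.interrogate(image))           # the app's 'best' mode
print("negative:", ci.interrogate_negative(image))  # the app's 'negative' mode

app.py additionally parks the CLIP model on CPU with ci.clip_model.to("cpu"), which suits a CPU-only Space at the cost of much slower interrogation.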