from share_btn import community_icon_html, loading_icon_html, share_js
import os, subprocess
import torch
# (The setup function and cache-download section have been commented out)
import sys
sys.path.append('src/blip')
sys.path.append('clip-interrogator')
import gradio as gr
from clip_interrogator import Config, Interrogator
import io
from PIL import Image
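# Interrogator configuration: run on the GPU when one is available and enable
# BLIP offloading otherwise; the chunk size, flavor count, and beam width below
# trade speed for prompt quality.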
config = Config()
config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
config.blip_offload = not torch.cuda.is_available()
config.chunk_size = 2048
config.flavor_intermediate_count = 512
config.blip_num_beams = 64
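# Constructing the Interrogator loads BLIP and the OpenCLIP model once at
# startup (per the description below, the ViT-H-14 OpenCLIP model), so the
# first launch downloads weights and can take a while.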
ci = Interrogator(config)
def inference(input_images, mode, best_max_flavors):
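    """Generate a text prompt for each uploaded image.

    input_images: list of raw image bytes, as delivered by gr.Files below.
    mode: 'best', 'classic', or 'fast' interrogation strategy.
    best_max_flavors: flavor cap, used only in 'best' mode.
    Returns one "Image N: <prompt>" line per input, joined by newlines.
    """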
    # Process each uploaded image and collect (image, prompt) pairs
    prompt_results = []
    for image_bytes in input_images:
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        if mode == 'best':
            prompt_result = ci.interrogate(image, max_flavors=int(best_max_flavors))
        elif mode == 'classic':
            prompt_result = ci.interrogate_classic(image)
        else:
            prompt_result = ci.interrogate_fast(image)
        prompt_results.append((image, prompt_result))  # one (image, prompt) tuple per input
    # Flatten the results into one line of text per image
    text_results = [f"Image {i+1}: {result[1]}" for i, result in enumerate(prompt_results)]
    return "\n".join(text_results)
title = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
margin-bottom: 10px;
"
>
<h1 style="font-weight: 600; margin-bottom: 7px;">
CLIP Interrogator 2.1
</h1>
</div>
<p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">
Want to figure out what a good prompt might be to create new images like an existing one?
<br />The CLIP Interrogator is here to get you answers!
<br />This version is specialized for producing nice prompts for use with Stable Diffusion 2.0 using the ViT-H-14 OpenCLIP model!
</p>
</div>
"""
article = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;font-size: 94%;">
<p>
Server busy? You can also run on <a href="https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb">Google Colab</a>
</p>
<p>
Has this been helpful to you? Follow Pharma on Twitter
<a href="https://twitter.com/pharmapsychotic">@pharmapsychotic</a>
and check out more tools at his
<a href="https://pharmapsychotic.com/tools.html">Ai generative art tools list</a>
</p>
</div>
"""
css = '''
#col-container {width: 80%; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
all: unset;
}
#share-btn-container div:nth-child(-n+2){
width: auto !important;
min-height: 0px !important;
}
#share-btn-container .wrap {
display: none !important;
}
#gallery .caption-label {
font-size: 15px !important;
right: 0 !important;
max-width: 100% !important;
text-overflow: clip !important;
white-space: normal !important;
overflow: auto !important;
height: 20% !important;
}
#gallery .caption {
padding: var(--size-2) var(--size-3) !important;
text-overflow: clip !important;
white-space: normal !important; /* Allows the text to wrap */
color: var(--block-label-text-color) !important;
font-weight: var(--weight-semibold) !important;
text-align: center !important;
height: 100% !important;
font-size: 17px !important;
}
'''
with gr.Blocks(css=css) as block:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        # 'binary' makes gr.Files pass raw bytes per file to the inference function
        input_image = gr.Files(label="Inputs", file_count="multiple", type='binary', elem_id='inputs')
        with gr.Row():
            mode_input = gr.Radio(['best', 'classic', 'fast'], label='Select mode', value='best')
            flavor_input = gr.Slider(minimum=2, maximum=24, step=2, value=4, label='best mode max flavors')
        submit_btn = gr.Button("Submit")
        # Results are shown in a Textbox rather than a Gallery; interactive=False
        # makes it read-only (Textbox has no `readonly` argument or "str" type)
        result_textbox = gr.Textbox(label="Outputs", interactive=False, elem_id="output-textbox")
        with gr.Group(elem_id="share-btn-container"):
            loading_icon = gr.HTML(loading_icon_html, visible=False)
        gr.HTML(article)
    submit_btn.click(fn=inference, inputs=[input_image, mode_input, flavor_input], outputs=[result_textbox], api_name="clipi2")
block.queue(max_size=32, concurrency_count=10).launch(show_api=False)
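# Note: queue(concurrency_count=...) is Gradio 3.x API; later Gradio releases
# moved concurrency control onto the event listeners themselves.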