import io
import gradio as gr
import matplotlib.pyplot as plt
import requests, validators
import torch
import pathlib
from PIL import Image
from transformers import AutoFeatureExtractor, YolosForObjectDetection, DetrForObjectDetection
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
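# KMP_DUPLICATE_LIB_OK works around the "OMP: Error #15" crash that can occur when
# two copies of the OpenMP runtime get loaded (e.g. by PyTorch and matplotlib/MKL builds).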
# colors for visualization
COLORS = [
    [0.000, 0.447, 0.741],
    [0.850, 0.325, 0.098],
    [0.929, 0.694, 0.125],
    [0.494, 0.184, 0.556],
    [0.466, 0.674, 0.188],
    [0.301, 0.745, 0.933],
]
def make_prediction(img, feature_extractor, model):
    inputs = feature_extractor(img, return_tensors="pt")
    outputs = model(**inputs)
    img_size = torch.tensor([tuple(reversed(img.size))])
    processed_outputs = feature_extractor.post_process(outputs, img_size)
    return processed_outputs[0]
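# Note: `post_process` is deprecated in recent transformers releases. If it has been
# removed in the installed version, the rough equivalent (an assumption -- check the
# transformers docs for your version) would be:
#     processed_outputs = feature_extractor.post_process_object_detection(
#         outputs, threshold=0.0, target_sizes=img_size
#     )
# which also yields a list of {"scores", "labels", "boxes"} dicts, one per image.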
def fig2img(fig):
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    pil_img = Image.open(buf)
    basewidth = 750
    wpercent = basewidth / float(pil_img.size[0])
    hsize = int(float(pil_img.size[1]) * wpercent)
    img = pil_img.resize((basewidth, hsize), Image.Resampling.LANCZOS)
    return img
def visualize_prediction(img, output_dict, threshold=0.5, id2label=None):
    keep = output_dict["scores"] > threshold
    boxes = output_dict["boxes"][keep].tolist()
    scores = output_dict["scores"][keep].tolist()
    labels = output_dict["labels"][keep].tolist()
    if id2label is not None:
        labels = [id2label[x] for x in labels]
    plt.figure(figsize=(50, 50))
    plt.imshow(img)
    ax = plt.gca()
    colors = COLORS * 100
    for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, colors):
        # only draw detections for the license-plate class
        if label == 'license-plates':
            ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=10))
            ax.text(xmin, ymin, f"{label}: {score:0.2f}", fontsize=60, bbox=dict(facecolor="yellow", alpha=0.8))
    plt.axis("off")
    return fig2img(plt.gcf())
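# Note: visualize_prediction opens a new matplotlib figure on every call and never
# closes it, so a long-running Space will slowly accumulate figures. If that becomes
# a problem, calling plt.close("all") after grabbing the image should be safe; it is
# left out here to keep the behaviour identical to the original.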
def get_original_image(url_input):
    if validators.url(url_input):
        try:
            response = requests.get(url_input, stream=True)
            response.raise_for_status()
            image = Image.open(response.raw)
            return image
        except Exception as e:
            print(f"Error loading image from URL: {e}")
            return None
    return None
def detect_objects(model_name, url_input, image_input, webcam_input, threshold):
    # Pick whichever input was actually provided: URL, upload, or webcam
    image = None
    if url_input and url_input.strip() and validators.url(url_input):
        image = get_original_image(url_input)
    elif image_input is not None:
        image = image_input
    elif webcam_input is not None:
        image = webcam_input
    if image is None:
        raise gr.Error("Please provide an image via URL, file upload, or webcam")
    # Load the feature extractor and the matching detection model
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
    if "yolos" in model_name:
        model = YolosForObjectDetection.from_pretrained(model_name)
    elif "detr" in model_name:
        model = DetrForObjectDetection.from_pretrained(model_name)
    else:
        raise gr.Error(f"Unsupported model: {model_name}")
    # Make prediction
    processed_outputs = make_prediction(image, feature_extractor, model)
    # Visualize prediction
    viz_img = visualize_prediction(image, processed_outputs, threshold, model.config.id2label)
    return viz_img
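# Note: detect_objects re-instantiates the feature extractor and model on every click.
# A minimal caching sketch (assuming the model list stays small) would wrap the loading
# step, e.g.:
#
#     from functools import lru_cache
#
#     @lru_cache(maxsize=4)
#     def load_model(model_name):
#         feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
#         if "yolos" in model_name:
#             model = YolosForObjectDetection.from_pretrained(model_name)
#         else:
#             model = DetrForObjectDetection.from_pretrained(model_name)
#         return feature_extractor, model
#
# and call load_model(model_name) inside detect_objects; kept as a comment here so the
# original behaviour is unchanged.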
def set_example_image(example: list) -> dict:
    # gr.Image.update was removed in Gradio 4.x; gr.update works across versions
    return gr.update(value=example[0])

def set_example_url(example: list) -> dict:
    image = get_original_image(example[0])
    return gr.update(value=example[0]), gr.update(value=image)
title = """<h1 id="title">License Plate Detection with YOLOS</h1>"""
description = """
# πβ¨ Customize Your Biblical Porsche Scene Showcase β¨π
**YOLOS: When a Vision Transformer Gets Divine Revelation**
Behold! YOLOS is a Vision Transformer (ViT) that achieved 42 AP on COCO - not just a number, but *the answer to everything* (including which disciple gets shotgun in your biblical Porsche).
**The Scripture According to YOLOS:**
- "In the beginning was the Sequence, and the Sequence was One" - YOLOS 1:1
- Trained on 118k sacred images from the COCO testament
- Performs miracles at detecting heavenly vehicles and license plates
- Fine-tuned on the "Book of Car Plates" (443 verses of automotive divinity)
**Biblical Porsche Detection Capabilities:**
- β
Finds Peter's Porsche at the Gates of Heaven
- β
Spots Moses' license plate ("LET-M-PPL-GO")
- β
Detects David's sports car facing Goliath's SUV
- β
Locates the Holy Ghost's invisible convertible
*"And lo, the model saith: thou shalt look at only one sequence, and it shall be enough to find thy Porsche in the Red Sea of data."*
**Warning:** May occasionally confuse manna with hubcaps. Results not guaranteed in actual biblical times (camels not detected).
Links to HuggingFace Models:
- [nickmuchi/yolos-small-rego-plates-detection](https://huggingface.co/nickmuchi/yolos-small-rego-plates-detection)
"""
models = ["nickmuchi/yolos-small-finetuned-license-plate-detection","nickmuchi/detr-resnet50-license-plate-detection"]
# FIXED: Use "resolve/main" URLs instead of "blob/main" for raw images
urls = [
    "https://huggingface.co/spaces/TroglodyteDerivations/Customize_your_biblical_Porsche_scene_Showcase/resolve/main/images/flux_krea_00005_.png",
    "https://huggingface.co/spaces/TroglodyteDerivations/Customize_your_biblical_Porsche_scene_Showcase/resolve/main/images/flux_krea_00007_.png",
]
images = [[path.as_posix()] for path in sorted(pathlib.Path('images').rglob('*.*')) if path.suffix.lower() in ['.webp', '.jpg', '.jpeg', '.png']]
tik_tok_link = """
[Porsche on TikTok](https://www.tiktok.com/@porsche)
"""
css = '''
h1#title {
    text-align: center;
}
'''
# Custom CSS must be passed to gr.Blocks(), not to launch()
demo = gr.Blocks(css=css)

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown(tik_tok_link)
    options = gr.Dropdown(choices=models, label='Object Detection Model', value=models[0], show_label=True)
    slider_input = gr.Slider(minimum=0.2, maximum=1, value=0.5, step=0.1, label='Prediction Threshold')

    with gr.Tabs():
        with gr.TabItem('Image URL'):
            with gr.Row():
                with gr.Column():
                    url_input = gr.Textbox(lines=2, label='Enter valid image URL here..')
                    original_image = gr.Image(height=750, width=750)
                    # Update the original-image preview whenever the URL changes
                    url_input.change(
                        get_original_image,
                        inputs=[url_input],
                        outputs=[original_image],
                        show_progress=True
                    )
                with gr.Column():
                    img_output_from_url = gr.Image(height=750, width=750)
            with gr.Row():
                example_url = gr.Examples(
                    examples=urls,
                    inputs=[url_input],
                    outputs=[original_image],
                    fn=set_example_url,
                    cache_examples=False
                )
            url_but = gr.Button('Detect')

        with gr.TabItem('Image Upload'):
            with gr.Row():
                img_input = gr.Image(type='pil', height=750, width=750)
                img_output_from_upload = gr.Image(height=750, width=750)
            with gr.Row():
                example_images = gr.Examples(examples=images, inputs=[img_input])
            img_but = gr.Button('Detect')

        with gr.TabItem('WebCam'):
            with gr.Row():
                web_input = gr.Image(sources=['webcam'], type='pil', height=750, width=750, streaming=True)
                img_output_from_webcam = gr.Image(height=750, width=750)
            cam_but = gr.Button('Detect')

    url_but.click(detect_objects, inputs=[options, url_input, img_input, web_input, slider_input], outputs=[img_output_from_url], queue=True)
    img_but.click(detect_objects, inputs=[options, url_input, img_input, web_input, slider_input], outputs=[img_output_from_upload], queue=True)
    cam_but.click(detect_objects, inputs=[options, url_input, img_input, web_input, slider_input], outputs=[img_output_from_webcam], queue=True)

    gr.Markdown("[Porsche on TikTok](https://www.tiktok.com/@porsche)")

demo.launch(debug=True)
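# Rough local-run sketch (assumed dependency set based on the imports above; pin versions as needed):
#   pip install gradio transformers torch pillow matplotlib requests validators
#   python app.py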