import io
import gradio as gr
import matplotlib.pyplot as plt
import requests
import validators
import torch
import pathlib
from PIL import Image
import cv2 as cv
import numpy as np
from transformers import DetrImageProcessor, DetrForSegmentation, MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
from transformers.image_transforms import id_to_rgb
import os

# colors for visualization
COLORS = [
    [0.000, 0.447, 0.741],
    [0.850, 0.325, 0.098],
    [0.929, 0.694, 0.125],
    [0.494, 0.184, 0.556],
    [0.466, 0.674, 0.188],
    [0.301, 0.745, 0.933],
]

YOLOV8_LABELS = ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']

def make_prediction(img, feature_extractor, model):
    # Run an object-detection checkpoint over a PIL image and rescale the
    # predicted boxes back to the original image size (not wired into the UI yet).
    inputs = feature_extractor(img, return_tensors="pt")
    outputs = model(**inputs)
    img_size = torch.tensor([tuple(reversed(img.size))])
    processed_outputs = feature_extractor.post_process(outputs, img_size)
    return processed_outputs
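
# Note: `post_process` is deprecated in recent transformers releases. A sketch
# of the equivalent call with the newer API (the threshold value here is
# illustrative, not taken from the app):
#
#   processed_outputs = feature_extractor.post_process_object_detection(
#       outputs, threshold=0.7, target_sizes=img_size
#   )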

def fig2img(fig):
    buf = io.BytesIO()
    fig.savefig(buf, bbox_inches="tight")
    buf.seek(0)
    img = Image.open(buf)
    return img

def visualize_prediction(pil_img, output_dict, threshold=0.7, id2label=None):
    keep = output_dict["scores"] > threshold
    boxes = output_dict["boxes"][keep].tolist()
    scores = output_dict["scores"][keep].tolist()
    labels = output_dict["labels"][keep].tolist()
    if id2label is not None:
        labels = [id2label[x] for x in labels]

    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    colors = COLORS * 100
    for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, colors):
        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=3))
        ax.text(xmin, ymin, f"{label}: {score:0.2f}", fontsize=15, bbox=dict(facecolor="yellow", alpha=0.5))
    plt.axis("off")
    return fig2img(plt.gcf())

def contour_map(map_to_use, label_id):
    # Binarize the segmentation map for a single segment id and extract its contours.
    mask = (map_to_use.cpu().numpy() == label_id)
    visual_mask = (mask * 255).astype(np.uint8)
    contours, hierarchy = cv.findContours(visual_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy
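
# Usage sketch (hypothetical, not called by the app): the contours returned
# above can be overlaid on an image with OpenCV. Here `pil_image`,
# `segmentation`, and `segment_id` stand in for a real input image and a
# post-processed segmentation map:
#
#   canvas = np.array(pil_image.convert("RGB"))
#   contours, _ = contour_map(segmentation, segment_id)
#   cv.drawContours(canvas, contours, -1, (255, 0, 0), 2)  # all contours, red, 2 px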

def segment_images(model_name, url_input, image_input, threshold):
    # Get the image, preferring a valid URL over an uploaded file.
    if validators.url(url_input):
        image = Image.open(requests.get(url_input, stream=True).raw)
    elif image_input:
        image = image_input
    else:
        raise ValueError("Provide a valid image URL or upload an image")

    if "detr" in model_name:
        raise NotImplementedError("DETR panoptic models are not implemented yet")
| elif "maskformer" in model_name.lower(): | |
| # Load the processor and model | |
| processor = MaskFormerImageProcessor.from_pretrained(model_name) | |
| # print(type(processor)) | |
| model = MaskFormerForInstanceSegmentation.from_pretrained(model_name) | |
| inputs = processor(images=image, return_tensors="pt") | |
| outputs = model(**inputs) | |
| results = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] | |
| return_string = "" | |
| for r in results["segments_info"]: | |
| contour_list, hierarchy = contour_map(results["segmentation"], r["id"]) | |
| label_name = model.config.id2label[r["label_id"]] | |
| return_string += f"ID: {r['id']}\t Contour Count: {len(contour_list)}\t Score: {r['score']}\t Label Name: {label_name},\n" | |
| r_shape = results["segmentation"].shape | |
| new_image = np.zeros((r[0], r[1], 3), dtype=np.uint8) | |
| new_image[:, :, 0] = results["segmentation"].numpy()[:, :] | |
| new_image[:, :, 1] = (new_image[:, :, 0] * 2) %256 | |
| new_image[:, :, 2] = (new_image[:, :, 0] * 3) %256 | |
| new_image = Image.fromarray(new_image) | |
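        # Alternative sketch: transformers.image_transforms.id_to_rgb (imported
        # above but unused here) packs an id map into three RGB channels,
        # assuming `results["segmentation"]` holds integer segment ids:
        #
        #   rgb = id_to_rgb(results["segmentation"].numpy())
        #   new_image = Image.fromarray(rgb.astype(np.uint8))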
        return new_image, return_string
    else:
        raise ValueError(f"Model {model_name} is not implemented")

def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])


def set_example_url(example: list) -> dict:
    return gr.Textbox.update(value=example[0])

title = """<h1 id="title">Image Segmentation with Various Models</h1>"""

description = """
Links to HuggingFace Models:

- [facebook/detr-resnet-50-panoptic](https://huggingface.co/facebook/detr-resnet-50-panoptic) (Not implemented YET)
- [facebook/detr-resnet-101-panoptic](https://huggingface.co/facebook/detr-resnet-101-panoptic) (Not implemented YET)
- [facebook/maskformer-swin-large-coco](https://huggingface.co/facebook/maskformer-swin-large-coco)
"""

models = ["facebook/detr-resnet-50-panoptic", "facebook/detr-resnet-101-panoptic", "facebook/maskformer-swin-large-coco"]
urls = ["https://c8.alamy.com/comp/J2AB4K/the-new-york-stock-exchange-on-the-wall-street-in-new-york-J2AB4K.jpg"]

css = '''
h1#title {
  text-align: center;
}
'''

demo = gr.Blocks(css=css)


def changing():
    # Enable the Detect buttons once a model has been selected.
    # https://discuss.huggingface.co/t/how-to-programmatically-enable-or-disable-components/52350/4
    return gr.Button.update(interactive=True), gr.Button.update(interactive=True)

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    options = gr.Dropdown(choices=models, label='Select Image Segmentation Model', show_label=True)
    slider_input = gr.Slider(minimum=0.2, maximum=1, value=0.7, label='Prediction Threshold')

    with gr.Tabs():
        with gr.TabItem('Image URL'):
            with gr.Row():
                url_input = gr.Textbox(lines=2, label='Enter valid image URL here..')
                img_output_from_url = gr.Image(shape=(650, 650))
            with gr.Row():
                example_url = gr.Dataset(components=[url_input], samples=[[str(url)] for url in urls])
            url_but = gr.Button('Detect', interactive=False)

        with gr.TabItem('Image Upload'):
            with gr.Row():
                img_input = gr.Image(type='pil')
                img_output_from_upload = gr.Image(shape=(650, 650))
            with gr.Row():
                example_images = gr.Dataset(
                    components=[img_input],
                    samples=[[path.as_posix()]
                             for path in sorted(pathlib.Path('images').rglob('*'))
                             if path.suffix.lower() in ('.jpg', '.jpeg')])  # case-insensitive extension match
            img_but = gr.Button('Detect', interactive=False)
    output_text1 = gr.components.Textbox(label="Confidence Values")
    options.change(fn=changing, inputs=[], outputs=[img_but, url_but])
    url_but.click(segment_images, inputs=[options, url_input, img_input, slider_input], outputs=[img_output_from_url, output_text1], queue=True)
    img_but.click(segment_images, inputs=[options, url_input, img_input, slider_input], outputs=[img_output_from_upload, output_text1], queue=True)
    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[img_input])
    example_url.click(fn=set_example_url, inputs=[example_url], outputs=[url_input])

demo.launch()