import os
from pyChatGPT import ChatGPT


os.system("pip install -U gradio")


import sys
import gradio as gr

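# Install a detectron2 wheel prebuilt for CUDA 10.2 / PyTorch 1.9, matching the pinned index URL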
os.system(
    "pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html"
)

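# Clone Detic together with its CenterNet2 submodule and work from inside the repo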
os.system(
    "git clone https://github.com/facebookresearch/Detic.git --recurse-submodules"
)
os.chdir("Detic")

import torch

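# Configure the detectron2 logger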
import detectron2
from detectron2.utils.logger import setup_logger

setup_logger()

import numpy as np
import json
import cv2
import random

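# detectron2 config, predictor, and dataset metadata utilities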
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

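# Detic and CenterNet2 live inside the cloned repo, so add them to the import path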
sys.path.insert(0, "third_party/CenterNet2/projects/CenterNet2/")
sys.path.insert(0, "third_party/CenterNet2/")
from centernet.config import add_centernet_config
from detic.config import add_detic_config
from detic.modeling.utils import reset_cls_test

from PIL import Image

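# Build a CPU-only Detic predictor from the SwinB cross-dataset config and its released weights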
cfg = get_cfg()
add_centernet_config(cfg)
add_detic_config(cfg)
cfg.MODEL.DEVICE = "cpu"
cfg.merge_from_file("configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml")
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detic/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = "rand"
cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True
predictor = DefaultPredictor(cfg)

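# Built-in zero-shot classifier weights and the matching metadata split for each vocabulary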
BUILDIN_CLASSIFIER = {
    "lvis": "datasets/metadata/lvis_v1_clip_a+cname.npy",
    "objects365": "datasets/metadata/o365_clip_a+cnamefix.npy",
    "openimages": "datasets/metadata/oid_clip_a+cname.npy",
    "coco": "datasets/metadata/coco_clip_a+cname.npy",
}

BUILDIN_METADATA_PATH = {
    "lvis": "lvis_v1_val",
    "objects365": "objects365_v2_val",
    "openimages": "oid_val_expanded",
    "coco": "coco_2017_val",
}

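# Select the LVIS vocabulary and reset the predictor's classifier head accordingly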
vocabulary = "lvis"
metadata = MetadataCatalog.get(BUILDIN_METADATA_PATH[vocabulary])
classifier = BUILDIN_CLASSIFIER[vocabulary]
num_classes = len(metadata.thing_classes)
reset_cls_test(predictor.model, classifier, num_classes)


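# Run Detic on one image and return the detections; optionally keep only the first box per label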
def inference(img, unique_only):
    im = cv2.imread(img)
    outputs = predictor(im)
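    # outputs["instances"] carries pred_boxes and pred_classes for every detection above the score threshold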

    detected_objects = []
    object_list_str = []

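    # Convert each box from (x0, y0, x1, y1) corners to x/y/width/height and attach its class name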
    box_locations = outputs["instances"].pred_boxes
    box_loc_screen = box_locations.tensor.cpu().numpy()
    unique_object_dict = {}
    for i, box_coord in enumerate(box_loc_screen):
        x0, y0, x1, y1 = box_coord
        width = x1 - x0
        height = y1 - y0
        predicted_label = metadata.thing_classes[outputs["instances"].pred_classes[i]]
        detected_objects.append(
            {
                "prediction": predicted_label,
                "x": int(x0),
                "y": int(y0),
                "width": int(width),
                "height": int(height),
            }
        )
        if not unique_only or predicted_label not in unique_object_dict:
            object_list_str.append(
                f"{predicted_label} - X:{int(x0)} Y: {int(y0)} Width: {int(width)} Height: {int(height)}"
            )
            unique_object_dict[predicted_label] = 1

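    # Assemble the text prompt that asks ChatGPT to caption the scene from box geometry alone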
    output_str = "Imagine you are a blind but intelligent image captioner who is only given the X,Y coordinates and width, height of each object in a scene with no specific attributes of the objects themselves. Create a description of the scene using the relative positions and sizes of objects\n"
    for line in object_list_str:
        output_str += line + "\n"

    return detected_objects, output_str

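# Gradio UI: image and checkbox in, detected-object JSON and the generated ChatGPT prompt out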
with gr.Blocks() as demo:
    with gr.Column():
        inp = gr.Image(label="Input Image", type="filepath")
        chk = gr.Checkbox(
            label="Unique objects only? (useful to reduce ChatGPT input, speed up its response, and avoid timeouts)"
        )
        btn_detic = gr.Button("Run Detic for ChatGPT")
    with gr.Column():
        output_desc = gr.JSON(label="Detected Objects")
        output_prompt = gr.Textbox(label="ChatGPT Prompt")

    btn_detic.click(
        fn=inference,
        inputs=[inp, chk],
        outputs=[output_desc, output_prompt],
        api_name="detect",
    )

demo.launch()