# Spaces: Runtime error (scraped page-status header; kept as a comment so the file parses)
# Notebook install recipe kept for reference as a no-op string literal
# (the leading `!` lines only work inside Jupyter/Colab cells).
'''
!pip install "deepsparse-nightly==1.6.0.20231007"
!pip install "deepsparse[image_classification]"
!pip install opencv-python-headless
!pip uninstall numpy -y
!pip install numpy
!pip install pandas
'''
import os
# Reinstall numpy/pandas at startup — presumably to fix an ABI mismatch
# with the pinned deepsparse-nightly build on the Space; TODO confirm.
os.system("pip uninstall numpy -y")
os.system("pip install numpy")
os.system("pip install pandas")
import gradio as gr
import sys
from uuid import uuid1
from PIL import Image
from zipfile import ZipFile
import pathlib
import shutil
import pandas as pd
import deepsparse
import json
import numpy as np
# Four DeepSparse embedding-extraction pipelines over the same sparse
# quantized ResNet-50 (ImageNet) checkpoint; they differ only in which
# layer the embedding is taken from.
rn50_embedding_pipeline_default = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    # emb_extraction_layer left at its default for this pipeline
    #emb_extraction_layer=-1, # extracts last layer before projection head and softmax
)
rn50_embedding_pipeline_last_1 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-1,  # extracts last layer before projection head and softmax
)
rn50_embedding_pipeline_last_2 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-2,  # second-to-last layer
)
rn50_embedding_pipeline_last_3 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-3,  # third-to-last layer
)
# Keys match the string values of the "Choose embedding layer" radio widget.
rn50_embedding_pipeline_dict = {
    "0": rn50_embedding_pipeline_default,
    "1": rn50_embedding_pipeline_last_1,
    "2": rn50_embedding_pipeline_last_2,
    "3": rn50_embedding_pipeline_last_3
}
def zip_ims(g):
    """Bundle gallery image files into ``tmp.zip`` in the working directory.

    Parameters
    ----------
    g : list[dict] | None
        Gallery items; each item must carry the file path under ``"name"``.

    Returns
    -------
    str | None
        ``"tmp.zip"`` on success, or ``None`` when *g* is ``None`` or empty.
    """
    if g is None:
        return None
    paths = [item["name"] for item in g]
    if not paths:
        return None
    zip_file_name = "tmp.zip"
    with ZipFile(zip_file_name, "w") as archive:
        for path in paths:
            # Random UUID arcname avoids collisions between files that
            # share a basename in different source directories.
            archive.write(path, "{}.png".format(uuid1()))
    return zip_file_name
def unzip_ims_func(zip_file_name, choose_model,
                   rn50_embedding_pipeline_dict = rn50_embedding_pipeline_dict):
    """Extract a zip of images, embed them, and return (json_text, images).

    Parameters
    ----------
    zip_file_name : str | None
        Path to the uploaded zip archive; ``None`` yields an empty result.
    choose_model : str
        Key into *rn50_embedding_pipeline_dict* ("0".."3") selecting the
        embedding-extraction layer.
    rn50_embedding_pipeline_dict : dict
        Mapping of layer keys to DeepSparse pipelines (module-level default).

    Returns
    -------
    tuple[str, list | None]
        JSON text ``{"names": [...], "embs": [...]}`` and the list of
        loaded PIL images for the gallery.
    """
    print("call file")
    if zip_file_name is None:
        return json.dumps({}), None
    print("zip_file_name :")
    print(zip_file_name)
    unzip_path = "img_dir"
    # Start from a clean extraction directory every call.
    if os.path.exists(unzip_path):
        shutil.rmtree(unzip_path)
    with ZipFile(zip_file_name) as archive:
        archive.extractall(unzip_path)
    im_name_l = [
        str(p)
        for pattern in ("*.png", "*.jpg", "*.jpeg")
        for p in pathlib.Path(unzip_path).rglob(pattern)
    ]
    rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
    embeddings = rn50_embedding_pipeline(images=im_name_l)
    im_l = []
    for p in im_name_l:
        im = Image.open(p)
        # PIL opens lazily; force the pixel read now because the backing
        # files are deleted just below (fixes blank/broken gallery images).
        im.load()
        im_l.append(im)
    if os.path.exists(unzip_path):
        shutil.rmtree(unzip_path)
    # os.path.basename instead of split("/") so Windows paths work too.
    im_name_l = [os.path.basename(p) for p in im_name_l]
    return json.dumps({
        "names": im_name_l,
        "embs": embeddings.embeddings[0]
    }), im_l
def emb_img_func(im, choose_model,
                 rn50_embedding_pipeline_dict = rn50_embedding_pipeline_dict):
    """Embed a single uploaded image and return the embedding as JSON text.

    Parameters
    ----------
    im : numpy.ndarray | None
        Image pixels as delivered by the gr.Image widget; ``None`` yields
        an empty JSON object.
    choose_model : str
        Key into *rn50_embedding_pipeline_dict* ("0".."3").
    rn50_embedding_pipeline_dict : dict
        Mapping of layer keys to DeepSparse pipelines (module-level default).

    Returns
    -------
    str
        JSON text ``{"names": [...], "embs": [...]}``.
    """
    print("call im :")
    if im is None:
        return json.dumps({})
    im_obj = Image.fromarray(im)
    # The pipeline consumes file paths, so write a uniquely-named scratch PNG.
    im_name = "{}.png".format(uuid1())
    im_obj.save(im_name)
    try:
        rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
        embeddings = rn50_embedding_pipeline(images=[im_name])
    finally:
        # Remove the scratch file even if the pipeline raises (was leaked before).
        os.remove(im_name)
    return json.dumps({
        "names": [im_name],
        "embs": embeddings.embeddings[0]
    })
def image_grid(imgs, rows, cols):
    """Paste *imgs* left-to-right, top-to-bottom into a rows x cols grid.

    All images are assumed to share the size of ``imgs[0]`` — TODO confirm
    with callers; smaller images would leave black gaps.

    Parameters
    ----------
    imgs : sequence of PIL.Image.Image
        Images to lay out (must be non-empty and fit in the grid).
    rows, cols : int
        Grid dimensions.

    Returns
    -------
    PIL.Image.Image
        A new RGB image of size (cols*w, rows*h).

    Raises
    ------
    ValueError
        If there are more images than grid cells.
    """
    # Real validation instead of `assert`, which is stripped under `python -O`.
    if len(imgs) > rows * cols:
        raise ValueError(
            "image_grid: {} images do not fit in a {}x{} grid".format(
                len(imgs), rows, cols))
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def expand2square(pil_img, background_color):
    """Pad *pil_img* to a square canvas, centered along its short axis.

    Returns the original image unchanged when it is already square;
    otherwise returns a new image of side max(width, height) filled with
    *background_color*, with the original pasted centered.
    """
    width, height = pil_img.size
    if width == height:
        return pil_img
    # One code path for both orientations: the canvas side is the longer
    # dimension, and centering offsets are zero along that dimension.
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    canvas.paste(pil_img, ((side - width) // 2, (side - height) // 2))
    return canvas
def image_click(images, evt: gr.SelectData,
                choose_model,
                rn50_embedding_pipeline_dict = rn50_embedding_pipeline_dict,
                top_k = 5
                ):
    """Gallery click handler: rank gallery images by similarity to the click.

    Re-embeds every image in the gallery, computes pairwise Pearson
    correlation between embeddings, and returns the neighbourhood of the
    clicked (pivot) image for the two output galleries.

    Parameters:
        images: the gr.Gallery value (a pydantic model in Gradio 4).
        evt: Gradio select event; evt.index is the clicked position.
        choose_model: key "0".."3" into rn50_embedding_pipeline_dict.
        top_k: number of nearest neighbours to show (capped at gallery size).

    Returns:
        (deduplicated image list, full top_k x top_k neighbour image list).
    """
    # Gradio 4 gallery values are pydantic models; round-trip through JSON
    # to get plain dicts, then normalize to [{"name": path}, ...].
    images = json.loads(images.model_dump_json())
    images = list(map(lambda x: {"name": x["image"]["path"]}, images))
    img_selected = images[evt.index]  # NOTE(review): unused below
    pivot_image_path = images[evt.index]['name']
    im_name_l = list(map(lambda x: x["name"], images))
    rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
    embeddings = rn50_embedding_pipeline(images=im_name_l)
    # Serialize then immediately re-parse — keeps the same data shape the
    # other endpoints emit; presumably for parity with unzip_ims_func.
    json_text = json.dumps({
        "names": im_name_l,
        "embs": embeddings.embeddings[0]
    })
    assert type(json_text) == type("")
    assert type(pivot_image_path) in [type(""), type(0)]
    dd_obj = json.loads(json_text)
    names = dd_obj["names"]
    embs = dd_obj["embs"]
    assert pivot_image_path in names
    # Pairwise Pearson correlation between embeddings; rows/columns are
    # labelled with the image paths.
    corr_df = pd.DataFrame(np.asarray(embs).T).corr()
    corr_df.columns = names
    corr_df.index = names
    arr_l = []
    for i, r in corr_df.iterrows():
        # For each image: (path, similarity) pairs sorted best-first.
        arr_ll = sorted(r.to_dict().items(), key = lambda t2: t2[1], reverse = True)
        arr_l.append(arr_ll)
    top_k = min(len(corr_df), top_k)
    # Top-k neighbour paths of the clicked image.
    cols = pd.Series(arr_l[names.index(pivot_image_path)]).map(lambda x: x[0]).values.tolist()[:top_k]
    # Matrix of neighbour paths (similarity dropped), indexed by image.
    corr_array_df = pd.DataFrame(arr_l).applymap(lambda x: x[0])
    corr_array_df.index = names
    #### corr_array
    # top_k x top_k block: neighbours-of-neighbours of the pivot.
    corr_array = corr_array_df.loc[cols].iloc[:, :top_k].values
    l_list = pd.Series(corr_array.reshape([-1])).values.tolist()
    l_list = pd.Series(l_list).map(Image.open).map(lambda x: expand2square(x, (0, 0, 0))).values.tolist()
    # Deduplicate while preserving order (PIL images are compared by identity).
    l_dist_list = []
    for ele in l_list:
        if ele not in l_dist_list:
            l_dist_list.append(ele)
    return l_dist_list, l_list
# UI layout: left column embeds a single image or a zip of images; right
# column shows similarity-sorted galleries driven by clicks on the output
# gallery.
with gr.Blocks() as demo:
    title = gr.HTML(
        """<h1 align="center"> Deepsparse Image Embedding </h1>""",
        elem_id="title",
    )
    with gr.Row():
        with gr.Column():
            inputs_0 = gr.Image(label = "Input Image for embed")
            button_0 = gr.Button("Image button")
            gr.Examples(
                [
                    "Anything_V5.png",
                    "waifu_girl0.png",
                ],
                inputs = inputs_0,
                label = "Image Examples"
            )
        with gr.Column():
            inputs_1 = gr.File(label = "Input Images zip file for embed")
            button_1 = gr.Button("Image File button")
            gr.Examples(
                [
                    "rose_love_imgs.zip",
                    "beautiful_room_imgs.zip"
                ],
                inputs = inputs_1,
                label = "Image Zip file Examples"
            )
    with gr.Row():
        with gr.Column():
            with gr.Row():
                title = gr.Markdown(
                    value="### Click on a Image in the gallery to select it",
                    visible=True,
                    elem_id="selected_model",
                )
                choose_model = gr.Radio(choices=["0", "1", "2", "3"],
                    value="0", label="Choose embedding layer", elem_id="layer_radio")
            g_outputs = gr.Gallery(label='Output gallery', elem_id="gallery",
                columns=[5], object_fit="contain", height="auto")
            outputs = gr.Text(label = "Output Embeddings")
        with gr.Column():
            sdg_outputs = gr.Gallery(label='Sort Distinct gallery', elem_id="gallery",
                columns=[5], object_fit="contain", height="auto")
            sg_outputs = gr.Gallery(label='Sort gallery', elem_id="gallery",
                columns=[5], object_fit="contain", height="auto")
    button_0.click(fn = emb_img_func, inputs = [inputs_0, choose_model], outputs = outputs)
    button_1.click(fn = unzip_ims_func, inputs = [inputs_1, choose_model],
        outputs = [outputs, g_outputs])
    g_outputs.select(image_click,
        inputs = [g_outputs, choose_model],
        outputs = [sdg_outputs, sg_outputs],)
# Bug fix: launch("0.0.0.0") passed the address positionally into the
# `inline` parameter of Blocks.launch; the bind address must go to
# `server_name` for the Space to listen on all interfaces.
demo.launch(server_name="0.0.0.0")