Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,16 +12,43 @@ from zipfile import ZipFile
|
|
| 12 |
import pathlib
|
| 13 |
import shutil
|
| 14 |
import pandas as pd
|
| 15 |
-
import json
|
| 16 |
import deepsparse
|
| 17 |
|
| 18 |
-
|
| 19 |
task="embedding-extraction",
|
| 20 |
base_task="image-classification", # tells the pipeline to expect images and normalize input with ImageNet means/stds
|
| 21 |
model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
|
| 22 |
-
#emb_extraction_layer=-
|
| 23 |
)
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
def zip_ims(g):
|
| 26 |
from uuid import uuid1
|
| 27 |
if g is None:
|
|
@@ -36,7 +63,8 @@ def zip_ims(g):
|
|
| 36 |
#zipObj.write(file2.name, "file2")
|
| 37 |
return zip_file_name
|
| 38 |
|
| 39 |
-
def
|
|
|
|
| 40 |
print("call file")
|
| 41 |
if zip_file_name is None:
|
| 42 |
return json.dumps({})
|
|
@@ -50,6 +78,7 @@ def unzip_ims(zip_file_name):
|
|
| 50 |
list(pathlib.Path(unzip_path).rglob("*.jpg")) + \
|
| 51 |
list(pathlib.Path(unzip_path).rglob("*.jpeg"))
|
| 52 |
).map(str).values.tolist()
|
|
|
|
| 53 |
embeddings = rn50_embedding_pipeline(images=im_name_l)
|
| 54 |
if os.path.exists(unzip_path):
|
| 55 |
shutil.rmtree(unzip_path)
|
|
@@ -60,13 +89,15 @@ def unzip_ims(zip_file_name):
|
|
| 60 |
})
|
| 61 |
|
| 62 |
|
| 63 |
-
def emb_img_func(im
|
|
|
|
| 64 |
print("call im :")
|
| 65 |
if im is None:
|
| 66 |
return json.dumps({})
|
| 67 |
im_obj = Image.fromarray(im)
|
| 68 |
im_name = "{}.png".format(uuid1())
|
| 69 |
im_obj.save(im_name)
|
|
|
|
| 70 |
embeddings = rn50_embedding_pipeline(images=[im_name])
|
| 71 |
os.remove(im_name)
|
| 72 |
return json.dumps({
|
|
@@ -85,6 +116,9 @@ def emb_gallery_func(gallery):
|
|
| 85 |
'''
|
| 86 |
|
| 87 |
with gr.Blocks() as demo:
|
|
|
|
|
|
|
|
|
|
| 88 |
with gr.Row():
|
| 89 |
with gr.Column():
|
| 90 |
inputs_0 = gr.Image(label = "Input Image for embed")
|
|
@@ -96,6 +130,6 @@ with gr.Blocks() as demo:
|
|
| 96 |
outputs = gr.Text(label = "Output Embeddings")
|
| 97 |
|
| 98 |
button_0.click(fn = emb_img_func, inputs = inputs_0, outputs = outputs)
|
| 99 |
-
button_1.click(fn =
|
| 100 |
|
| 101 |
demo.launch("0.0.0.0")
|
|
|
|
| 12 |
import pathlib
|
| 13 |
import shutil
|
| 14 |
import pandas as pd
|
|
|
|
| 15 |
import deepsparse
|
| 16 |
|
| 17 |
+
# Sparse ResNet-50 embedding-extraction pipelines.
#
# The app exposes four variants of the same pipeline that differ ONLY in
# which layer the embedding is extracted from; build them with one helper
# instead of repeating the configuration four times.

# SparseZoo stub of the quantized/pruned ResNet-50 ImageNet classifier.
_RN50_MODEL_PATH = (
    "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/"
    "channel20_pruned75_quant-none-vnni"
)


def _make_rn50_embedding_pipeline(emb_extraction_layer=None):
    """Create a deepsparse embedding-extraction pipeline over sparse ResNet-50.

    Args:
        emb_extraction_layer: index of the layer to extract embeddings from
            (e.g. -1 is the last layer before the projection head and
            softmax). ``None`` keeps the pipeline's default extraction point.

    Returns:
        A ``deepsparse.Pipeline`` ready to be called with ``images=[...]``.
    """
    kwargs = {}
    if emb_extraction_layer is not None:
        kwargs["emb_extraction_layer"] = emb_extraction_layer
    return deepsparse.Pipeline.create(
        task="embedding-extraction",
        # base_task tells the pipeline to expect images and normalize input
        # with ImageNet means/stds.
        base_task="image-classification",
        model_path=_RN50_MODEL_PATH,
        **kwargs,
    )


rn50_embedding_pipeline_default = _make_rn50_embedding_pipeline()
rn50_embedding_pipeline_last_1 = _make_rn50_embedding_pipeline(-1)
rn50_embedding_pipeline_last_2 = _make_rn50_embedding_pipeline(-2)
rn50_embedding_pipeline_last_3 = _make_rn50_embedding_pipeline(-3)

# Maps the Gradio radio-button value ("0".."3") to the pipeline the
# callbacks should run.
rn50_embedding_pipeline_dict = {
    "0": rn50_embedding_pipeline_default,
    "1": rn50_embedding_pipeline_last_1,
    "2": rn50_embedding_pipeline_last_2,
    "3": rn50_embedding_pipeline_last_3,
}
|
| 51 |
+
|
| 52 |
def zip_ims(g):
|
| 53 |
from uuid import uuid1
|
| 54 |
if g is None:
|
|
|
|
| 63 |
#zipObj.write(file2.name, "file2")
|
| 64 |
return zip_file_name
|
| 65 |
|
| 66 |
+
def unzip_ims_func(zip_file_name, choose_model,
|
| 67 |
+
rn50_embedding_pipeline_dict = rn50_embedding_pipeline_dict):
|
| 68 |
print("call file")
|
| 69 |
if zip_file_name is None:
|
| 70 |
return json.dumps({})
|
|
|
|
| 78 |
list(pathlib.Path(unzip_path).rglob("*.jpg")) + \
|
| 79 |
list(pathlib.Path(unzip_path).rglob("*.jpeg"))
|
| 80 |
).map(str).values.tolist()
|
| 81 |
+
rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
|
| 82 |
embeddings = rn50_embedding_pipeline(images=im_name_l)
|
| 83 |
if os.path.exists(unzip_path):
|
| 84 |
shutil.rmtree(unzip_path)
|
|
|
|
| 89 |
})
|
| 90 |
|
| 91 |
|
| 92 |
+
def emb_img_func(im, choose_model,
|
| 93 |
+
rn50_embedding_pipeline_dict = rn50_embedding_pipeline_dict):
|
| 94 |
print("call im :")
|
| 95 |
if im is None:
|
| 96 |
return json.dumps({})
|
| 97 |
im_obj = Image.fromarray(im)
|
| 98 |
im_name = "{}.png".format(uuid1())
|
| 99 |
im_obj.save(im_name)
|
| 100 |
+
rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
|
| 101 |
embeddings = rn50_embedding_pipeline(images=[im_name])
|
| 102 |
os.remove(im_name)
|
| 103 |
return json.dumps({
|
|
|
|
| 116 |
'''
|
| 117 |
|
| 118 |
with gr.Blocks() as demo:
|
| 119 |
+
with gr.Row():
|
| 120 |
+
choose_model = gr.Radio(choices=["0", "1", "2", "3"],
|
| 121 |
+
value="0", label="Choose embedding layer", elem_id="layer_radio")
|
| 122 |
with gr.Row():
|
| 123 |
with gr.Column():
|
| 124 |
inputs_0 = gr.Image(label = "Input Image for embed")
|
|
|
|
| 130 |
outputs = gr.Text(label = "Output Embeddings")
|
| 131 |
|
| 132 |
button_0.click(fn = emb_img_func, inputs = inputs_0, outputs = outputs)
|
| 133 |
+
button_1.click(fn = unzip_ims_func, inputs = inputs_1, outputs = outputs)
|
| 134 |
|
| 135 |
demo.launch("0.0.0.0")
|