Update app.py
app.py
CHANGED
@@ -12,10 +12,11 @@ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 i1 = gr.inputs.Image(type="pil", label="Input image")
 i2 = gr.inputs.Textbox(label="Input text")
+i3 = gr.inputs.Number(default=0.96, label="Threshold percentage score")
 o1 = gr.outputs.Image(type="pil", label="Cropped part")
 o2 = gr.outputs.Textbox(label="Similarity score")
 
-def extract_image(image, text, num=1):
+def extract_image(image, text, prob, num=1):
 
   inputs = feature_extractor(images=image, return_tensors="pt")
   outputs = dmodel(**inputs)
@@ -25,7 +26,7 @@ def extract_image(image, text, num=1):
   bboxes = outputs.pred_boxes
   probas = outputs.logits.softmax(-1)[0, :, :-1]  # drop the trailing "no object" class that DETR appends
 
-  keep = probas.max(-1).values > 0.96
+  keep = probas.max(-1).values > prob
   outs = feature_extractor.post_process(outputs, torch.tensor(image.size[::-1]).unsqueeze(0))
   bboxes_scaled = outs[0]['boxes'][keep].detach().numpy()
   labels = outs[0]['labels'][keep].detach().numpy()
@@ -65,6 +66,6 @@ def extract_image(image, text, num=1):
 
 title = "ClipnCrop"
 description = "Extract sections from your image using OpenAI's CLIP and Facebook's DETR, implemented with Hugging Face Transformers"
-examples = [['ex3.jpg', 'black bag'], ['ex2.jpg', 'man in red dress']]
+examples = [['ex3.jpg', 'black bag', 0.96], ['ex2.jpg', 'man in red dress', 0.85]]
 article = "<p style='text-align: center'><a href='https://github.com/Vishnunkumar/clipcrop' target='_blank'>clipcrop</a></p>"
-gr.Interface(fn=extract_image, inputs=[i1, i2], outputs=[o1, o2], title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
+gr.Interface(fn=extract_image, inputs=[i1, i2, i3], outputs=[o1, o2], title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
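The substantive change is that the DETR confidence cutoff, previously hardcoded, now arrives as the prob argument wired to the i3 Number input. A minimal standalone sketch of that filtering step, with random logits standing in for the real model output (shapes follow DETR's 100 object queries and 91 COCO classes plus the synthetic "no object" label):

import torch

# Stand-in for outputs.logits from DETR: [batch, queries, classes + 1],
# where the final column is the synthetic "no object" class.
logits = torch.randn(1, 100, 92)

probas = logits.softmax(-1)[0, :, :-1]  # drop the "no object" column
prob = 0.96                             # now user-tunable via the i3 Number input
keep = probas.max(-1).values > prob     # boolean mask over the 100 queries

print(f"{int(keep.sum())} of {keep.numel()} queries kept at threshold {prob}")

With random logits almost nothing clears 0.96; on real detections the mask keeps only high-confidence boxes, so lowering the threshold (as the 0.85 in the second example does) surfaces more candidate crops.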
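One subtlety in the unchanged context line: feature_extractor.post_process takes target sizes as (height, width), while PIL's Image.size is (width, height), which is what the [::-1] reversal handles. A quick sanity check of just that conversion:

from PIL import Image
import torch

img = Image.new("RGB", (640, 480))  # PIL size is (width, height)
target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0)
print(target_sizes)  # tensor([[480, 640]]) -- the (height, width) that post_process expects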