Vvaann committed · verified · Commit 27e1898 · 1 Parent(s): d55d3fa

Create app.py

Files changed (1): app.py (+82, -0)
app.py ADDED
@@ -0,0 +1,82 @@
# Project refs:
# 1. https://github.com/haltakov/natural-language-image-search by Vladimir Haltakov
# 2. OpenAI's CLIP

import torch
import requests
import numpy as np
import pandas as pd
import gradio as gr
from io import BytesIO
from PIL import Image as pil
from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').to(device)
processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')

# Each example row must supply all three inputs (image, mode, text query),
# otherwise Gradio rejects the examples at startup.
examples = [['flag.jpg', 'Image to Image', ''],
            ['flowe.jpg', 'Image to Image', ''],
            ['dance.jpg', 'Image to Image', '']]

data = pd.read_csv('./photos.tsv000', sep='\t', header=0)
features = np.load('./features.npy')
ids = pd.read_csv('./photo_ids.csv')
ids = list(ids['photo_id'])
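# The three files loaded above (photos.tsv000, features.npy, photo_ids.csv)
# appear to be the Unsplash photo metadata and precomputed CLIP photo features
# from ref 1; a hedged sketch of how such files can be regenerated follows the
# app code below.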

def encode_img(image):
    # Gradio passes the uploaded image in as a numpy array; convert it to PIL.
    img = pil.fromarray(image.astype('uint8'), 'RGB')
    with torch.no_grad():
        processed = processor(text=None, images=img, return_tensors='pt', padding=True)['pixel_values']
        search_photo_feature = model.get_image_features(processed.to(device))
        # L2-normalise so dot products against the photo features are cosine similarities.
        search_photo_feature /= search_photo_feature.norm(dim=1, keepdim=True)
        img_encoded = search_photo_feature.cpu().numpy()
    return img_encoded

def encode_txt(text):
    with torch.no_grad():
        # The tokenizer output goes straight to the model; it does not need a
        # second pass through the processor.
        inp = tokenizer([text], padding=True, return_tensors='pt').to(device)
        text_encoded = model.get_text_features(**inp)
        # Normalise the same way as the image features so scores are comparable.
        text_encoded /= text_encoded.norm(dim=1, keepdim=True)
    return text_encoded.cpu().numpy()

def similarity(feature, photo_features):
    similarities = list((feature @ photo_features.T).squeeze(0))
    return similarities
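
# Both the query feature and the precomputed photo features are L2-normalised,
# so the matrix product above yields cosine similarities: for unit vectors
# u and v, u @ v equals the cosine of the angle between them, giving scores
# in [-1, 1] where higher means a closer semantic match.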

def find_best_matches(image, mode, text):
    # Encode the query with whichever encoder matches the selected mode.
    if mode == 'Text to Image':
        query_features = encode_txt(text)
    else:
        query_features = encode_img(image)
    similarities = similarity(query_features, features)

    # Sort photo indices by similarity, best match first.
    best_photos = sorted(zip(similarities, range(features.shape[0])),
                         key=lambda x: x[0], reverse=True)

    matched_images = []

    for i in range(4):
        idx = best_photos[i][1]
        photo_id = ids[idx]

        photo_data = data[data['photo_id'] == photo_id].iloc[0]

        # Fetch a 640px-wide rendition of the matched photo.
        response = requests.get(photo_data['photo_image_url'] + '?w=640')
        img = pil.open(BytesIO(response.content))
        matched_images.append(img)

    return matched_images

demo = gr.Interface(fn=find_best_matches,
                    inputs=[gr.Image(label='Search image'),
                            gr.Radio(['Text to Image', 'Image to Image'],
                                     label='Mode', value='Text to Image'),
                            gr.Textbox(lines=1, label='Text query')],
                    examples=examples,
                    outputs=[gr.Gallery(label='Matched images', show_label=False,
                                        elem_id='gallery', scale=3)],
                    title='CLIP tutorial',
                    description='Returns the four best-matching photos for a query: '
                                'type a text query in Text to Image mode, or upload '
                                'an image in Image to Image mode.')
demo.launch()
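
Note: app.py assumes photos.tsv000, features.npy, and photo_ids.csv already sit next to it; they ship with the natural-language-image-search project referenced at the top of the file. The snippet below is a minimal, unbatched sketch of how such files could be regenerated with the same CLIP checkpoint, not part of this commit: the file name precompute_features.py, the '?w=320' thumbnail size, and the photos.tsv000 column names ('photo_id', 'photo_image_url') are assumptions taken from ref 1.

# precompute_features.py -- hedged sketch; assumes photos.tsv000 from ref 1
# with 'photo_id' and 'photo_image_url' columns. Writes features.npy and
# photo_ids.csv in the layout app.py expects.
import numpy as np
import pandas as pd
import requests
import torch
from io import BytesIO
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').to(device)
processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')

photos = pd.read_csv('./photos.tsv000', sep='\t', header=0)
all_features, all_ids = [], []

for _, row in photos.iterrows():
    # Download a small rendition of each photo and encode it with CLIP.
    resp = requests.get(row['photo_image_url'] + '?w=320')
    img = Image.open(BytesIO(resp.content)).convert('RGB')
    pixels = processor(images=img, return_tensors='pt')['pixel_values']
    with torch.no_grad():
        feat = model.get_image_features(pixels.to(device))
        feat /= feat.norm(dim=1, keepdim=True)  # normalised, as app.py assumes
    all_features.append(feat.cpu().numpy())
    all_ids.append(row['photo_id'])

np.save('./features.npy', np.concatenate(all_features))
pd.DataFrame({'photo_id': all_ids}).to_csv('./photo_ids.csv', index=False)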