gdsu committed on
Commit 03baa4a · verified · 1 Parent(s): fd90f2b

Upload search.py with huggingface_hub

Files changed (1):
search.py +166 -0
search.py ADDED
@@ -0,0 +1,166 @@
import argparse
import copy
import logging
import logging.handlers as handlers
import pathlib
import sys

import faiss
import numpy as np
import vaex as vx
import wandb

# Make the sibling packages importable when the script is run directly.
sys.path.insert(0, str(pathlib.Path(__file__).parent.resolve()))

from search.embeddings import Embeddings
from search.faiss_search import FaissIndex
from metrics import metrics
from data.wikiart import WikiArt, MyTrain

logger = logging.getLogger()


def get_parser():
    parser = argparse.ArgumentParser('dynamicDistances-NN Search Module')
    parser.add_argument('--dataset', default='wikiart', type=str, required=True)
    parser.add_argument('--topk', nargs='+', type=int, default=[5],
                        help='Number of NNs to consider while calculating recall')
    parser.add_argument('--mode', type=str, required=True, choices=['artist', 'label'],
                        help='The type of matching to do')
    parser.add_argument('--method', type=str, default='IP', choices=['IP', 'L2'],
                        help='The method to do NN search')
    parser.add_argument('--emb-dir', type=str, default=None,
                        help='The directory where per-image embeddings are stored (NOT USED when chunked)')
    parser.add_argument('--query_count', default=-1, type=int,
                        help='Number of queries to consider. Works only for domainnet')
    parser.add_argument('--chunked', action='store_true',
                        help='Read embeddings from the chunked directories instead')
    parser.add_argument('--query-chunk-dir', type=str, required=True,
                        help='The directory where chunked query embeddings should be saved/are already saved')
    parser.add_argument('--database-chunk-dir', type=str, required=True,
                        help='The directory where chunked val embeddings should be saved/are already saved')
    parser.add_argument('--data-dir', type=str, default=None,
                        help='The directory of the concerned dataset. (HARD CODED LATER)')
    parser.add_argument('--multilabel', action='store_true', help='If the dataset is multilabel')

    return parser


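# Example invocation (hypothetical paths, for illustration only):
#   python search.py --dataset wikiart --mode artist --method IP --chunked \
#       --query-chunk-dir chunks/query --database-chunk-dir chunks/val --topk 1 5 10
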
def get_log_handlers(args):
    # Create console and rotating-file handlers.
    c_handler = logging.StreamHandler()
    f_handler = handlers.RotatingFileHandler('search.log', maxBytes=int(1e6), backupCount=1000)
    c_handler.setLevel(logging.DEBUG)
    f_handler.setLevel(logging.DEBUG)

    # Create formatters and add them to the handlers.
    c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    c_handler.setFormatter(c_format)
    f_handler.setFormatter(f_format)
    return c_handler, f_handler


def main():
    parser = get_parser()
    args = parser.parse_args()

    log_handlers = get_log_handlers(args)
    logger.addHandler(log_handlers[0])
    logger.addHandler(log_handlers[1])
    logger.setLevel(logging.DEBUG)

    if args.dataset == 'wikiart':
        dataset = WikiArt(args.data_dir)
    else:
        raise NotImplementedError

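    # Embeddings (from search.embeddings, not part of this commit) loads the stored
    # vectors and, as used below, exposes them as .embeddings along with the
    # corresponding image names as .filenames.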
    query_embeddings = Embeddings(args.emb_dir, args.query_chunk_dir,
                                  files=None,
                                  chunked=args.chunked,
                                  file_ext='.pkl')
    val_embeddings = Embeddings(args.emb_dir, args.database_chunk_dir,
                                files=None,
                                chunked=args.chunked,
                                file_ext='.pkl')

    query_embeddings.filenames = list(query_embeddings.filenames)
    val_embeddings.filenames = list(val_embeddings.filenames)

    # Filter the databases down to the files that actually exist on disk.
    dataset.query_db = dataset.query_db[
        dataset.query_db['name'].isin(query_embeddings.filenames)]
    dataset.val_db = dataset.val_db[
        dataset.val_db['name'].isin(val_embeddings.filenames)]

    # Use only the embeddings corresponding to images in the datasets.
    temp = vx.from_arrays(filename=query_embeddings.filenames, index=np.arange(len(query_embeddings.filenames)))
    logger.debug(temp)
    dataset.query_db = dataset.query_db.join(temp, left_on='name', right_on='filename', how='left')
    query_embeddings.embeddings = query_embeddings.embeddings[dataset.get_query_col('index')]
    # Reshape to (n, 1, embedding_size), whether the stored embeddings are
    # 3-D (b, h, w) or already flat 2-D (b, d).
    try:
        b, h, w = query_embeddings.embeddings.shape
        query_embeddings.embeddings = query_embeddings.embeddings.reshape(b, 1, h * w)
    except ValueError:
        b, d = query_embeddings.embeddings.shape
        query_embeddings.embeddings = query_embeddings.embeddings.reshape(b, 1, d)
    query_embeddings.filenames = np.asarray(query_embeddings.filenames)[dataset.get_query_col('index')]

    # Same alignment and reshape for the database (val) side.
    temp = vx.from_arrays(filename=val_embeddings.filenames, index=np.arange(len(val_embeddings.filenames)))
    dataset.val_db = dataset.val_db.join(temp, left_on='name', right_on='filename', how='left')
    val_embeddings.embeddings = val_embeddings.embeddings[dataset.get_val_col('index')]
    try:
        b, h, w = val_embeddings.embeddings.shape
        val_embeddings.embeddings = val_embeddings.embeddings.reshape(b, 1, h * w)
    except ValueError:
        b, d = val_embeddings.embeddings.shape
        val_embeddings.embeddings = val_embeddings.embeddings.reshape(b, 1, d)
    val_embeddings.filenames = np.asarray(val_embeddings.filenames)[dataset.get_val_col('index')]

    # Build the faiss index over the database embeddings.
    embedding_size = query_embeddings.embeddings[0].shape[1]
    if args.method == 'IP':
        method = faiss.IndexFlatIP
    else:
        method = faiss.IndexFlatL2
    search_module = FaissIndex(embedding_size=embedding_size, index_func=method)
    queries = np.asarray(query_embeddings.embeddings).reshape(len(query_embeddings.embeddings), embedding_size)
    database = np.asarray(val_embeddings.embeddings).reshape(len(val_embeddings.embeddings), embedding_size)
    search_module.build_index(database)

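    # Note: IndexFlatIP ranks by raw inner product; if cosine similarity is the
    # intent, the embeddings must be L2-normalized before indexing (not done here).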
    _, nns_all = search_module.search_nns(queries, max(args.topk))
    if args.multilabel:
        # Score each neighbor by the number of labels it shares with the query
        # (inner product of the binary multilabel vectors).
        q_labels = dataset.query_db['multilabel'].values
        db_labels = dataset.val_db['multilabel'].values
        nns_all_pred = [q_labels[i] @ db_labels[nns_all[i]].T for i in range(len(nns_all))]  # (n_queries, n_topk)
        nns_all_pred = np.array(nns_all_pred)
    else:
        nns_all_pred = nns_all
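    # From here on, nns_all_pred holds label-overlap counts in the multilabel
    # case and raw neighbor indices otherwise; the loop below handles both.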
    classes = np.unique(dataset.get_val_col(args.mode))
    mode_to_index = {classname: i for i, classname in enumerate(classes)}
    try:
        gts = np.asarray(list(map(lambda x: mode_to_index[x], dataset.get_query_col(args.mode).tolist())))
    except KeyError:
        logger.error('Class not found in database. This query list cannot be evaluated.')
        return

    evals = metrics.Metrics()

    for topk in args.topk:
        logger.info(f'Calculating recall@{topk}')
        nns_all_pred_topk = nns_all_pred[:, :topk]
        if args.multilabel:
            mode_recall = evals.get_recall_bin(copy.deepcopy(nns_all_pred_topk), topk)
            mode_mrr = evals.get_mrr_bin(copy.deepcopy(nns_all_pred_topk), topk)
            mode_map = evals.get_map_bin(copy.deepcopy(nns_all_pred_topk), topk)
        else:
            # Map neighbor indices to class labels, then to integer ids.
            preds = dataset.get_val_col(args.mode)[nns_all_pred_topk.flatten()].reshape(len(queries), topk)
            preds = np.vectorize(mode_to_index.get)(preds)
            mode_recall = evals.get_recall(copy.deepcopy(preds), gts, topk)
            mode_mrr = evals.get_mrr(copy.deepcopy(preds), gts, topk)
            mode_map = evals.get_map(copy.deepcopy(preds), gts, topk)
        logger.info(f'Recall@{topk}: {mode_recall}')
        logger.info(f'MRR@{topk}: {mode_mrr}')
        logger.info(f'mAP@{topk}: {mode_map}')


if __name__ == '__main__':
    main()
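
Note: the FaissIndex wrapper imported from search.faiss_search is not included in this commit. A minimal sketch consistent with its three call sites above (a constructor taking embedding_size and index_func, build_index to add the database, and search_nns returning distances plus neighbor indices) could look like the following; everything beyond those call sites is an assumption:

import faiss
import numpy as np


class FaissIndex:
    """Sketch of the wrapper, inferred from its usage in search.py."""

    def __init__(self, embedding_size, index_func=faiss.IndexFlatIP):
        # index_func is a flat-index class, e.g. faiss.IndexFlatIP or faiss.IndexFlatL2.
        self.index = index_func(embedding_size)

    def build_index(self, database):
        # faiss expects contiguous float32 arrays of shape (n, embedding_size).
        self.index.add(np.ascontiguousarray(database, dtype=np.float32))

    def search_nns(self, queries, k):
        # Returns (distances, indices), each of shape (n_queries, k).
        return self.index.search(np.ascontiguousarray(queries, dtype=np.float32), k)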