code
stringlengths
17
6.64M
class MarginRankingLoss(nn.Module):
    """Triplet-style margin ranking loss over a batch similarity matrix.

    ``forward`` takes two embedding batches of shape (batchsize, subspace);
    the diagonal of their similarity matrix holds the matching pairs.
    """

    def __init__(self, margin=0, measure='cosine', max_violation=False,
                 cost_style='sum', direction='bidir', device=torch.device('cpu')):
        """
        :param margin: ranking margin
        :param measure: 'cosine' for cosine similarity, 'hist' for the
            extended-Jaccard (histogram) similarity
        :param max_violation: keep only the hardest negative per row/column
        :param cost_style: 'sum' adds all errors, anything else averages them
        :param direction: 'i2t', 't2i' or 'bidir' — which directions the
            diagonal score is compared against
        """
        super(MarginRankingLoss, self).__init__()
        self.margin = margin
        self.cost_style = cost_style
        self.direction = direction
        self.max_violation = max_violation
        if measure == 'cosine':
            self.sim = cosine_sim
        elif measure == 'hist':
            self.sim = hist_sim
        else:
            raise Exception('Not implemented.')

    def forward(self, s, im):
        dev = s.device
        scores = self.sim(im, s)
        positives = scores.diag().view(im.size(0), 1)
        per_row = positives.expand_as(scores)
        per_col = positives.t().expand_as(scores)
        # mask so matching pairs never contribute to the hinge cost
        diag_mask = (torch.eye(scores.size(0)) > 0.5).to(dev)

        cost_s, cost_im = None, None
        if self.direction in ('i2t', 'bidir'):
            cost_s = (self.margin + scores - per_row).clamp(min=0)
            cost_s = cost_s.masked_fill_(diag_mask, 0)
        if self.direction in ('t2i', 'bidir'):
            cost_im = (self.margin + scores - per_col).clamp(min=0)
            cost_im = cost_im.masked_fill_(diag_mask, 0)

        if self.max_violation:
            # hardest negative only
            cost_s = cost_s if cost_s is None else cost_s.max(1)[0]
            cost_im = cost_im if cost_im is None else cost_im.max(0)[0]

        if cost_s is None:
            cost_s = torch.zeros(1).to(dev)
        if cost_im is None:
            cost_im = torch.zeros(1).to(dev)

        if self.cost_style == 'sum':
            return cost_s.sum() + cost_im.sum()
        return cost_s.mean() + cost_im.mean()
class MarginRankingLossWithScore(nn.Module):
    """Margin ranking loss computed directly from a precomputed
    (batchsize, batchsize) score matrix whose diagonal holds the
    matching pairs."""

    def __init__(self, margin=0, max_violation=False, cost_style='sum',
                 direction='bidir', device=torch.device('cpu')):
        """
        :param margin: ranking margin
        :param max_violation: keep only the hardest negative per row/column
        :param cost_style: 'sum' adds all errors, anything else averages them
        :param direction: 'i2t', 't2i' or 'bidir'
        :param device: device the mask/zero tensors are placed on
        """
        super().__init__()
        self.margin = margin
        self.cost_style = cost_style
        self.direction = direction
        self.max_violation = max_violation
        self.device = device

    def forward(self, score):
        dev = self.device
        n = score.size(0)
        positives = score.diag().view(n, 1)
        per_row = positives.expand_as(score)
        per_col = positives.t().expand_as(score)
        diag_mask = (torch.eye(n) > 0.5).to(dev)

        cost_s = None
        cost_im = None
        if self.direction in ('i2t', 'bidir'):
            cost_s = (self.margin + score - per_row).clamp(min=0).masked_fill_(diag_mask, 0)
        if self.direction in ('t2i', 'bidir'):
            cost_im = (self.margin + score - per_col).clamp(min=0).masked_fill_(diag_mask, 0)

        if self.max_violation:
            if cost_s is not None:
                cost_s = cost_s.max(1)[0]
            if cost_im is not None:
                cost_im = cost_im.max(0)[0]

        cost_s = torch.zeros(1).to(dev) if cost_s is None else cost_s
        cost_im = torch.zeros(1).to(dev) if cost_im is None else cost_im

        if self.cost_style == 'sum':
            return cost_s.sum() + cost_im.sum()
        return cost_s.mean() + cost_im.mean()
class ImprovedBCELoss(nn.Module):
    """Class-balanced BCE-with-logits loss.

    Positive entries of each row are weighted by ``lambda_`` split evenly
    over that row's positives; negatives get ``1 - lambda_`` split over
    the row's negatives. Rows with no positives (or no negatives) simply
    contribute nothing on that side.
    """

    def __init__(self, lambda_):
        super(ImprovedBCELoss, self).__init__()
        self.L = lambda_

    def forward(self, s, im):
        dtype = torch.float
        target = im.type(dtype)
        logits = s.type(dtype)

        pos_per_row = torch.sum(target, dim=1, keepdim=True, dtype=dtype)
        neg_per_row = torch.sum(1 - target, dim=1, keepdim=True, dtype=dtype)
        w_pos = (self.L / pos_per_row) * target
        w_neg = ((1 - self.L) / neg_per_row) * (1 - target)
        # an all-negative (or all-positive) row divides by zero -> NaN;
        # zero those weights out
        w_pos[torch.isnan(w_pos)] = 0
        w_neg[torch.isnan(w_neg)] = 0

        loss_pos = torch.nn.functional.binary_cross_entropy_with_logits(
            logits, target, weight=w_pos, reduction='sum')
        loss_neg = torch.nn.functional.binary_cross_entropy_with_logits(
            logits, target, weight=w_neg, reduction='sum')
        return loss_pos + loss_neg
class MarginLoss(nn.Module):
    """Weighted margin loss between true- and false-caption similarities.

    For each sample, penalises the false caption scoring within ``margin``
    of (or above) the true caption: cost = max(0, margin + s_false - s_true),
    rescaled per-sample by ``weight``.
    """

    def __init__(self, neg_weight=1, margin=0, measure='cosine',
                 cost_style='sum', device=torch.device('cpu'), pos_weight=300):
        """
        :param neg_weight: multiplier applied to samples whose weight flag is 1
        :param margin: ranking margin
        :param measure: 'cosine' for cosine similarity, 'hist' for the
            extended-Jaccard (histogram) similarity
        :param cost_style: 'sum' adds all errors, anything else averages them
        :param device: device the cost tensor is moved to
        :param pos_weight: unused; kept for interface compatibility
        """
        super(MarginLoss, self).__init__()
        # BUG FIX: the margin argument was previously ignored
        # (self.margin was hard-coded to 0); honour the caller's value.
        self.margin = margin
        self.cost_style = cost_style
        if measure == 'cosine':
            self.sim = vector_cosine_sim
        elif measure == 'hist':
            self.sim = hist_sim
        else:
            raise Exception('Not implemented.')
        self.device = device
        self.neg_weight = neg_weight

    def forward(self, txt_embs, vis_embs, false_txt_embs, weight):
        """Return the (sum or mean) weighted hinge cost for the batch."""
        device = self.device
        scores_true = self.sim(txt_embs, vis_embs)
        scores_false = self.sim(false_txt_embs, vis_embs)
        # flag==1 samples get neg_weight, flag==0 samples get weight 1
        weight = weight * (self.neg_weight - 1) + 1
        cost = (self.margin + scores_false - scores_true).clamp(min=0)
        cost = torch.mul(cost, weight).to(device)
        return cost.sum() if self.cost_style == 'sum' else cost.mean()
class CrossEntropyLoss(nn.Module):
    """Symmetric similarity loss: averages text->video and video->text
    losses computed from a cosine-similarity matrix.

    NOTE(review): despite the name, ``cal_loss`` negates and sums the raw
    diagonal similarities without a log-softmax (compare DualSoftmaxLoss);
    that original behaviour is preserved here.
    """

    def __init__(self):
        super(CrossEntropyLoss, self).__init__()

    def forward(self, s, im, temp=1000):
        sim_matrix1 = cosine_sim(s, im)
        sim_matrix2 = sim_matrix1.T
        loss1 = self.cal_loss(sim_matrix1, temp)
        loss2 = self.cal_loss(sim_matrix2, temp)
        return (loss1 + loss2) / 2

    def cal_loss(self, sim_matrix, temp=1000):
        # BUG FIX: forward() passes ``temp`` but cal_loss() accepted only
        # ``sim_matrix``, so every forward call raised TypeError. Accept
        # (and ignore) the temperature, mirroring DualSoftmaxLoss.
        logpt = torch.diag(sim_matrix)  # matched-pair similarities
        loss = -logpt
        return loss.sum()
class DualSoftmaxLoss(nn.Module):
    """Cross-entropy over a dual-softmax-reweighted similarity matrix,
    averaged over both retrieval directions."""

    def __init__(self):
        super(DualSoftmaxLoss, self).__init__()

    def forward(self, s, im, temp=1000):
        t2v = cosine_sim(s, im)
        v2t = t2v.T
        return (self.cal_loss(t2v, temp) + self.cal_loss(v2t, temp)) / 2

    def cal_loss(self, sim_matrix, temp=1000):
        # prior reweighting: column-wise softmax at temperature ``temp``,
        # scaled back up by the batch size
        reweighted = sim_matrix * F.softmax(sim_matrix / temp, dim=0) * len(sim_matrix)
        log_probs = F.log_softmax(reweighted, dim=-1)
        # negative log-likelihood of the matching (diagonal) pairs
        return -torch.diag(log_probs).sum()
class KlLoss(nn.Module):
    """KL divergence between the row-softmax distributions of a score
    matrix and a reference ('origin') score matrix."""

    def __init__(self, cost_style='sum', direction='bidir', device=torch.device('cpu')):
        super().__init__()
        self.cost_style = cost_style
        self.direction = direction
        self.klloss = nn.KLDivLoss(reduction='none')
        self.device = device
        self.softmax = nn.Softmax(dim=1)
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, score, originscore):
        # NOTE: a direction outside {'t2i', 'bidir'} leaves losst2i None
        # and raises on .sum()/.mean() — original behaviour preserved.
        losst2i = None
        if self.direction in ('t2i', 'bidir'):
            target = self.softmax(originscore)    # reference distribution
            log_pred = self.logsoftmax(score)     # KLDivLoss expects log-probs
            losst2i = self.klloss(log_pred, target)
        return losst2i.sum() if self.cost_style == 'sum' else losst2i.mean()
class Margin2Loss(nn.Module):
    """Margin loss with optional lower/upper margins on both the
    false-caption-to-video and false-caption-to-true-caption similarities.

    Any margin set to None disables the corresponding hinge term.
    """

    def __init__(self, bottommargin, uppermargin, bottommargin_t2t, uppermargin_t2t,
                 neg_weight=1, measure='cosine', cost_style='sum',
                 device=torch.device('cpu'), pos_weight=300):
        """
        :param bottommargin: lower hinge margin for false-vs-video scores
        :param uppermargin: upper hinge margin for false-vs-video scores
        :param bottommargin_t2t: lower hinge margin for false-vs-true-text scores
        :param uppermargin_t2t: upper hinge margin for false-vs-true-text scores
        :param neg_weight: multiplier applied to samples whose weight flag is 1
        :param measure: 'cosine' or 'hist' similarity
        :param cost_style: 'sum' adds all errors, anything else averages them
        :param pos_weight: unused; kept for interface compatibility
        """
        super(Margin2Loss, self).__init__()
        self.uppermargin = uppermargin
        self.bottommargin = bottommargin
        self.uppermargin_t2t = uppermargin_t2t
        self.bottommargin_t2t = bottommargin_t2t
        self.cost_style = cost_style
        if measure == 'cosine':
            self.sim = vector_cosine_sim
        elif measure == 'hist':
            self.sim = hist_sim
        else:
            raise Exception('Not implemented.')
        self.device = device
        self.neg_weight = neg_weight

    def forward(self, txt_embs, vis_embs, false_txt_embs, weight):
        dev = self.device
        true_t2v = self.sim(txt_embs, vis_embs)
        false_t2v = self.sim(false_txt_embs, vis_embs)
        false_t2t = self.sim(false_txt_embs, txt_embs)
        # flag==1 samples get neg_weight, flag==0 samples get weight 1
        weight = weight * (self.neg_weight - 1) + 1

        total = 0
        if self.bottommargin is not None:
            total = total + (self.bottommargin + false_t2v - true_t2v).clamp(min=0)
        if self.uppermargin is not None:
            total = total + (-self.uppermargin - false_t2v + true_t2v).clamp(min=0)
        if self.bottommargin_t2t is not None:
            total = total + (self.bottommargin_t2t + false_t2t - true_t2v).clamp(min=0)
        if self.uppermargin_t2t is not None:
            total = total + (-self.uppermargin_t2t - false_t2t + true_t2v).clamp(min=0)

        total = torch.mul(total, weight).to(dev)
        return total.sum() if self.cost_style == 'sum' else total.mean()
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE vocabulary archive (cached)."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """Bijective map from every byte value (0-255) to a printable unicode
    character.

    The reversible BPE code works on unicode strings, and must avoid
    whitespace/control characters. Bytes that are already printable
    latin-1 map to themselves; the remaining byte values are shifted up
    into chr(256 + n).
    """
    printable = list(range(ord('!'), ord('~') + 1))
    printable += list(range(ord('¡'), ord('¬') + 1))
    printable += list(range(ord('®'), ord('ÿ') + 1))
    mapped = printable[:]
    shift = 0
    for byte in range(2 ** 8):
        if byte not in printable:
            printable.append(byte)
            mapped.append(2 ** 8 + shift)
            shift += 1
    return dict(zip(printable, (chr(cp) for cp in mapped)))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (each symbol a variable-length string).
    """
    _first = word[0]  # preserve original contract: at least one symbol required
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Fix mojibake with ftfy, undo (possibly double) HTML escaping,
    and strip surrounding whitespace."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    r"""Byte-level BPE tokenizer (CLIP-style).

    NOTE(review): the token pattern below uses \p{L} / \p{N} Unicode
    property classes, which the stdlib ``re`` module does not support —
    this presumably relies on the third-party ``regex`` package being
    imported as ``re``; confirm at the import site.
    """

    def __init__(self, bpe_path: str=default_bpe()):
        # byte <-> printable-unicode tables (see bytes_to_unicode)
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # merge rules: skip the header line, keep the first 49152-256-2 merges
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # vocabulary: single chars, their '</w>' word-final variants,
        # merged symbols, then the two special tokens
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # lower rank = higher-priority merge
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # special tokens bypass BPE entirely via the cache
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)

    def bpe(self, token):
        """Apply BPE merges to one token; results are memoised in self.cache."""
        if (token in self.cache):
            return self.cache[token]
        # mark the final character as word-final before merging
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # always merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs: copy the tail and stop
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                # fully merged — nothing left to pair up
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean *text*, split it with the token pattern, and return the
        list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # map raw utf-8 bytes to their printable stand-ins before BPE
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Invert encode(): ids -> symbols -> bytes -> text, turning the
        '</w>' word-final markers back into spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
class TestSuite(unittest.TestCase):
    """Sanity checks that every data path required for training exists."""

    def _require(self, path):
        # shared existence assertion with a uniform "missing <path>" message
        self.assertTrue(os.path.exists(path), ('missing %s' % path))

    def test_rootpath(self):
        self.assertTrue(os.path.exists(rootpath))

    def test_w2v_dir(self):
        self._require(os.path.join(rootpath, 'word2vec/flickr/vec500flickr30m'))

    def test_train_data(self):
        self._require(os.path.join(rootpath, train_collection, 'TextData', ('%s.caption.txt' % train_collection)))
        self._require(os.path.join(rootpath, train_collection, 'FeatureData', vis_feat))

    def test_val_data(self):
        self._require(os.path.join(rootpath, val_collection, 'TextData', ('%s.caption.txt' % val_collection)))
        self._require(os.path.join(rootpath, val_collection, 'FeatureData', vis_feat))

    def test_test_data(self):
        for test_set in ('tv16', 'tv17', 'tv18'):
            self._require(os.path.join(rootpath, test_collection, 'TextData', ('%s.avs.txt' % test_set)))
            self._require(os.path.join(rootpath, test_collection, 'TextData', ('avs.qrels.%s' % test_set)))
            self._require(os.path.join(rootpath, test_collection, 'FeatureData', vis_feat))
class TextTool():
    @staticmethod
    def tokenize(input_str, clean=True, language='en', remove_stopword=False):
        """Tokenize *input_str* and return a list of token strings.

        :param input_str: text to tokenize
        :param clean: if True, strip characters that are not ASCII letters
            or digits (English); for other languages, drop CHN_DEL_SET
            characters
        :param language: 'en' for English; anything else is treated as Chinese
        :param remove_stopword: if True, drop stopwords
        :return: list of tokens
        """
        if 'en' == language:
            sent = input_str
            if clean:
                sent = sent.replace('\r', ' ')
                sent = re.sub('[^A-Za-z0-9]', ' ', sent).strip().lower()
            tokens = sent.split()
            if remove_stopword:
                tokens = [x for x in tokens if x not in ENGLISH_STOP_WORDS]
        else:
            # BUG FIX: the original called input_str.decode('utf-8') and then
            # passed bytes to re.sub — Python 2 leftovers that raise
            # AttributeError/TypeError on Python 3. Work on str directly.
            sent = input_str
            if clean:
                for elem in CHN_DEL_SET:
                    sent = sent.replace(elem, '')
            # drop latin letters mixed into non-English text
            sent = re.sub('[A-Za-z]', '', sent)
            tokens = [x for x in sent.split()]
            if remove_stopword:
                tokens = [x for x in tokens if x not in CHINESE_STOP_WORDS]
        return tokens
def negation_augumentation(input_str):
    """Augment a sentence with its contracted/expanded negation variant.

    Returns a list whose first element is the original string. If a known
    contraction occurs, its expanded rewrite is appended (first match
    only); likewise, if an expansion occurs, its contracted rewrite is
    appended.
    """
    variants = [input_str]
    replacelist = [('don t', 'do not'), ('doesn t', 'does not'), ('didn t', 'did not'), ('isn t', 'is not'), ('aren t', 'are not'), ('wasn t', 'was not'), ('weren t', 'were not'), ('won t', 'will not'), ('hasn t', 'has not'), ('haven t', 'have not'), ('can t', 'can not'), ('couldn t', 'could not'), ("don't", 'do not'), ("doesn't", 'does not'), ("didn't", 'did not'), ("isn't", 'is not'), ("aren't", 'are not'), ("won't", 'will not'), ("hasn't", 'has not'), ("haven't", 'have not'), ("can't", 'can not'), ("couldn't", 'could not')]
    for contracted, expanded in replacelist:
        if input_str.find(contracted) != -1:
            variants.append(re.sub(contracted, expanded, input_str))
            break
    for contracted, expanded in replacelist:
        if input_str.find(expanded) != -1:
            variants.append(re.sub(expanded, contracted, input_str))
            break
    return variants
class Vocabulary(object):
    """Simple two-way word <-> index vocabulary wrapper."""

    def __init__(self, encoding):
        # encoding decides out-of-vocab behaviour in __call__
        # ('gru' encodings fall back to '<unk>')
        self.word2idx = {}
        self.idx2word = {}
        self.encoding = encoding

    def add(self, word):
        """Register *word* under the next free index (no-op if present)."""
        if word in self.word2idx:
            return
        idx = len(self.word2idx)
        self.word2idx[word] = idx
        self.idx2word[idx] = word

    def find(self, word):
        """Index of *word*, or -1 when absent."""
        return self.word2idx.get(word, -1)

    def __getitem__(self, index):
        return self.idx2word[index]

    def __call__(self, word):
        """Index of *word*; unknown words map to '<unk>' for gru encodings,
        otherwise raise."""
        try:
            return self.word2idx[word]
        except KeyError:
            if 'gru' in self.encoding:
                return self.word2idx['<unk>']
            raise Exception('word out of vocab: %s' % word)

    def __len__(self):
        return len(self.word2idx)
def load_config(config_path):
    """Import the module at dotted path *config_path* and return the
    result of its ``config()`` factory."""
    return importlib.import_module(config_path).config()
def load_pretrained_model(pretrained_file_path, rootpath, device):
    """Restore a model checkpoint saved by the trainer.

    Rebuilds the w2v-based text encoders on the checkpoint's config (the
    trainer clears config.t2v_w2v before saving — see main()), builds the
    model and loads its weights non-strictly.

    :param pretrained_file_path: path to the .pth.tar checkpoint
    :param rootpath: data root used to locate the word2vec directory
    :param device: torch device the model is built for
    :return: dict with the restored 'model' and its 'config'
    """
    checkpoint = torch.load(pretrained_file_path, map_location='cpu')
    epoch = checkpoint['epoch']
    best_perf = checkpoint['best_perf']
    config = checkpoint['config']
    model_name = config.model_name
    if hasattr(config, 't2v_w2v'):
        # rebuild the w2v vectorizer and word-embedding matrix from disk
        w2v_data_path = os.path.join(rootpath, 'word2vec', 'flickr', 'vec500flickr30m')
        config.t2v_w2v = get_txt2vec('w2v_nsw')(w2v_data_path)
        config.we = get_we(config.t2v_idx.vocab, config.t2v_w2v.data_path)
    model = get_model(model_name, device, config)
    # strict=False: tolerate checkpoints whose keys differ from the model
    model.load_state_dict(checkpoint['model'], strict=False)
    logger.info("=> loaded checkpoint '{}' (epoch {}, best_perf {})".format(pretrained_file_path, epoch, best_perf))
    return {'model': model, 'config': config}
def prepare_config(opt, checkToSkip=True):
    """Build everything training needs from the command-line options.

    Seeds the RNGs, resolves all data paths, loads the config module,
    constructs the text vectorizers (bow / w2v / gru-index) and either
    restores a pretrained model or builds a fresh one.

    NOTE(review): this source was recovered from a flattened dump; the
    statement grouping inside conditionals was reconstructed and should
    be checked against the original repository.

    :param opt: parsed argparse options
    :param checkToSkip: when True, exit early if the best-model file
        already exists (subject to opt.overwrite) and create the model dir
    :return: dict of loader inputs, config, model and bookkeeping paths
    """
    # reproducibility
    np.random.seed(opt.random_seed)
    torch.manual_seed(opt.random_seed)
    # expand '~' in the data root
    if ('~' in opt.rootpath):
        opt.rootpath = opt.rootpath.replace('~', os.path.expanduser('~'))
    rootpath = opt.rootpath
    trainCollection = opt.trainCollection
    # optional second training collection
    if ('trainCollection2' in opt):
        trainCollection2 = opt.trainCollection2
    else:
        trainCollection2 = 'None'
    valCollection = opt.valCollection
    task2_caption_suffix = opt.task2_caption
    if ('task3_caption' in opt):
        task3_caption_suffix = opt.task3_caption
    else:
        task3_caption_suffix = 'no_task3_caption'
    if (opt.val_set == 'no'):
        val_set = ''
    else:
        val_set = opt.val_set
    # module-level device shared with the other entry points
    global device
    if (torch.cuda.is_available() and (opt.device != 'cpu')):
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    config = load_config(('configs.%s' % opt.config_name))
    if (opt.parm_adjust_config != 'None'):
        config.adjust_parm(opt.parm_adjust_config)
    # model output directory encodes the full experimental setting
    if (trainCollection2 == 'None'):
        model_path = os.path.join(rootpath, trainCollection, 'w2vvpp_train', valCollection, val_set, opt.config_name, opt.model_prefix)
    else:
        model_path = os.path.join(rootpath, ((trainCollection + '_') + trainCollection2), 'w2vvpp_train', valCollection, val_set, opt.config_name, opt.model_prefix)
    if checkToSkip:
        # NOTE(review): makedirs placement inside this branch reconstructed
        # from the flattened dump — confirm against the original.
        if util.checkToSkip(os.path.join(model_path, 'model_best.pth.tar'), opt.overwrite):
            sys.exit(0)
        util.makedirs(model_path)
    print(json.dumps(vars(opt), indent=2))
    model_name = config.model_name
    # module-level tensorboard writer shared with train()/validate()
    global writer
    writer = SummaryWriter(log_dir=model_path, flush_secs=5)
    collections = {'train': trainCollection, 'val': valCollection}
    capfiles = {'train': '%s.caption.txt', 'val': os.path.join(val_set, '%s.caption.txt')}
    if (trainCollection2 != 'None'):
        collections['train2'] = trainCollection2
        capfiles['train2'] = '%s.caption.txt'
        vocabsuffix = ((trainCollection + '_') + trainCollection2)
    else:
        vocabsuffix = trainCollection
    cap_file_paths = {x: os.path.join(rootpath, collections[x], 'TextData', (capfiles[x] % collections[x])) for x in collections}
    # path of the validation negation caption subset (see get_negationset)
    capfiles_negationset = os.path.join(val_set, ('%s.caption.negationset.txt' % collections['val']))
    capfiles_negationset = os.path.join(rootpath, valCollection, 'TextData', capfiles_negationset)
    config.capfiles_negationset = capfiles_negationset
    # --- video-level features ---
    vis_feat_files = {x: None for x in collections}
    if (len(config.vid_feats) > 0):
        vis_feat_files = {collection: {y: BigFile(os.path.join(rootpath, collections[collection], 'FeatureData', y)) for y in config.vid_feats} for collection in collections}
        # input FC layer sizes follow the feature dimensionalities
        config.vis_fc_layers[0] = {}
        for each in vis_feat_files['train'].keys():
            config.vis_fc_layers[0][each] = vis_feat_files['train'][each].ndims
        if config.vis_feat_add_concat:
            feat_dim_sum = np.sum(list(config.vis_fc_layers[0].values()))
            config.vis_fc_layers[0]['vis_feat_add_concat'] = feat_dim_sum
    # --- multi-label features (SGRAF) ---
    vis_muti_feat_dicts = {x: None for x in collections}
    if config.SGRAF:
        vis_muti_feat_paths = {x: os.path.join(rootpath, collections[x], 'VideoMultiLabelFeat', config.muti_feat) for x in collections}
        # share one loaded dict when train and val point at the same file
        if (os.path.realpath(vis_muti_feat_paths['train']) == os.path.realpath(vis_muti_feat_paths['val'])):
            vis_muti_feat_dicts['train'] = vis_muti_feat_dicts['val'] = np.load(vis_muti_feat_paths['train'], allow_pickle=True).item()
        else:
            vis_muti_feat_dicts['train'] = np.load(vis_muti_feat_paths['train'], allow_pickle=True).item()
            vis_muti_feat_dicts['val'] = np.load(vis_muti_feat_paths['val'], allow_pickle=True).item()
    # --- frame-level features ---
    vis_frame_feat_dicts = {x: None for x in collections}
    if config.frame_feat_input:
        vis_frame_feat_dicts = {collection: {y: BigFile(os.path.join(rootpath, collections[collection], 'FeatureData/frame', y)) for y in config.vid_frame_feats} for collection in collections}
        for each in vis_frame_feat_dicts['train'].keys():
            config.vis_fc_layers[0][each] = vis_frame_feat_dicts['train'][each].ndims
    if config.frame_loader:
        frame_id_path_file = {'train': os.path.join(rootpath, trainCollection, 'id.imagepath.txt'), 'val': os.path.join(rootpath, valCollection, 'id.imagepath.txt')}
    else:
        frame_id_path_file = {'train': None, 'val': None}
    # --- text encodings ---
    # normalise legacy string-valued encodings into {'name': ...} dicts
    if (type(config.text_encoding['bow_encoding']) is str):
        for name in config.text_encoding:
            encoding = config.text_encoding[name]
            config.text_encoding[name] = {}
            config.text_encoding[name]['name'] = encoding
    (bow_encoding, w2v_encoding, rnn_encoding) = (config.text_encoding['bow_encoding']['name'], config.text_encoding['w2v_encoding']['name'], config.text_encoding['rnn_encoding']['name'])
    # rnn encoding string is '<type>_<pooling>'
    (rnn_encoding, config.pooling) = rnn_encoding.split('_', 1)
    if ('no' in bow_encoding):
        config.t2v_bow = No()
        config.t2v_bow.ndims = 0
    else:
        bow_vocab_file = os.path.join(rootpath, vocabsuffix, 'TextData', 'vocab', ('%s_%d.pkl' % (bow_encoding, config.threshold)))
        config.t2v_bow = get_txt2vec(bow_encoding)(bow_vocab_file, norm=config.bow_norm)
    w2v_data_path = os.path.join(rootpath, 'word2vec', 'flickr', 'vec500flickr30m')
    config.t2v_w2v = get_txt2vec('w2v_nsw')(w2v_data_path)
    rnn_vocab_file = os.path.join(rootpath, vocabsuffix, 'TextData', 'vocab', ('%s_%d.pkl' % ('gru', config.threshold)))
    if ('bigru' == rnn_encoding):
        config.rnn_size *= 2
    elif (rnn_encoding == 'nogru'):
        config.rnn_size = 0
    elif (rnn_encoding == 'gru'):
        rnn_vocab_file = os.path.join(rootpath, vocabsuffix, 'TextData', 'vocab', ('%s_%d.pkl' % (rnn_encoding, config.threshold)))
    else:
        raise Exception('No this gru type!')
    config.t2v_idx = get_txt2vec('idxvec')(rnn_vocab_file)
    if (config.we_dim == 500):
        config.we = get_we(config.t2v_idx.vocab, w2v_data_path)
    config.txt_fc_layers = list(map(int, config.txt_fc_layers.split('-')))
    if ('bigru' in rnn_encoding):
        # undo the earlier doubling so rnn_size is the per-direction size
        config.rnn_size = (config.rnn_size // 2)
    # --- task 2 captions ---
    if (task2_caption_suffix == 'no_task2_caption'):
        cap_file_paths_task2 = {x: None for x in collections}
    else:
        capfiles_task2 = {'train': ('%s.caption.%s.txt' % ('%s', task2_caption_suffix)), 'val': os.path.join(val_set, ('%s.caption.%s.txt' % ('%s', task2_caption_suffix)))}
        cap_file_paths_task2 = {x: os.path.join(rootpath, collections[x], 'TextData', (capfiles_task2[x] % collections[x])) for x in collections}
        bow_encoding_task2 = config.text_encoding_task2
        bow_vocab_file_task2 = os.path.join(rootpath, trainCollection, 'TextData', ('vocab_%s' % task2_caption_suffix), ('%s_%d.pkl' % (bow_encoding_task2, config.threshold_task2)))
        config.t2v_bow_task2 = get_txt2vec(bow_encoding_task2)(bow_vocab_file_task2, norm=config.bow_norm_task2)
        print(config.t2v_bow_task2)
        # task2 visual head: in = task1 feature dims, out = task2 bow dims
        config.vis_fc_layers_task2 = list(map(int, config.vis_fc_layers_task2.split('-')))
        config.vis_fc_layers_task2[0] = config.vis_fc_layers[0]
        config.vis_fc_layers_task2[1] = config.t2v_bow_task2.ndims
        config.txt_fc_layers_task2 = list(map(int, config.txt_fc_layers_task2.split('-')))
        if (config.txt_feature_task2 == 'bow'):
            config.txt_fc_layers_task2[0] = config.t2v_bow.ndims
        elif (config.txt_feature_task2 == 'w2v'):
            config.txt_fc_layers_task2[0] = config.t2v_w2v.ndims
        elif (config.txt_feature_task2 == 'gru'):
            config.txt_fc_layers_task2[0] = ((2 * config.rnn_size) if (rnn_encoding == 'bigru') else config.rnn_size)
        elif (config.txt_feature_task2 == 'no'):
            pass
        else:
            raise Exception('No this txt_feature_task2 implement!')
        config.txt_fc_layers_task2[1] = config.t2v_bow_task2.ndims
    # --- task 3 captions ---
    if (task3_caption_suffix == 'no_task3_caption'):
        config.task3 = False
        cap_file_paths_task3 = {x: None for x in collections}
    else:
        config.task3 = True
        capfiles_task3 = {'train': ('%s.caption.%s.txt' % ('%s', task3_caption_suffix)), 'val': os.path.join(val_set, ('%s.caption.%s.txt' % ('%s', task3_caption_suffix)))}
        cap_file_paths_task3 = {x: os.path.join(rootpath, collections[x], 'TextData', (capfiles_task3[x] % collections[x])) for x in collections}
    # --- model: restore from checkpoint or build fresh ---
    if (opt.pretrained_file_path != 'None'):
        pretrained_model = load_pretrained_model(opt.pretrained_file_path, opt.rootpath, device)
        config.t2v_bow = pretrained_model['config'].t2v_bow
        config.t2v_idx = pretrained_model['config'].t2v_idx
        config.we = pretrained_model['config'].we
        model = pretrained_model['model']
    else:
        model = get_model(model_name, device, config)
    prepared_configs = {'vis_feat_files': vis_feat_files, 'vis_muti_feat_dicts': vis_muti_feat_dicts, 'vis_frame_feat_dicts': vis_frame_feat_dicts, 'frame_id_path_file': frame_id_path_file, 'cap_file_paths': cap_file_paths, 'cap_file_paths_task2': cap_file_paths_task2, 'cap_file_paths_task3': cap_file_paths_task3, 'opt': opt, 'val_set': val_set, 'config': config, 'collections': collections, 'model_path': model_path, 'device': device, 'task2_caption_suffix': task2_caption_suffix, 'task3_caption_suffix': task3_caption_suffix, 'capfiles_negationset': capfiles_negationset, 'model': model}
    return prepared_configs
def prepare_model1(opt):
    """Prepare the training setup and return it with the model moved to
    the active device (restoring from a pretrained checkpoint when
    opt.pretrained_file_path is given)."""
    prepared = prepare_config(opt)
    cfg = prepared['config']
    if opt.pretrained_file_path != 'None':
        restored = load_pretrained_model(opt.pretrained_file_path, opt.rootpath, device)
        cfg = restored['config']
        model = restored['model']
    else:
        model = get_model(cfg.model_name, device, cfg)
    prepared['model'] = model.to(device)
    return prepared
def main(opt):
    """Full training entry point: build data loaders, train epoch by
    epoch, validate, checkpoint, and stop after 10 epochs without
    improvement (or at the last epoch).

    NOTE(review): recovered from a flattened dump — the statement
    grouping inside the epoch loop (notably around no_impr_counter and
    the mean-last averaging) was reconstructed and should be checked.
    """
    prepared_configs = prepare_config(opt)
    vis_feat_files = prepared_configs['vis_feat_files']
    vis_frame_feat_dicts = prepared_configs['vis_frame_feat_dicts']
    frame_id_path_file = prepared_configs['frame_id_path_file']
    vis_muti_feat_dicts = prepared_configs['vis_muti_feat_dicts']
    cap_file_paths = prepared_configs['cap_file_paths']
    cap_file_paths_task2 = prepared_configs['cap_file_paths_task2']
    cap_file_paths_task3 = prepared_configs['cap_file_paths_task3']
    opt = prepared_configs['opt']
    config = prepared_configs['config']
    collections = prepared_configs['collections']
    model_path = prepared_configs['model_path']
    model = prepared_configs['model']
    device = prepared_configs['device']
    val_set = prepared_configs['val_set']
    # training pair loaders, one per collection (train / train2 / val)
    vis_ids = list(map(str.strip, open(os.path.join(opt.rootpath, opt.trainCollection, 'VideoSets', (opt.trainCollection + '.txt')))))
    data_loaders = {x: data.pair_provider({'vis_feat_files': vis_feat_files[x], 'capfile': cap_file_paths[x], 'vis_frame_feat_dicts': vis_frame_feat_dicts[x], 'vis_ids': vis_ids, 'max_frame': config.max_frame, 'sample_type': config.frame_sample_type_train, 'vis_muti_feat_dicts': vis_muti_feat_dicts[x], 'frame_id_path_file': frame_id_path_file['train'], 'capfile_task2': cap_file_paths_task2[x], 'capfile_task3': cap_file_paths_task3[x], 'pin_memory': False, 'batch_size': opt.batch_size, 'num_workers': opt.workers, 'config': config, 'collection': x, 'shuffle': (x == 'train'), 'task3': config.task3}) for x in collections}
    # validation loaders: visual and text sides separately
    vis_ids = list(map(str.strip, open(os.path.join(opt.rootpath, opt.valCollection, 'VideoSets', (opt.valCollection + '.txt')))))
    vis_loader_val = data.vis_provider({'vis_feat_files': vis_feat_files['val'], 'vis_ids': vis_ids, 'pin_memory': False, 'vis_frame_feat_dicts': vis_frame_feat_dicts['val'], 'max_frame': config.max_frame, 'sample_type': config.frame_sample_type_test, 'frame_id_path_file': frame_id_path_file['val'], 'batch_size': int((opt.batch_size * 2)), 'config': config, 'num_workers': opt.workers})
    capfile = os.path.join(opt.rootpath, opt.valCollection, 'TextData', val_set, (opt.valCollection + '.caption.txt'))
    txt_loader_val = data.txt_provider({'capfile': capfile, 'pin_memory': False, 'config': config, 'batch_size': int((opt.batch_size * 2)), 'num_workers': opt.workers, 'task3': config.task3})
    best_perf = 0
    no_impr_counter = 0
    val_perf_hist_fout = open(os.path.join(model_path, 'val_perf_hist.txt'), 'w')
    # save the untrained model as the epoch-0 checkpoint
    save_checkpoint({'epoch': (0 + 1), 'model': model.state_dict(), 'best_perf': best_perf, 'config': config, 'opt': opt}, True, logdir=model_path, only_best=False, filename=('checkpoint_epoch_%s.pth.tar' % 0))
    for epoch in range(opt.num_epochs):
        logger.info(json.dumps(vars(opt), indent=2))
        print('Epoch[{0} / {1}] LR: {2}'.format(epoch, opt.num_epochs, model.learning_rate))
        print(('-' * 10))
        writer.add_scalar('train/learning_rate', model.learning_rate[0], epoch)
        if hasattr(model, 'change_raw_global_emb_weight'):
            # model-specific per-epoch annealing hook
            model.change_raw_global_emb_weight()
        train(model, data_loaders['train'], epoch)
        if ('train2' in data_loaders):
            train(model, data_loaders['train2'], epoch)
        cur_perf2 = 0
        (cur_perf, cur_perf2) = validate(model, txt_loader_val, vis_loader_val, epoch, measure=config.measure, metric=opt.metric, config=config, negative_val=config.task3)
        model.lr_step(val_value=cur_perf)
        print(' * Current perf: {}\n * Best perf: {}\n'.format(cur_perf, best_perf))
        val_perf_hist_fout.write(('epoch_%d:\nText2Video(%s): %f\n' % (epoch, opt.metric, cur_perf)))
        val_perf_hist_fout.flush()
        is_best = (cur_perf > best_perf)
        best_perf = max(cur_perf, best_perf)
        # w2v data is large and rebuildable; drop it before checkpointing
        # (load_pretrained_model restores it)
        config.t2v_w2v = None
        save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_perf': best_perf, 'config': config, 'opt': opt}, is_best, logdir=model_path, only_best=False, filename=('checkpoint_epoch_%s.pth.tar' % epoch))
        if is_best:
            no_impr_counter = 0
            model_dict = None
        elif (opt.save_mean_last == 1):
            # average the state dicts of the trailing non-best epochs
            # NOTE(review): model_dict is unbound if the very first epoch
            # is not best — possible NameError in the original code.
            if (model_dict is None):
                model_dict = model.state_dict()
                worker_state_dict = [model_dict]
            else:
                worker_state_dict.append(model.state_dict())
            weight_keys = list(worker_state_dict[0].keys())
            fed_state_dict = OrderedDict()
            for key in weight_keys:
                key_sum = 0
                for i in range(len(worker_state_dict)):
                    key_sum = (key_sum + worker_state_dict[i][key])
                fed_state_dict[key] = (key_sum / len(worker_state_dict))
            torch.save({'epoch': (epoch + 1), 'model': fed_state_dict, 'best_perf': best_perf, 'config': config, 'opt': opt}, os.path.join(model_path, 'mean_last10.pth.tar'))
        # NOTE(review): incremented even on best epochs (the reset above
        # happens first), so best epochs carry a counter of 1.
        no_impr_counter += 1
        if ((no_impr_counter > 10) or (epoch == (opt.num_epochs - 1))):
            # early stop / final epoch: keep only the best checkpoint
            save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_perf': best_perf, 'config': config, 'opt': opt}, is_best=False, logdir=model_path, only_best=True, filename=('checkpoint_epoch_%s.pth.tar' % epoch))
            print('Early stopping happended or stopped.\n')
            print(json.dumps(vars(opt), indent=2))
            break
        if ((__name__ == '__main__') and (epoch > 1)):
            # NOTE(review): stops after 3 epochs when run as a script —
            # looks like a debugging leftover; confirm before relying on it.
            break
    val_perf_hist_fout.close()
    message = 'best performance on validation:\n Text to video({}): {}'.format(opt.metric, best_perf)
    print(message)
    with open(os.path.join(model_path, 'val_perf.txt'), 'w') as fout:
        fout.write(message)
def get_negationset(capfile):
    """Read a caption file and return the set of caption ids it lists.

    Each line has the form '<cap_id> <caption text>'.
    """
    ids = set()
    with open(capfile, 'r') as reader:
        for line in reader.readlines():
            cap_id, _caption = line.strip().split(' ', 1)
            ids.add(cap_id)
    return ids
def main_subset(opt):
    """Train using a subset split of the training pairs (no separate validation
    collection): the last ~1.5% of pairs serve as the validation split.

    Relies on module globals: data, logger, writer, train, validate,
    save_checkpoint, prepare_config.
    """
    prepared_configs = prepare_config(opt)
    vis_feat_files = prepared_configs['vis_feat_files']
    cap_file_paths = prepared_configs['cap_file_paths']
    cap_file_paths_task2 = prepared_configs['cap_file_paths_task2']
    opt = prepared_configs['opt']
    config = prepared_configs['config']
    collections = prepared_configs['collections']
    model_path = prepared_configs['model_path']
    model = prepared_configs['model']
    device = prepared_configs['device']
    task2_caption_suffix = prepared_configs['task2_caption_suffix']
    params = {'vis_feat': vis_feat_files['train'], 'capfile': cap_file_paths['train'], 'capfile_task2': cap_file_paths_task2['train'], 'pin_memory': False, 'batch_size': opt.batch_size, 'sampler': None, 'num_workers': opt.workers, 'shuffle': True, 'task2_caption_suffix': task2_caption_suffix}
    data_loader_old = data.pair_provider(params)
    data_induce = np.arange(0, data_loader_old.dataset.length)
    data_loaders = {}
    # 98.5% of the pairs for training, the remainder for validation.
    train_val_split = int((0.985 * data_loader_old.dataset.length))
    if ((__name__ != '__main__') and (torch.cuda.device_count() > 1)):
        # Multi-GPU path: a real sampler is configured downstream, so disable shuffle here.
        params['sampler'] = 'NotNone'
        params['shuffle'] = False
    data_loaders['train'] = data.pair_provider_subset(params, data_induce[0:train_val_split])
    data_loaders['val'] = data.pair_provider_subset(params, data_induce[train_val_split:])
    best_perf = 0
    no_impr_counter = 0
    val_perf_hist_fout = open(os.path.join(model_path, 'val_perf_hist.txt'), 'w')
    for epoch in range(opt.num_epochs):
        logger.info(json.dumps(vars(opt), indent=2))
        print('Epoch[{0} / {1}] LR: {2}'.format(epoch, opt.num_epochs, model.learning_rate))
        print(('-' * 10))
        writer.add_scalar('train/learning_rate', model.learning_rate[0], epoch)
        train(model, data_loaders['train'], epoch)
        # NOTE(review): this calls validate with a pair loader and expects a single
        # value; the validate defined below takes (txt_loader, vis_loader) and
        # returns a tuple — confirm which validate variant this script pairs with.
        cur_perf = validate(model, data_loaders['val'], epoch, measure=config.measure, metric=opt.metric, config=config)
        model.lr_step(val_value=cur_perf)
        print(' * Current perf: {}\n * Best perf: {}\n'.format(cur_perf, best_perf))
        val_perf_hist_fout.write(('epoch_%d:\nText2Video(%s): %f\n' % (epoch, opt.metric, cur_perf)))
        val_perf_hist_fout.flush()
        is_best = (cur_perf > best_perf)
        best_perf = max(cur_perf, best_perf)
        save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_perf': best_perf, 'config': config, 'opt': opt}, is_best, logdir=model_path, only_best=True, filename=('checkpoint_epoch_%s.pth.tar' % epoch))
        if is_best:
            no_impr_counter = 0
        else:
            no_impr_counter += 1
        # Early stopping after 10 consecutive epochs without improvement.
        if (no_impr_counter > 9):
            print('Early stopping happended.\n')
            print(json.dumps(vars(opt), indent=2))
            break
    val_perf_hist_fout.close()
    message = 'best performance on validation:\n Text to video({}): {}'.format(opt.metric, best_perf)
    print(message)
    with open(os.path.join(model_path, 'val_perf.txt'), 'w') as fout:
        fout.write(message)
def train(model, train_loader, epoch):
    """Run one training epoch over *train_loader*, updating a progress bar and
    logging per-batch loss terms to tensorboard.

    Relies on module globals: util, Progbar, writer, torch, time.
    """
    batch_time = util.AverageMeter()
    data_time = util.AverageMeter()
    model.train()
    progbar = Progbar(len(train_loader.dataset))
    end = time.time()
    for (i, train_data) in enumerate(train_loader):
        if (__name__ == '__main__'):
            pass
            # Smoke-test mode: when this file runs as a script, stop after a few
            # batches. NOTE(review): nesting reconstructed from collapsed source —
            # confirm the break is meant to be gated on __main__.
            if (i > 5):
                break
        data_time.update((time.time() - end))
        input_idxs = train_data['idxs']
        # The model's forward returns a dict of named loss terms.
        loss_items = model(train_data, epoch)
        values = [('batch_time', batch_time.val)]
        for key in loss_items.keys():
            if isinstance(loss_items[key], torch.Tensor):
                # Detach to plain floats for progress display / logging.
                loss_items[key] = round(loss_items[key].item(), 4)
            values.append((key, loss_items[key]))
        progbar.add(len(input_idxs), values=values)
        batch_time.update((time.time() - end))
        end = time.time()
        writer.add_scalar('train/Loss', sum(list(loss_items.values())), model.iters)
        for key in loss_items.keys():
            writer.add_scalar(('train/' + key), loss_items[key], model.iters)
    print()
def validate(model, txt_loader, vis_loader, epoch, measure='cosine', metric='mir', negative_val=False, config=None):
    """Evaluate text-to-video retrieval on a validation split.

    Builds a 0/1 relevance matrix ordered by descending similarity, reports the
    standard recall metrics, and — when *negative_val* — re-evaluates the subset
    of queries that appear in the negation caption set.

    :return: (value of *metric*, mir on the negation subset or None)
    """
    (txt2vis_sim, txt_ids, vis_ids) = model.predict(txt_loader, vis_loader, measure=config.measure)
    inds = np.argsort(txt2vis_sim, axis=1)
    label_matrix = np.zeros(inds.shape)
    if negative_val:
        negative_index = []
        capfiles_negationset = get_negationset(config.capfiles_negationset)
    for index in range(inds.shape[0]):
        # argsort is ascending; reverse so rank 0 is the most similar video.
        ind = inds[index][::(- 1)]
        # Ground truth: the video whose id matches the caption id before '#'.
        gt_index = np.where((np.array(vis_ids)[ind] == txt_ids[index].split('#')[0]))[0]
        label_matrix[index][gt_index] = 1
        if negative_val:
            if (txt_ids[index] in capfiles_negationset):
                negative_index.append(index)
    (r1, r5, r10, medr, meanr, mir, mAP) = evaluation.eval(label_matrix)
    write_metric(r1, r5, r10, medr, meanr, mir, mAP, epoch)
    mir2 = None
    if negative_val:
        (r1, r5, r10, medr, meanr, mir2, mAP) = evaluation.eval(label_matrix[negative_index])
        print('negtive_set')
        write_metric(r1, r5, r10, medr, meanr, mir2, mAP, epoch, mode='task3')
    # NOTE(review): locals().get(metric, mir) picks the requested metric from the
    # local variables by name and silently falls back to mir for unknown names —
    # fragile, but kept as-is.
    return (locals().get(metric, mir), locals().get('mir2', mir2))
def write_metric(r1, r5, r10, medr, meanr, mir, mAP, epoch, mode='task1'):
    """Print text-to-video retrieval metrics and log them to tensorboard under `<mode>val/`."""
    sum_recall = ((r1 + r5) + r10)
    print(' * Text to video:')
    print(' * r_1_5_10: {}'.format([round(r1, 3), round(r5, 3), round(r10, 3)]))
    print(' * medr, meanr, mir: {}'.format([round(medr, 3), round(meanr, 3), round(mir, 3)]))
    print(' * mAP: {}'.format(round(mAP, 3)))
    print((' * ' + ('-' * 10)))
    scalars = (('r1', r1), ('r5', r5), ('r10', r10), ('medr', medr), ('meanr', meanr), ('mir', mir), ('mAP', mAP))
    for tag, value in scalars:
        writer.add_scalar('{}val/{}'.format(mode, tag), value, epoch)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', only_best=False, logdir=''):
    """Persist a training checkpoint.

    :param state: dict (epoch/model/best_perf/config/opt) to serialize
    :param is_best: if better than previous epochs, save it and keep a copy as
        model_temp_best.pth.tar
    :param filename: per-epoch checkpoint name (only written when is_best)
    :param only_best: at end of training, promote model_temp_best.pth.tar to
        model_best.pth.tar and remove the temporary copy
    :param logdir: directory that holds the checkpoint files
    """
    resfile = os.path.join(logdir, filename)
    if is_best:
        # Save, snapshot as the running best, then drop the per-epoch file.
        torch.save(state, resfile)
        shutil.copyfile(resfile, os.path.join(logdir, 'model_temp_best.pth.tar'))
        os.remove(resfile)
    if only_best:
        # NOTE(review): assumes model_temp_best.pth.tar exists; if no epoch was
        # ever best this raises FileNotFoundError — confirm callers guarantee it.
        shutil.copyfile(os.path.join(logdir, 'model_temp_best.pth.tar'), os.path.join(logdir, 'model_best.pth.tar'))
        os.remove(os.path.join(logdir, 'model_temp_best.pth.tar'))
def parse_result(res):
    """Extract the overall infAP value from sample_eval.pl output.

    :param res: full stdout text of the evaluation script
    :return: float infAP over all queries, or None if no such line exists

    BUG FIX: the original indexed elems[0] without checking for empty lines, so
    any blank line before the "infAP ... all ..." line raised IndexError.
    """
    for line in res.split('\n'):
        elems = line.split()
        if elems and ('infAP' == elems[0]) and ('all' in line):
            return float(elems[(- 1)])
    return None
def xml_to_treceval(opt, input_file):
    """Convert a TRECVID AVS result XML file into trec_eval submission format.

    Each ranked shot becomes a line "<qry_id> 0 <shot_id> <rank> <score> <team>".

    :param opt: options with an `overwrite` flag
    :param input_file: path of the AVS XML run file
    :return: path of the generated .treceval file (reused if it exists and
        overwrite is off)
    """
    overwrite = opt.overwrite
    res_file = (os.path.splitext(input_file)[0] + '.treceval')
    if os.path.exists(res_file):
        if overwrite:
            logger.info(('%s exists. Overwrite' % res_file))
        else:
            logger.info(('%s exists. Use "--overwrite 1" if you want to overwrite' % res_file))
            return res_file
    tree = ET.parse(input_file)
    root = tree.getroot()
    MAX_SCORE = 9999
    TEAM = 'RUCMM'
    newlines = []
    for topicResult in root.iter('videoAdhocSearchTopicResult'):
        # Query ids are the topic number prefixed with '1' (TRECVID convention).
        qry_id = ('1' + topicResult.attrib['tNum'])
        itemlist = list(topicResult)
        for (rank, item) in enumerate(itemlist):
            # Sanity check: items must be stored in rank order.
            assert ((rank + 1) == int(item.attrib['seqNum']))
            shot_id = item.attrib['shotId']
            # Scores must strictly decrease with rank for trec_eval.
            score = (MAX_SCORE - rank)
            newlines.append(('%s 0 %s %d %d %s' % (qry_id, shot_id, (rank + 1), score, TEAM)))
    fw = open(res_file, 'w')
    fw.write(('\n'.join(newlines) + '\n'))
    fw.close()
    return res_file
def process(opt, input_xml_file):
    """Score an AVS run: convert XML to treceval format, run sample_eval.pl
    against the ground-truth qrels, save the raw output, and return infAP.

    Side effects: changes the working directory to this script's directory so
    the relative path to sample_eval.pl resolves.
    """
    treceval_file = xml_to_treceval(opt, input_xml_file)
    res_file = (input_xml_file + '_perf.txt')
    gt_file = os.path.join(opt.rootpath, opt.collection, 'TextData', ('avs.qrels.%s' % opt.edition))
    os.chdir(os.path.abspath(os.path.dirname(__file__)))
    cmd = ('perl sample_eval.pl -q %s %s' % (gt_file, treceval_file))
    res = os.popen(cmd).read()
    with open(res_file, 'w') as fw:
        fw.write(res)
    resp = parse_result(res)
    print(('%s infAP: \t%.3f' % (opt.edition, resp)), end='\t')
    return resp
def main(argv=None):
    """CLI entry point: parse options and score the given AVS result XML file."""
    argv = sys.argv[1:] if argv is None else argv
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] input_xml_file')
    parser.add_option('--rootpath', type=str, default=ROOT_PATH, help=('path to datasets. (default: %s)' % ROOT_PATH))
    parser.add_option('--collection', type=str, default=COLLECTION, help='test collection')
    parser.add_option('--overwrite', default=0, type='int', help='overwrite existing file (default: 0)')
    parser.add_option('--edition', default=EDITION, type='string', help=('trecvid edition (default: %s)' % EDITION))
    (options, args) = parser.parse_args(argv)
    if len(args) < 1:
        parser.print_help()
        return 1
    if '~' in options.rootpath:
        options.rootpath = options.rootpath.replace('~', os.path.expanduser('~'))
    return process(options, args[0])
def read_topics(topics_file):
    """Parse a topics file of "<tnum> <query>" lines into (tnum, query) tuples."""
    with open(topics_file) as handle:
        rows = [row.strip() for row in handle.readlines()]
    topics = []
    for row in rows:
        tnum, query = row.split(' ', 1)
        topics.append((tnum, query))
    return topics
def wrap_topic_result(tNum, elapsedTime, topicResult):
    """Render one topic's ranked shot ids as AVS submission XML lines."""
    lines = ['<videoAdhocSearchTopicResult tNum="%s" elapsedTime="%g">' % (tNum, elapsedTime)]
    seq = 1
    for shot_id in topicResult:
        lines.append('<item seqNum="%d" shotId="%s" />' % (seq, shot_id))
        seq += 1
    lines.append('</videoAdhocSearchTopicResult>')
    return lines
def process(options, collection, input_txt_file):
    """Convert a ranked retrieval text run into a TRECVID AVS submission XML file.

    Each input line is "<tnum> <shot_id> <score> <shot_id> <score> ...", sorted
    by descending score; the top-k shots per topic are wrapped in submission XML.

    Fixes over the original:
    - the sorted-order check used `assert` inside a bare `try/except: continue`
      (silently disabled under `python -O`, and the bare except could swallow
      unrelated errors); replaced by an explicit condition with the same
      skip-the-shot behavior;
    - the output file is now written via a context manager (no leaked handle);
    - os.makedirs is no longer called with an empty string when the output file
      lives in the current directory.
    """
    rootpath = options.rootpath
    overwrite = options.overwrite
    trtype = options.trtype
    pclass = options.pclass
    pid = options.pid
    priority = options.priority
    edition = options.edition
    desc = options.desc
    etime = options.etime
    topk = options.topk
    output_xml_file = (input_txt_file + '.xml')
    if os.path.exists(output_xml_file):
        if overwrite:
            logger.info(('%s exists. Overwrite' % output_xml_file))
        else:
            logger.info(('%s exists. Use "--overwrite 1" if you want to overwrite' % output_xml_file))
            return
    topics_file = os.path.join(rootpath, collection, 'TextData', ('%s.avs.txt' % edition))
    shots_file = os.path.join(rootpath, collection, 'VideoSets', ('%s.txt' % collection))
    topics = read_topics(topics_file)
    tnum_set = set([x[0] for x in topics])
    shot_set = set(map(str.strip, open(shots_file).readlines()))
    logger.info('%s -> %d testing topics, %d shots', edition, len(tnum_set), len(shot_set))
    data = list(map(str.strip, open(input_txt_file).readlines()))
    assert (len(data) == len(tnum_set)), 'number of topics does not match'
    xml_content = []
    for line in data:
        elems = line.split()
        tNum = elems[0]
        del elems[0]
        if (len(elems) < (2 * topk)):
            # NOTE: this shrinks topk for all subsequent topics as well
            # (original behavior, kept for compatibility).
            topk = int((len(elems) / 2))
        logger.debug('processing testing topic %s', tNum)
        prev_score = 100000000.0
        topic_res = []
        for i in range(0, (2 * topk), 2):
            shot_id = elems[i]
            score = float(elems[(i + 1)])
            assert (shot_id in shot_set), ('invalid shot id: %s' % shot_id)
            if not (score < (prev_score + 1e-08)):
                # Out-of-order score: skip this shot (was: assert in a bare try/except).
                continue
            prev_score = score
            topic_res.append(shot_id)
        xml_content += wrap_topic_result(tNum, etime, topic_res)
        xml_content.append('')
    xml_file = [XML_HEAD]
    xml_file.append('')
    xml_file.append('<videoAdhocSearchResults>')
    xml_file.append(('<videoAdhocSearchRunResult trType="%s" class="%s" pid="%s" priority="%s" desc="%s">' % (trtype, pclass, pid, priority, desc)))
    xml_file += xml_content
    xml_file.append('')
    xml_file.append('</videoAdhocSearchRunResult>')
    xml_file.append('</videoAdhocSearchResults>')
    out_dir = os.path.split(output_xml_file)[0]
    if out_dir and (not os.path.exists(out_dir)):
        os.makedirs(out_dir)
    with open(output_xml_file, 'w') as fw:
        fw.write('\n'.join(xml_file))
    logger.info(('%s -> %s' % (input_txt_file, output_xml_file)))
def main(argv=None):
    """CLI entry point: parse submission options and build the AVS XML for a run.

    BUG FIX: the original checked `len(args) < 1` but uses both args[0]
    (collection) and args[1] (input file); a single argument crashed with
    IndexError instead of printing usage. Now requires two arguments.
    """
    if argv is None:
        argv = sys.argv[1:]
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] collection input_txt_file')
    parser.add_option('--rootpath', type=str, default=ROOT_PATH, help=('path to datasets. (default: %s)' % ROOT_PATH))
    parser.add_option('--overwrite', default=0, type='int', help='overwrite existing file (default: 0)')
    parser.add_option('--trtype', default=TRAIN_TYPE, type='string', help=('training type (default: %s)' % TRAIN_TYPE))
    parser.add_option('--edition', default=EDITION, type='string', help=('trecvid edition (default: %s)' % EDITION))
    parser.add_option('--pclass', default=PCLASS, type='string', help=('processing type (default: %s)' % PCLASS))
    parser.add_option('--pid', default=PID, type='string', help=('participant ID (default: %s)' % PID))
    parser.add_option('--desc', default=DESC, type='string', help=('description of this run (default: %s)' % DESC))
    parser.add_option('--etime', default=ETIME, type='float', help=('elapsed time in seconds (default: %g)' % ETIME))
    parser.add_option('--topk', default=TOPK, type='int', help=('number of returned shots per query (default: %d)' % TOPK))
    parser.add_option('--priority', default=PRIORITY, type='int', help=('priority (default: %d)' % PRIORITY))
    (options, args) = parser.parse_args(argv)
    if len(args) < 2:
        parser.print_help()
        return 1
    if '~' in options.rootpath:
        options.rootpath = options.rootpath.replace('~', os.path.expanduser('~'))
    return process(options, args[0], args[1])
def checkToSkip(filename, overwrite):
    """Return 1 if *filename* exists and should not be overwritten, else 0.

    Prints what will happen (exists / overwrite / skip). Cleans up the odd
    `(print(...),)` tuple-wrapped print of the original — same output, plain call.
    """
    if os.path.exists(filename):
        print('%s exists.' % filename)
        if overwrite:
            print('overwrite')
            return 0
        print('skip')
        return 1
    return 0
def process(feat_dim, inputTextFiles, resultdir, overwrite):
    """Merge whitespace-separated "<id> <v1> ... <vd>" feature text files into a
    binary feature file (feature.bin), an id list (id.txt), and a shape file.

    :param feat_dim: expected dimensionality; 0 means infer from the first vector
    :param inputTextFiles: list of input text files to parse
    :param resultdir: output directory (created if missing)
    :param overwrite: regenerate even if feature.bin already exists
    :return: 0 when skipped because the output exists, otherwise None
    """
    res_binary_file = os.path.join(resultdir, 'feature.bin')
    res_id_file = os.path.join(resultdir, 'id.txt')
    if checkToSkip(res_binary_file, overwrite):
        return 0
    if (os.path.isdir(resultdir) is False):
        os.makedirs(resultdir)
    fw = open(res_binary_file, 'wb')
    processed = set()
    imset = []
    count_line = 0
    failed = 0
    for filename in inputTextFiles:
        print(('>>> Processing %s' % filename))
        for line in open(filename):
            count_line += 1
            elems = line.strip().split()
            if (not elems):
                continue
            name = elems[0]
            if (name in processed):
                # Keep only the first occurrence of each id.
                continue
            processed.add(name)
            del elems[0]
            vec = np.array(list(map(float, elems)), dtype=np.float32)
            # Drop vectors that contain NaN values.
            okay = True
            for x in vec:
                if math.isnan(x):
                    okay = False
                    break
            if (not okay):
                failed += 1
                continue
            if (feat_dim == 0):
                # Infer the dimensionality from the first valid vector.
                feat_dim = len(vec)
            else:
                assert (len(vec) == feat_dim), ('dimensionality mismatch: required %d, input %d, id=%s, inputfile=%s' % (feat_dim, len(vec), name, filename))
            # Raw float32 values appended in id order.
            vec.tofile(fw)
            imset.append(name)
    fw.close()
    fw = open(res_id_file, 'w')
    fw.write(' '.join(imset))
    fw.close()
    fw = open(os.path.join(resultdir, 'shape.txt'), 'w')
    fw.write(('%d %d' % (len(imset), feat_dim)))
    fw.close()
    print(('%d lines parsed, %d ids, %d failed -> %d unique ids' % (count_line, len(processed), failed, len(imset))))
def main(argv=None):
    """CLI entry point: convert feature text file(s) into a binary feature dir."""
    if argv is None:
        argv = sys.argv[1:]
    parser = OptionParser(usage='usage: %prog [options] nDims inputTextFile isFileList resultDir')
    parser.add_option('--overwrite', default=0, type='int', help='overwrite existing file (default=0)')
    (options, args) = parser.parse_args(argv)
    if len(args) < 4:
        parser.print_help()
        return 1
    fea_dim = int(args[0])
    inputTextFile = args[1]
    if int(args[2]) == 1:
        # Input is a list file: one input path per non-empty, non-comment line.
        inputTextFiles = [row.strip() for row in open(inputTextFile).readlines() if (row.strip() and (not row.strip().startswith('#')))]
    else:
        inputTextFiles = [inputTextFile]
    return process(fea_dim, inputTextFiles, args[3], options.overwrite)
def get_lang(data_path):
    """Language code of the resource at *data_path*; currently always English."""
    return 'en'
class Txt2Vec(object):
    """Base class turning a text query into a vector.

    norm: 0 = no normalization, 1 = L1 norm, 2 = L2 norm.

    BUG FIX: encoding() called self.do_norm, which does not exist (the method is
    named _do_norm), so every normalized encoder raised AttributeError.
    """

    def __init__(self, data_path, norm=0, clean=True):
        logger.info((self.__class__.__name__ + ' initializing ...'))
        self.data_path = data_path
        self.norm = norm
        self.lang = get_lang(data_path)
        self.clean = clean
        assert (norm in [0, 1, 2]), ('invalid norm %s' % norm)

    def _preprocess(self, query):
        # Tokenize (and optionally clean) the raw query string.
        words = TextTool.tokenize(query, clean=self.clean, language=self.lang)
        return words

    def _do_norm(self, vec):
        # L1/L2 normalization; epsilon guards against zero vectors.
        assert ((1 == self.norm) or (2 == self.norm))
        norm = np.linalg.norm(vec, self.norm)
        return (vec / (norm + 1e-10))

    def _encoding(self, words):
        raise Exception('encoding not implemented yet!')

    def encoding(self, query):
        """Encode *query* into a vector, normalized when self.norm > 0."""
        words = self._preprocess(query)
        vec = self._encoding(words)
        if (self.norm > 0):
            return self._do_norm(vec)
        return vec

    def encoding_word_and_confidence(self, query):
        raise Exception('encoding_word_and_confidence not implemented yet!')
class BowVec(Txt2Vec):
    """Bag-of-words encoder backed by a pickled vocabulary.

    BUG FIX: encoding_word_and_confidence() called self.do_norm, which does not
    exist (the normalization method is _do_norm), so normalized encodings raised
    AttributeError.
    """

    def __init__(self, data_path, norm=0, clean=True):
        super(BowVec, self).__init__(data_path, norm, clean)
        self.vocab = pickle.load(open(data_path, 'rb'))
        self.ndims = len(self.vocab)
        logger.info(('vob size: %d, vec dim: %d' % (len(self.vocab), self.ndims)))

    def _encoding(self, words):
        # Term-frequency vector over the vocabulary; unknown words are ignored.
        vec = np.zeros(self.ndims)
        for word in words:
            idx = self.vocab.find(word)
            if (idx >= 0):
                vec[idx] += 1
        return vec

    def __len__(self):
        return self.ndims

    def encoding_word_and_confidence(self, query):
        """Encode a query of space-separated "word#confidence" tokens into a
        confidence-weighted bag-of-words vector.

        :param query: str of "word#confidence" tokens
        """
        word_cons = query.strip(' .').lower().split()
        word_dict = {}
        for each in word_cons:
            (word, confidence) = each.split('#')
            word_dict[word] = confidence
        vec = np.zeros(self.ndims)
        for word in list(word_dict.keys()):
            idx = self.vocab.find(word)
            if (idx >= 0):
                # NOTE: confidence is a str here; numpy coerces it on assignment.
                vec[idx] = word_dict[word]
        if (self.norm > 0):
            return self._do_norm(vec)
        return vec
class W2Vec(Txt2Vec):
    """Word2vec encoder: a query vector is the mean of its word embeddings."""

    def __init__(self, data_path, norm=0, clean=True):
        super(W2Vec, self).__init__(data_path, norm, clean)
        self.w2v = BigFile(data_path)
        (vocab_size, self.ndims) = self.w2v.shape()
        logger.info(('vob size: %d, vec dim: %d' % (vocab_size, self.ndims)))

    def _encoding(self, words):
        # Average the embeddings of known words; zero vector when none are known.
        (renamed, vectors) = self.w2v.read(words)
        if len(vectors) > 0:
            return np.array(vectors).mean(axis=0)
        return np.zeros(self.ndims)

    def raw_encoding(self, query):
        """Per-word embedding matrix for *query* (no averaging, no normalization)."""
        tokens = self._preprocess(query)
        (renamed, vectors) = self.w2v.read(tokens)
        if len(vectors) > 0:
            return np.array(vectors)
        return np.zeros((len(tokens), self.ndims))
class IndexVec(Txt2Vec):
    """Encode a query as a sequence of vocabulary indices, wrapped in <start>/<end>."""

    def __init__(self, data_path, clean=True):
        super(IndexVec, self).__init__(data_path, 0, clean)
        self.vocab = pickle.load(open(data_path, 'rb'))
        self.ndims = len(self.vocab)
        logger.info(('vob size: %s' % len(self.vocab)))

    def _preprocess(self, query):
        # Keep stopwords and add sequence boundary markers.
        tokens = TextTool.tokenize(query, clean=self.clean, language=self.lang, remove_stopword=False)
        return (['<start>'] + tokens) + ['<end>']

    def _encoding(self, words):
        # The vocabulary object is callable and maps a word to its index.
        return np.array([self.vocab(word) for word in words])
class BowVecNSW(BowVec):
    """Bag-of-words encoder that removes stopwords before encoding."""

    def __init__(self, data_path, norm=0, clean=True):
        super(BowVecNSW, self).__init__(data_path, norm, clean)
        if '_nsw' not in data_path:
            # A stopword-free vocabulary file is expected ('_nsw' suffix convention).
            logger.error('WARNING: loaded a vocabulary that contains stopwords')

    def _preprocess(self, query):
        return TextTool.tokenize(query, clean=self.clean, language=self.lang, remove_stopword=True)
class W2VecNSW(W2Vec):
    """Word2vec encoder that removes stopwords before encoding."""

    def _preprocess(self, query):
        return TextTool.tokenize(query, clean=self.clean, language=self.lang, remove_stopword=True)
def get_txt2vec(name):
    """Return the text-to-vector implementation registered under *name*."""
    assert (name in NAME_TO_T2V)
    return NAME_TO_T2V[name]
def checkToSkip(filename, overwrite):
    """Return 1 if *filename* exists and must be kept (skip the work), else 0.

    Existing files are preserved unless *overwrite* is truthy.
    """
    if not os.path.exists(filename):
        return 0
    if overwrite:
        logging.info('%s exists. overwrite', filename)
        return 0
    logging.info('%s exists. quit', filename)
    return 1
def makedirs(path):
    """Create *path* (including parents) if it does not already exist.

    Uses exist_ok=True instead of the original check-then-create, which could
    raise FileExistsError if another process created the directory between the
    exists() check and makedirs().
    """
    os.makedirs(path, exist_ok=True)
def makedirsforfile(filename):
    """Ensure the parent directory of *filename* exists."""
    parent = os.path.dirname(filename)
    makedirs(parent)
def timer(fn):
    """Decorator that prints the wall-clock execution time of each call to *fn*."""
    @wraps(fn)
    def timed(*args, **kwargs):
        began = time.time()
        result = fn(*args, **kwargs)
        print(fn.__name__ + (' execution time: %.3f seconds\n' % (time.time() - began)))
        return result
    return timed
class AverageMeter(object):
    """Tracks the latest value and a running average of a series of observations."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times."""
        self.val = val
        self.sum += (val * n)
        self.count += n
        # Small epsilon keeps the average finite before the first update.
        self.avg = (self.sum / (0.0001 + self.count))

    def __str__(self):
        """Latest value, plus the running average once any update happened."""
        if not self.count:
            return str(self.val)
        return ('%.4f (%.4f)' % (self.val, self.avg))
class LogCollector(object):
    """Ordered collection of named AverageMeters, shared between train and val logging."""

    def __init__(self):
        self.meters = OrderedDict()

    def update(self, k, v, n=1):
        """Add observation *v* (count *n*) to the meter named *k*, creating it if needed."""
        if k not in self.meters:
            self.meters[k] = AverageMeter()
        self.meters[k].update(v, n)

    def __str__(self):
        """Space-separated "name value" pairs on one line."""
        parts = []
        for k, v in self.meters.items():
            parts.append((k + ' ') + str(v))
        return ' '.join(parts)

    def tb_log(self, tb_logger, prefix='', step=None):
        """Push the current value of every meter to tensorboard."""
        for k, v in self.meters.items():
            # NOTE(review): SummaryWriter.add_scalar takes global_step, not a
            # step= keyword — confirm tb_logger's actual signature.
            tb_logger.add_scalar((prefix + k), v.val, step=step)
def train_nn(neurons=(20,), **kwargs):
    """Train an sklearn MLP classifier on the module-level train/test split.

    Relies on module globals: dataset, train_x, train_y, test_x, test_y.

    :param neurons: hidden layer sizes for the MLP
    :param kwargs: forwarded to MLPClassifier
    :return: the fitted model (wrapped in a one-hot Pipeline if the dataset
        declares categorical features)
    """
    is_categorical = dataset.get('is_categorical', None)
    model = MLPClassifier(hidden_layer_sizes=neurons, **kwargs)
    if (is_categorical is not None):
        # NOTE(review): OneHotEncoder(categorical_features=...) was removed in
        # scikit-learn 0.22+; this code requires an old sklearn — confirm the
        # pinned version (ColumnTransformer is the modern replacement).
        model = Pipeline([('one_hot', OneHotEncoder(categorical_features=is_categorical)), ('mlp', model)])
    model.fit(train_x, train_y)
    train_score = model.score(train_x, train_y)
    test_score = model.score(test_x, test_y)
    print('Training score:', train_score)
    print('Test score:', test_score)
    return model
def train_surrogate(model, sampling_rate=2.0, **kwargs):
    """Fit a rule-list surrogate that mimics *model*'s predictions.

    Relies on module globals: train_x, test_x, is_continuous, is_categorical,
    is_integer, feature_names, rule_surrogate.

    :param model: fitted classifier exposing .predict
    :param sampling_rate: oversampling factor for the surrogate's training data
    :param kwargs: forwarded to rule_surrogate
    :return: the fitted surrogate (its score is prediction fidelity to *model*)
    """
    surrogate = rule_surrogate(model.predict, train_x, sampling_rate=sampling_rate, is_continuous=is_continuous, is_categorical=is_categorical, is_integer=is_integer, rlargs={'feature_names': feature_names, 'verbose': 2}, **kwargs)
    train_fidelity = surrogate.score(train_x)
    test_fidelity = surrogate.score(test_x)
    print('Training fidelity:', train_fidelity)
    print('Test fidelity:', test_fidelity)
    return surrogate
def label2binary(y):
    """One-hot encode a 1-D label array into a dense 0/1 matrix."""
    encoder = OneHotEncoder()
    encoded = encoder.fit_transform(y.reshape([(- 1), 1]))
    return encoded.toarray()
def auc_score(y_true, y_pred, average=None):
    """ROC-AUC of probability predictions against one-hot-encoded true labels."""
    binary_truth = label2binary(y_true)
    return roc_auc_score(binary_truth, y_pred, average=average)
def accuracy(y_true, y_pred, weights=None):
    """(Weighted) fraction of predictions that equal the true labels."""
    hits = (y_true == y_pred)
    return np.average(hits, weights=weights)
def mse(y_true, y_pred, weights=None):
    """(Weighted) mean squared error between targets and predictions."""
    squared_errors = ((y_true - y_pred) ** 2)
    return np.average(squared_errors, weights=weights)
def evaluate_classifier(classifier, x, y, verbose=True):
    """Compute accuracy, log-loss and macro ROC-AUC of *classifier* on (x, y)."""
    predictions = classifier.predict(x)
    acc = accuracy(y, predictions)
    y_proba = classifier.predict_proba(x)
    loss = log_loss(y, y_proba)
    auc = auc_score(y, y_proba, average='macro')
    if verbose:
        print('Accuracy: {:.4f}; loss: {:.4f}; auc: {:.4f}'.format(acc, loss, auc))
    return (acc, loss, auc)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as nested lists."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Defer everything else to the base implementation (raises TypeError).
        return json.JSONEncoder.default(self, obj)
class HashableList(list):
    """A list usable as a dict/cache key; hashed via its JSON serialization.

    BUG FIX: the original called json.jsonify, which does not exist in the
    stdlib json module (jsonify is a Flask helper), so __hash__ always raised
    AttributeError. json.dumps is the intended serializer.
    """

    def __hash__(self):
        return hash(json.dumps(self))
def model_name2file(model_name):
    """Path on disk of the serialized model named *model_name*."""
    return Config.get_path(Config.model_dir(), (model_name + FILE_EXTENSION))
class ModelCache():
    """In-memory cache of deserialized models plus a model-name -> dataset map."""

    def __init__(self):
        # model_name -> loaded ModelInterface instance
        self.cache = {}
        # model_name -> dataset name, populated from the config file
        self.model2dataset = {}

    def init(self, config_path=None):
        """Load the model->dataset mapping from a JSON config; return self for chaining."""
        if (config_path is None):
            config_path = Config.config_dir()
        config = json2dict(config_path)
        if ('models' in config):
            for model_config in config['models']:
                self.model2dataset[model_config['model']] = model_config['dataset']
        return self

    def load_model(self, model_name):
        """Deserialize a model from disk, cache it, and return it.

        :raises RuntimeError: if the loaded object is not a ModelInterface
        """
        filename = model_name2file(model_name)
        print('Loading model {} from {}'.format(model_name, filename))
        start = time.time()
        model = load_model(filename)
        if isinstance(model, ModelInterface):
            self.cache[model_name] = model
            print('Model {} loaded. Total time {:.4f}s'.format(model_name, (time.time() - start)))
            return model
        else:
            raise RuntimeError('Mal-format! Cannot load model file {}!'.format(filename))

    def get_model(self, model_name):
        """Cached lookup; falls back to loading from disk."""
        if (model_name in self.cache):
            return self.cache[model_name]
        return self.load_model(model_name)

    def get_model_data(self, model_name: str):
        """Dataset name for *model_name*.

        Falls back to parsing the model name itself (naming convention:
        "<dataset>-..." or "<x>-surrogate-<dataset>-...").
        """
        if (model_name in self.model2dataset):
            return self.model2dataset[model_name]
        print('Try find model data name by parsing the model_name')
        specs = model_name.split('-')
        if (specs[1] == 'surrogate'):
            return specs[2]
        else:
            return specs[0]
def get_model(model_name):
    """Fetch *model_name* through the module-level model cache."""
    return _cache.get_model(model_name)
def available_models():
    """Names of all models declared in the loaded configuration."""
    return list(_cache.model2dataset.keys())
def get_model_data(model_name):
    """Dataset name associated with *model_name* (via the module-level cache)."""
    return _cache.get_model_data(model_name)
def register_model_dataset(model_name, dataset):
    """Record that *model_name* was trained on *dataset*."""
    _cache.model2dataset[model_name] = dataset
def parse_filter(query_json):
    """Wrap a JSON filter list so it is hashable (for caching); None passes through."""
    if query_json is None:
        return query_json
    return HashableList(query_json)
@app.route('/static/js/<path:path>')
def send_js(path):
    """Serve bundled front-end JavaScript assets."""
    directory = safe_join(app.config['STATIC_FOLDER'], 'js')
    return send_from_directory(directory, path)
@app.route('/static/css/<path:path>')
def send_css(path):
    """Serve bundled front-end CSS assets."""
    directory = safe_join(app.config['STATIC_FOLDER'], 'css')
    return send_from_directory(directory, path)
@app.route('/static/fonts/<path:path>')
def send_fonts(path):
    """Serve bundled front-end font assets."""
    directory = safe_join(app.config['STATIC_FOLDER'], 'fonts')
    return send_from_directory(directory, path)
@app.route('/')
def index():
    """Serve the single-page-app entry point."""
    root = app.config['FRONT_END_ROOT']
    return send_from_directory(root, 'index.html')
@app.route('/<string:model>', methods=['GET'])
def send_index(model):
    """Serve PWA special files directly; every other path falls through to the SPA."""
    root = app.config['FRONT_END_ROOT']
    if model in ('service-worker.js', 'favicon.ico'):
        return send_from_directory(root, model)
    return send_from_directory(root, 'index.html')
@app.route('/api/model', methods=['GET'])
def models():
    """List the names of all available models as JSON."""
    return jsonify(available_models())
@app.route('/api/model/<string:model_name>', methods=['GET'])
def model_info(model_name):
    """Model description JSON, or 404 when the model is unknown."""
    model_json = model2json(model_name)
    if model_json is None:
        return abort(404)
    return model_json
@app.route('/api/model_data/<string:model_name>', methods=['GET', 'POST'])
def model_data(model_name):
    """Data summary for a model's dataset; a POST body supplies optional filters."""
    if model_name is None:
        return abort(404)
    data_type = request.args.get('data', 'train')
    bins = int(request.args.get('bins', '20'))
    if request.method == 'GET':
        data_json = model_data2json(model_name, data_type, bins)
    else:
        data_json = model_data2json(model_name, data_type, bins, filters=parse_filter(request.get_json()))
    if data_json is None:
        abort(404)
    return data_json
@app.route('/api/metric/<string:model_name>', methods=['GET'])
def metric(model_name):
    """Evaluation metrics of a model on the requested data split (default: test)."""
    split = request.args.get('data', 'test')
    ret_json = model_metric(model_name, split)
    if ret_json is None:
        abort(404)
    return ret_json
@app.route('/api/support/<string:model_name>', methods=['GET', 'POST'])
def support(model_name):
    """Rule support statistics; a POST body may carry a filter list."""
    data_type = request.args.get('data', 'train')
    support_type = request.args.get('support', 'simple')
    if request.method == 'GET':
        ret_json = get_support(model_name, data_type, support_type)
    else:
        ret_json = get_support(model_name, data_type, support_type, filters=parse_filter(request.get_json()))
    if ret_json is None:
        abort(404)
    return ret_json
@app.route('/api/stream/<string:model_name>', methods=['GET', 'POST'])
def stream(model_name):
    """Stream (flow) visualization data; a POST body may carry a filter list."""
    data_type = request.args.get('data', 'train')
    conditional = (request.args.get('conditional', 'true') == 'true')
    bins = int(request.args.get('bins', '20'))
    if request.method == 'GET':
        ret_json = get_stream(model_name, data_type, conditional=conditional, bins=bins)
    else:
        ret_json = get_stream(model_name, data_type, conditional=conditional, bins=bins, filters=parse_filter(request.get_json()))
    if ret_json is None:
        abort(404)
    return ret_json
@app.route('/api/query/<string:model_name>', methods=['POST'])
def query(model_name):
    """Paged raw data (x, y) for a model's dataset, filtered by the POSTed rules.

    Query args: data (train/test), start, end (row slice bounds).

    Cleanup: the original wrapped the data lookup in a no-op
    ``try: ... except: raise`` — removed; exceptions propagate exactly as before.
    """
    if model_name is None:
        abort(404)
    data_type = request.args.get('data', 'train')
    start = int(request.args.get('start', '0'))
    end = int(request.args.get('end', '100'))
    query_json = request.get_json()
    print(query_json)
    filters = (None if (query_json is None) else HashableList(query_json))
    data = get_model_x_y(model_name, data_type, filters)
    if data is None:
        abort(404)
    (x, y) = data
    print('get', len(y))
    return jsonify({'data': x[start:end], 'target': y[start:end], 'end': end, 'totalLength': len(y)})
@app.route('/api/predict', methods=['POST'])
def predict():
    """Run the named model's predict on data passed via query args."""
    name = request.args.get('name')
    data = request.args.get('data')
    if name is None:
        abort(404)
    return get_model(name).predict(data)
def get_path(path, filename=None, absolute=False):
    """Resolve *path* (optionally plus *filename*) relative to the project root.

    :param path: path relative to ROOT_DIR
    :param filename: optional file name appended to path
    :param absolute: return an absolute path instead of a relative one
    """
    return Config.get_path(path, filename, absolute)
def write2file(s_io, filename=None, mode='w', encoding=None):
    """Write a str or io.StringIO to *filename*, creating parent dirs as needed."""
    before_save(filename)
    with open(filename, mode, encoding=encoding) as f:
        content = s_io.getvalue() if isinstance(s_io, io.StringIO) else s_io
        f.write(content)
def obj2pkl(obj, filename=None, *args, **kwargs):
    """Pickle *obj* to *filename*, or return the pickled bytes when filename is None."""
    if filename is None:
        return pickle.dumps(obj, **kwargs)
    before_save(filename)
    with open(filename, 'wb') as f:
        return pickle.dump(obj, f, *args, **kwargs)
def pkl2obj(filename=None):
    """Load and return the pickled object stored at *filename*."""
    assert_file_exists(filename)
    with open(filename, 'rb') as f:
        return pickle.load(f)
def dict2json(obj, filename=None, *args, **kwargs):
    """Serialize *obj* as JSON to *filename*, or return the JSON string when filename is None."""
    if filename is None:
        return json.dumps(obj, **kwargs)
    before_save(filename)
    with open(filename, 'w') as f:
        return json.dump(obj, f, *args, **kwargs)
def json2dict(filename, *args, **kwargs):
    """Load and return the JSON document stored at *filename*."""
    assert_file_exists(filename)
    with open(filename, 'r') as f:
        return json.load(f, *args, **kwargs)
def df2csv(df, filename, **kwargs):
    """Write *df* (coerced to a DataFrame if needed) to CSV without the index column."""
    frame = df if isinstance(df, pd.DataFrame) else pd.DataFrame(df)
    return frame.to_csv(filename, index=False, **kwargs)
def csv2df(filename):
    """Load a CSV file into a pandas DataFrame."""
    assert_file_exists(filename)
    return pd.read_csv(filename)
def array2csv(array, filename, **kwargs):
    """Write a 2-D array-like to CSV (no index column)."""
    frame = pd.DataFrame(array)
    return frame.to_csv(filename, index=False, **kwargs)
def csv2array(filename):
    """Load a CSV file into a numpy array.

    BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
    pandas 1.0; to_numpy() is the supported replacement with the same result.
    """
    assert_file_exists(filename)
    return pd.read_csv(filename).to_numpy()
def array2npy(array: np.ndarray, filename, *args, **kwargs):
    """Save *array* in .npy format at *filename*."""
    return np.save(filename, array, *args, **kwargs)
def npy2array(filename, *args, **kwargs):
    """Load and return the numpy array stored at *filename*."""
    assert_file_exists(filename)
    return np.load(filename, *args, **kwargs)
def lists2csv(list_of_list, file_path, delimiter=',', encoding=None):
    """Write rows (lists of values, stringified) to a delimited text file."""
    with io.StringIO() as buffer:
        writer = csv.writer(buffer, delimiter=delimiter)
        for row in list_of_list:
            writer.writerow([str(item) for item in row])
        # Flush the in-memory buffer to disk while it is still open.
        write2file(buffer, file_path, 'w', encoding=encoding)
def csv2lists(file_path, delimiter=',', mode='r', encoding=None, skip=0):
    """Read a delimited text file into a list of row lists, skipping *skip* leading rows."""
    assert_file_exists(file_path)
    rows = []
    with open(file_path, mode, newline='', encoding=encoding) as f:
        reader = csv.reader(f, delimiter=delimiter)
        for _ in range(skip):
            next(reader)
        rows.extend(reader)
    return rows
def text2list(file_path, delimiter='|', mode='r'):
    """Read a file and split its full contents on *delimiter*."""
    assert_file_exists(file_path)
    with open(file_path, mode) as f:
        return f.read().split(delimiter)
def save2text(a_list, file_path, delimiter='|'):
    """Join the stringified elements of *a_list* with *delimiter* and write to file."""
    text = delimiter.join([str(item) for item in a_list])
    write2file(text, file_path, 'w')
def path_exists(file_or_dir):
    """True when the given file or directory exists on disk."""
    return os.path.exists(file_or_dir)