code stringlengths 17 6.64M |
|---|
def tokenExtraction(window_size_list, data, mode):
    """
    Extract n-gram features around the '<TARGET>' marker of each input row.

    :param window_size_list: [list] window sizes of the n-grams
    :param data: [list] input rows; each row is a dict whose 'text_TARGET'
                 field is a space-separated string containing one '<TARGET>'
    :param mode: how to extract features from the tweet; one of
                 'TARGET_two_sides', 'TARGET_one_side', 'all'
    :return: [list] one list of extracted n-gram strings per input row;
             rows that yield no n-grams contribute ['<UNK>']
    :raises IndexError: if a row contains no '<TARGET>' token
    """
    ngram_all = []
    for line in data:
        curr_ngram = []
        # NOTE: the old unused `line2 = line.copy()` was removed
        tokens = line['text_TARGET'].split(' ')
        try:
            target_index = [idx for (idx, tok) in enumerate(tokens) if (tok == '<TARGET>')][0]
        except IndexError:  # narrowed from a bare except
            print(tokens)
            print("[ERROR] didn't find <TARGET>")
            raise
        for window_size in window_size_list:
            start_index = max(0, (target_index - window_size))
            end_index = min((target_index + window_size), len(tokens))
            if (mode == 'TARGET_two_sides'):
                # one window centred on <TARGET>, clipped to the sentence
                curr_ngram.append(' '.join(tokens[start_index:end_index]))
            if (mode == 'TARGET_one_side'):
                # left-side window (only when <TARGET> is not the first token)
                if (tokens[0] != '<TARGET>'):
                    curr_ngram.append(' '.join(tokens[start_index:(target_index + 1)]))
                # right-side window (only when <TARGET> is not the last token)
                if (tokens[-1] != '<TARGET>'):
                    curr_ngram.append(' '.join(tokens[target_index:end_index]))
            if (mode == 'all'):
                # every sliding n-gram of this window size over the sentence
                for idx in range((len(tokens) - window_size) + 1):
                    curr_ngram.append(' '.join(tokens[idx:(idx + window_size)]))
        if curr_ngram:
            ngram_all.append(curr_ngram)
        else:
            # BUG FIX: previously the bare string '<UNK>' was appended, which
            # downstream code (flatten / per-token iteration) would process
            # character-by-character; wrap it in a list like every other row.
            ngram_all.append(['<UNK>'])
    return ngram_all
|
def convertFeature2Idx(actual_features, train_feature_dict):
    """
    Convert extracted n-gram features into their dictionary indices.

    :param actual_features: [list] per-row lists of n-gram strings
    :param train_feature_dict: [dict] n-gram -> index mapping; must contain '<UNK>'
    :return: [list] per-row lists of indices; unseen n-grams map to the '<UNK>' index
    :raises KeyError: if train_feature_dict has no '<UNK>' entry
    """
    # Hoisted once; buildTrainDict always inserts '<UNK>'.
    unk_idx = train_feature_dict['<UNK>']
    features_idx = []
    for line in actual_features:
        # dict.get replaces the old bare try/except, which silently swallowed
        # every exception type, not just missing keys.
        features_idx.append([train_feature_dict.get(token, unk_idx) for token in line])
    return features_idx
|
def buildTrainDict(train_ngram_all, verbose=False, set_threshold=False, threshold=1):
    """
    Build the training n-gram dictionary.

    :param train_ngram_all: [list] all extracted n-gram features (list of lists)
    :param verbose: print summary statistics
    :param set_threshold: drop n-grams whose frequency is <= threshold
    :param threshold: frequency cutoff used when set_threshold is True
    :return: (train_ngram_counter, train_ngram_dict) where the counter is a
             list of (ngram, count) pairs sorted by descending count and the
             dict maps each kept ngram (plus '<UNK>') to an index
    """
    train_ngram_all_flatten = [j for i in train_ngram_all for j in i]
    counts = Counter(train_ngram_all_flatten)
    # (ngram, count) pairs sorted by descending frequency; ties keep
    # first-seen order (sorted() is stable), as before.
    train_ngram_counter = sorted(counts.items(), key=(lambda x: x[1]), reverse=True)
    if verbose:
        print('[I] total ngram', len(train_ngram_all_flatten), 'unique ngram', len(counts))
        print('[I] the most frequent tokens: ', train_ngram_counter[0:20])
    if set_threshold:
        print((('[W] threshold ' + str(threshold)) + ' is used for filtering the ngrams'))
        # BUG FIX: the old code looked up the first index whose count equals
        # the threshold and crashed (min() of an empty sequence) when no count
        # matched exactly; keeping entries with count > threshold is the same
        # cut on the descending-sorted list and is always safe.
        train_ngram_counter = [pair for pair in train_ngram_counter if (pair[1] > threshold)]
    train_ngram_dict = {ngram: idx for (idx, (ngram, _)) in enumerate(train_ngram_counter)}
    # BUG FIX: the '<UNK>' index no longer depends on the loop variable `idx`,
    # which was undefined when the counter was empty.
    train_ngram_dict['<UNK>'] = len(train_ngram_dict)
    return (train_ngram_counter, train_ngram_dict)
|
def trainLRModel(train_all, train_label, window_size_list, ngram_extract_mode, flag, save_model=False):
    """
    Train a logistic-regression severity classifier on labelled cyber-threat data.

    :param train_all: training data rows
    :param train_label: training labels (severe / non-severe)
    :param window_size_list: n-gram window sizes
    :param ngram_extract_mode: feature-extraction mode passed to tokenExtraction
    :param flag: prefix used when naming the saved model files
    :param save_model: persist the model and dictionaries under ./trained_model/
    :return: (fitted LogisticRegression model, training n-gram dictionary)
    """
    train_ngram_all = tokenExtraction(window_size_list, train_all, mode=ngram_extract_mode)
    (train_ngram_counter, train_ngram_dict) = buildTrainDict(train_ngram_all, verbose=False, set_threshold=True, threshold=1)
    train_features_idx = convertFeature2Idx(train_ngram_all, train_ngram_dict)
    # de-duplicate the feature indices within each row
    train_features_no_dup = [list(set(row)) for row in train_features_idx]
    train_idx_sparse = convertToSparseMatrix(train_features_no_dup, train_ngram_dict)
    lr = LogisticRegression(solver='lbfgs')
    lr.fit(train_idx_sparse['sparse_matrix'], train_label)
    print('[I] logistic regression training completed.')
    print(('[I] training set dimension: ' + str(np.shape(train_idx_sparse['sparse_matrix']))))
    if save_model:
        prefix = './trained_model/' + flag
        with open(prefix + '_lr_model.pkl', 'wb') as f:
            pickle.dump(lr, f)
        with open(prefix + '_train_ngram_counter.json', 'w') as f:
            json.dump(train_ngram_counter, f)
        with open(prefix + '_train_ngram_dict.json', 'w') as f:
            json.dump(train_ngram_dict, f)
        print('[I] all model files have been saved.')
    return (lr, train_ngram_dict)
|
def evalLRModel(window_size_list, val_all, train_ngram_dict, ngram_extract_mode, model):
    """
    Score data with a trained cyber-threat existence classifier.

    :param window_size_list: feature-extraction window sizes
    :param val_all: data rows to be scored
    :param train_ngram_dict: n-gram -> index mapping built on the training set
    :param ngram_extract_mode: feature-extraction mode passed to tokenExtraction
    :param model: fitted classifier exposing predict_proba
    :return: class probabilities for each row of val_all
    """
    val_ngram_all = tokenExtraction(window_size_list, val_all, mode=ngram_extract_mode)
    val_features_idx = convertFeature2Idx(val_ngram_all, train_ngram_dict)
    # de-duplicate the feature indices within each row
    val_features_no_dup = [list(set(row)) for row in val_features_idx]
    val_idx_sparse = convertToSparseMatrix(val_features_no_dup, train_ngram_dict)
    return model.predict_proba(val_idx_sparse['sparse_matrix'])
|
def readTXTFile(path, verbose=False):
    """
    Read a text file and return its raw lines (trailing newlines preserved).

    TODO: It is not the best way of reading txt files

    :param path: file path to read
    :param verbose: print a completion message with the line count
    :return: list of raw lines
    """
    with open(path, 'r') as handle:
        lines = list(handle)
    if verbose:
        print('[I] file read complete with length', len(lines))
    return lines
|
def readJSONFile(path, verbose=False):
    """
    Load a single JSON document from `path`.

    :param path: file path to read
    :param verbose: print a completion message
    :return: the parsed JSON object
    """
    with open(path, 'r') as handle:
        content = json.load(handle)
    if verbose:
        print('[I] file read complete')
    return content
|
def writeJSONFile(data, path, verbose=False):
    """
    Serialize `data` as a single JSON document to `path`.

    :param data: JSON-serializable object to write
    :param path: destination file path
    :param verbose: print a completion message
    """
    with open(path, 'w') as handle:
        json.dump(data, handle)
    if verbose:
        print(('[I] file written complete: ' + path))
|
def readTSVFile(path, verbose=False):
    """
    Read a tab-separated file into a list of rows.

    Each line is whitespace-stripped and split on tab characters.

    :param path: file path to read
    :param verbose: print a completion message with the row count
    :return: list of rows, each a list of column strings
    """
    with open(path, 'r') as handle:
        rows = [raw.strip().split('\t') for raw in handle]
    if verbose:
        print('[I] file read complete with length', len(rows))
    return rows
|
def readJSONLine(path, verbose=False):
    """
    Read a JSON-lines file (one JSON document per line) into a list.

    :param path: file path to read
    :param verbose: print a completion message
    :return: list of parsed JSON objects, one per line
    """
    parsed = [json.loads(raw.strip()) for raw in readTXTFile(path)]
    if verbose:
        print('[I] file read complete')
    return parsed
|
def writeJSONLine(path, data, verbose=False):
    """
    Write `data` to `path` in JSON-lines format (one document per line).

    :param path: destination file path
    :param data: iterable of JSON-serializable objects
    :param verbose: print a completion message
    """
    with open(path, 'w') as handle:
        for record in data:
            json.dump(record, handle)
            handle.write('\n')
    if verbose:
        print(('[I] file written complete: ' + path))
|
def taggingSeperate(line):
    """
    Split tagging results into parallel ([token], [tag]) lists.

    Each whitespace-separated item has the form token/TAG. Any '/' inside the
    token itself is dropped when the pieces are re-joined (see the URL in the
    example below) -- this matches the original behaviour.

    :param line: one line of output from the tagging tool
    :return: (tokens, tags) tuple of equal-length lists

    EXAMPLE

    Input: james/B-person ball/I-person |/O https://t.co/gs20umuhr3/O
    Output: ['james', 'ball', '|', 'https:t.cogs20umuhr3'],
            ['B-person', 'I-person', 'O', 'O']
    """
    tweet = []
    tags = []
    for item in line.strip().split(' '):
        pieces = item.split('/')
        # everything before the final '/' is the token, concatenated without
        # separators; the final piece is the tag
        tweet.append(''.join(pieces[:-1]))
        tags.append(pieces[-1])
    return (tweet, tags)
|
def getEntitySegClass(tweet, annot, lower=False, getIndices=True):
    """
    Collect the token segments that carry entity (BIO) annotations.

    Walks the tag sequence: a 'B-' tag opens a segment, an 'O' tag closes it,
    and any other tag (e.g. 'I-person') implicitly extends the open segment.

    :param tweet: a single tweet, already split into tokens
    :param annot: corresponding BIO tags, same length as `tweet`
    :param lower: lowercase the extracted segment text
    :param getIndices: also return (indices, tag) info alongside each segment
    :return: list of segments; plain strings when getIndices is False

    ATT: input should be a single tweet

    EXAMPLE

    Input: ['james', 'ball', '|', 'citigroup', ...], ['B-person', 'I-person', 'O', 'O', ...]
    Output: [('james ball', ((0, 2), 'B-person'))]

    NOTE(review): the result-tuple format is INCONSISTENT across branches --
    segments closed by an 'O' tag are (text, ((start, end), tag)) as in the
    example, while segments closed by an adjacent 'B-' tag or by the end of
    the tweet are the flat 3-tuple (text, (start, end, tag)). Confirm which
    format downstream callers expect before unifying.
    """
    start = None  # index where the currently-open entity segment began, or None
    result = []
    for i in range(len(tweet)):
        if ('B-' in annot[i]):
            # a new entity starts; flush any segment that was still open
            if (start != None):
                if getIndices:
                    # NOTE(review): this condition can never be False (start is
                    # always a valid token index < len(tweet)), and both
                    # branches append the identical flat 3-tuple anyway
                    if (start != len(tweet)):
                        result.append((' '.join(tweet[start:i]), (start, i, annot[start])))
                    else:
                        result.append((' '.join(tweet[start:i]), (start, i, annot[start])))
                else:
                    result.append(' '.join(tweet[start:i]))
            start = i
        elif ((annot[i] == 'O') and (start != None)):
            # 'O' closes the open segment; nested ((start, i), tag) format here
            if getIndices:
                result.append((' '.join(tweet[start:i]), ((start, i), annot[start])))
            else:
                result.append(' '.join(tweet[start:i]))
            start = None
    # flush a segment that runs to the end of the tweet (uses the final loop
    # index i; safe because start is only non-None after the loop has run)
    if (start != None):
        if getIndices:
            result.append((' '.join(tweet[start:(i + 1)]), (start, (i + 1), annot[start])))
        else:
            result.append(' '.join(tweet[start:(i + 1)]))
    if lower:
        if getIndices:
            result = [(x[0].lower(), x[1]) for x in result]
        else:
            result = [x.lower() for x in result]
    return result
|
def replaceEntityTarget(ent_tuple, tweet, tag):
    """
    Collapse an entity span to the single token '<TARGET>' (tags become 'MOD').

    Both `tweet` and `tag` are modified IN PLACE and also returned, matching
    the original del/insert behaviour.

    :param ent_tuple: (entity_text, indices) where indices[0] / indices[1]
                      are the start / end token positions of the entity
    :param tweet: tweet tokens (already split); mutated in place
    :param tag: corresponding tags (already split); mutated in place
    :return: (tweet, tag) with the span collapsed to '<TARGET>' / 'MOD'

    EXAMPLE

    Input: ent_tuple = ('james ball', (0, 2))
    Output: (['<TARGET>', '|', ...], ['MOD', 'O', ...])
    """
    entity_text = ent_tuple[0]
    span = ent_tuple[1]
    span_start = span[0]
    span_end = span[1]
    # sanity check: the span really covers the entity text
    assert (entity_text == ' '.join(tweet[span_start:span_end]))
    # slice assignment mutates the caller's lists in place, exactly like the
    # original del + insert pair
    tweet[span_start:span_end] = ['<TARGET>']
    tag[span_start:span_end] = ['MOD']
    return (tweet, tag)
|
class CFGTrainer(object):
    """Training / evaluation driver for the CFG graph-matching task."""

    def __init__(self, node_init_dims, data_dir, device, log_file, best_model_file, args):
        """
        :param node_init_dims: dimensionality of the initial node features
        :param data_dir: directory holding the CFG dataset
        :param device: torch device for the model and targets
        :param log_file: path passed to write_log_file
        :param best_model_file: checkpoint path for the best validation model
        :param args: parsed arguments (epochs, batch_size, lr, model config)
        """
        super(CFGTrainer, self).__init__()
        self.max_epoch = args.epochs
        self.batch_size = args.batch_size
        self.lr = args.lr
        self.device = device
        self.log_file = log_file
        self.best_model_path = best_model_file
        self.model = MultiLevelGraphMatchNetwork(node_init_dims=node_init_dims, arguments=args, device=device).to(device)
        write_log_file(self.log_file, str(self.model))
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        cfg = CFGDataset(data_dir=data_dir, batch_size=self.batch_size)
        self.graph_train = cfg.graph_train
        self.classes_train = cfg.classes_train
        self.epoch_data_valid = cfg.valid_epoch
        self.epoch_data_test = cfg.test_epoch
        # log the untrained baseline AUC for reference
        init_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
        write_log_file(self.log_file, 'Initial Validation AUC = {0} @ {1}'.format(init_val_auc, datetime.now()))

    def fit(self):
        """Train for max_epoch epochs, checkpointing whenever validation AUC improves."""
        best_val_auc = None
        for i in range(1, (self.max_epoch + 1)):
            loss_avg = self.train_one_epoch(model=self.model, optimizer=self.optimizer, graphs=self.graph_train, classes=self.classes_train, batch_size=self.batch_size, device=self.device, load_data=None)
            write_log_file(self.log_file, 'EPOCH {0}/{1}:\tMSE loss = {2} @ {3}'.format(i, self.max_epoch, loss_avg, datetime.now()))
            valid_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
            write_log_file(self.log_file, 'Validation AUC = {0} @ {1}'.format(valid_auc, datetime.now()))
            if ((best_val_auc is None) or (best_val_auc < valid_auc)):
                write_log_file(self.log_file, 'Validation AUC increased ({} ---> {}), and saving the model ... '.format(best_val_auc, valid_auc))
                best_val_auc = valid_auc
                torch.save(self.model.state_dict(), self.best_model_path)
        write_log_file(self.log_file, 'Best Validation auc = {} '.format(best_val_auc))
        return best_val_auc

    def testing(self):
        """Reload the best checkpoint, re-verify validation AUC, and report test AUC."""
        self.model.load_state_dict(torch.load(self.best_model_path))
        self.model.eval()
        double_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
        final_test_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_test)
        write_log_file(self.log_file, '\nDouble check for the saved best checkpoint model for validation {} '.format(double_val_auc))
        write_log_file(self.log_file, 'Finally, testing auc = {} @ {}'.format(final_test_auc, datetime.now()))
        return final_test_auc

    @staticmethod
    def train_one_epoch(model, optimizer, graphs, classes, batch_size, device, load_data=None):
        """Run one training epoch over shuffled batches; return the mean MSE loss (float)."""
        model.train()
        if (load_data is None):
            epoch_data = generate_epoch_pair(graphs, classes, batch_size)
        else:
            epoch_data = load_data
        perm = np.random.permutation(len(epoch_data))
        # BUG FIX: guard the progress-print modulus -- with fewer than 10
        # batches, int(len(perm) / 10) was 0 and raised ZeroDivisionError
        print_every = max(1, (len(perm) // 10))
        cum_loss = 0.0
        num = 0
        for index in perm:
            cur_data = epoch_data[index]
            (x1, x2, adj1, adj2, y) = cur_data
            batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
            y = torch.FloatTensor(y).to(device)
            mse_loss = torch.nn.functional.mse_loss(batch_output, y)
            optimizer.zero_grad()
            mse_loss.backward()
            optimizer.step()
            # BUG FIX: accumulate the Python float, not the tensor, so the
            # autograd graph of every batch is not kept alive across the epoch
            cum_loss += mse_loss.item()
            if ((num % print_every) == 0):
                print('\tTraining: {}/{}: index = {} loss = {}'.format(num, len(epoch_data), index, mse_loss))
            num = (num + 1)
        return (cum_loss / len(perm))

    @staticmethod
    def eval_auc_epoch(model, eval_epoch_data):
        """Evaluate ROC-AUC of the model's similarity scores over a list of batches."""
        model.eval()
        with torch.no_grad():
            tot_diff = []
            tot_truth = []
            for cur_data in eval_epoch_data:
                (x1, x2, adj1, adj2, y) = cur_data
                batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
                tot_diff += list(batch_output.data.cpu().numpy())
                tot_truth += list((y > 0))
            # map similarity scores into [0, 1] before computing the ROC curve
            diff = (np.array(tot_diff) * (- 1))
            truth = np.array(tot_truth)
            (fpr, tpr, _) = roc_curve(truth, ((1 - diff) / 2))
            model_auc = auc(fpr, tpr)
        return model_auc
|
class GEDTrainer(object):
    """Training / evaluation driver for the graph-edit-distance regression task."""

    def __init__(self, data_dir, device, best_model_path, args, log_path):
        """
        :param data_dir: main directory of the GED dataset
        :param device: torch device for the model and targets
        :param best_model_path: checkpoint path for the best validation model
        :param args: parsed arguments (iterations, batch_size, lr, model config)
        :param log_path: path passed to write_log_file
        """
        super(GEDTrainer, self).__init__()
        self.max_iterations = args.iterations
        self.iter_val_start = args.iter_val_start
        self.iter_val_every = args.iter_val_every
        self.batch_size = args.batch_size
        self.lr = args.lr
        self.device = device
        # full validation split, loaded lazily and cached after first use
        self.validation_data = None
        self.best_model_path = best_model_path
        self.log_file = log_path
        self.dataset = GEDDataset(ged_main_dir=data_dir, args=args)
        self.flag_inclusive = args.inclusive
        write_log_file(self.log_file, str(args))
        self.model = MultiLevelGraphMatchNetwork(node_init_dims=self.dataset.input_dim, arguments=args, device=self.device).to(self.device)
        write_log_file(self.log_file, str(self.model))
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr)
        print('\n\n', self.model.state_dict().keys())

    def _need_val(self, iteration):
        """Return True when a validation pass is due at this iteration."""
        return ((iteration >= self.iter_val_start) and ((iteration % self.iter_val_every) == 0))

    def batch_pairs_predication(self, batch_feature_1, batch_adjacent_1, batch_mask_1, batch_feature_2, batch_adjacent_2, batch_mask_2):
        """Predict scores for a batch of graph pairs (the mask arguments are currently unused)."""
        feature_1 = np.array(batch_feature_1)
        feature_2 = np.array(batch_feature_2)
        adj_1 = np.array(batch_adjacent_1)
        adj_2 = np.array(batch_adjacent_2)
        predictions = self.model(batch_x_p=feature_1, batch_x_h=feature_2, batch_adj_p=adj_1, batch_adj_h=adj_2)
        return predictions

    def training_batch_predication(self, batch_feature_1, batch_adjacent_1, batch_mask_1, batch_feature_2, batch_adjacent_2, batch_mask_2, ged_pairs):
        """One optimization step on a training batch; return (loss, stacked true/pred)."""
        self.model.train()
        self.optimizer.zero_grad()
        predictions = self.batch_pairs_predication(batch_feature_1, batch_adjacent_1, batch_mask_1, batch_feature_2, batch_adjacent_2, batch_mask_2)
        trues = torch.from_numpy(np.array(ged_pairs, dtype=np.float32)).to(self.device)
        loss = functional.mse_loss(predictions, trues)
        loss.backward()
        self.optimizer.step()
        return (loss.item(), torch.stack((trues, predictions), 1))

    def val_batch_predication(self, batch_feature_1, batch_adjacent_1, batch_mask_1, batch_feature_2, batch_adjacent_2, batch_mask_2, ged_pairs):
        """Score the whole validation set in mini-batches; return (loss, true/pred pairs, wall time)."""
        st_time = datetime.now()
        self.model.eval()
        nr_examples = batch_adjacent_1.shape[0]
        assert ((batch_feature_1.shape[0] == batch_adjacent_1.shape[0]) and (batch_feature_2.shape[0] == batch_adjacent_2.shape[0]))
        st = 0
        batch_size = self.batch_size
        predictions = []
        with torch.no_grad():
            while (st < nr_examples):
                ed = min((st + batch_size), nr_examples)
                feature_1 = batch_feature_1[st:ed]
                feature_2 = batch_feature_2[st:ed]
                adjacent_1 = batch_adjacent_1[st:ed]
                adjacent_2 = batch_adjacent_2[st:ed]
                mask_1 = batch_mask_1[st:ed]
                mask_2 = batch_mask_2[st:ed]
                batch_pred = self.batch_pairs_predication(feature_1, adjacent_1, mask_1, feature_2, adjacent_2, mask_2)
                predictions.append(batch_pred)
                st = ed
        predictions = torch.cat(predictions)
        trues = torch.from_numpy(np.array(ged_pairs, dtype=np.float32)).to(self.device)
        loss = torch.nn.functional.mse_loss(predictions, trues)
        return (loss.data.item(), np.stack((trues.cpu().detach().numpy(), predictions.cpu().detach().numpy()), 1), (datetime.now() - st_time))

    def testing_prediction(self):
        """Predict the full test-vs-train/val score matrix, one test graph (row) at a time."""
        results = np.zeros((len(self.dataset.testing_graphs), len(self.dataset.train_val_graphs)))
        write_log_file(self.log_file, 'result shape is {} '.format(results.shape))
        for row in range(len(self.dataset.testing_graphs)):
            (batch_rows_feature, batch_rows_adjacent, batch_rows_mask, batch_cols_feature, batch_cols_adjacent, batch_cols_mask) = self.dataset.extract_test_matrices(row)
            st = 0
            pred = []
            while (st < len(self.dataset.train_val_graphs)):
                ed = min((st + self.batch_size), len(self.dataset.train_val_graphs))
                batch_rows_feature_small = batch_rows_feature[st:ed]
                batch_rows_adjacent_small = batch_rows_adjacent[st:ed]
                batch_rows_mask_small = batch_rows_mask[st:ed]
                batch_cols_feature_small = batch_cols_feature[st:ed]
                batch_cols_adjacent_small = batch_cols_adjacent[st:ed]
                batch_cols_mask_small = batch_cols_mask[st:ed]
                with torch.no_grad():
                    cur_pred = self.batch_pairs_predication(batch_rows_feature_small, batch_rows_adjacent_small, batch_rows_mask_small, batch_cols_feature_small, batch_cols_adjacent_small, batch_cols_mask_small)
                pred.append(cur_pred)
                st = ed
            pred = torch.cat(pred)
            results[row] = pred.detach().cpu().numpy()
        return results

    def fit(self):
        """Train for max_iterations batches, checkpointing on the best validation MSE."""
        self.model.train()
        time = datetime.now()
        best_val_loss = None
        # BUG FIX: guard the progress-log modulus -- with max_iterations < 20,
        # int(self.max_iterations / 20) was 0 and raised ZeroDivisionError
        log_every = max(1, (self.max_iterations // 20))
        for iteration in range(self.max_iterations):
            (batch_feature_1, batch_adj_1, batch_mask_1, batch_feature_2, batch_adj_2, batch_mask_2, batch_ged) = self.dataset.get_training_batch()
            (train_loss, train_true_pred) = self.training_batch_predication(batch_feature_1, batch_adj_1, batch_mask_1, batch_feature_2, batch_adj_2, batch_mask_2, batch_ged)
            if ((iteration % log_every) == 0):
                time_spent = (datetime.now() - time)
                time = datetime.now()
                write_log_file(self.log_file, 'Iteration = {}\tbatch loss={} (e-3) @ {}'.format(iteration, (train_loss * 1000), time_spent))
            if self._need_val(iteration=iteration):
                self.model.eval()
                # load-and-cache the validation split on first use
                # (the two duplicated if/else unpack branches were merged)
                if (self.validation_data is None):
                    self.validation_data = self.dataset.get_all_validation()
                (val_feature_1, val_adj_1, val_mask_1, val_feature_2, val_adj_2, val_mask_2, val_ged) = self.validation_data
                (val_loss, val_true_pred, time_spent) = self.val_batch_predication(val_feature_1, val_adj_1, val_mask_1, val_feature_2, val_adj_2, val_mask_2, val_ged)
                write_log_file(self.log_file, '\nvalidation iteration={}, loss={}(e-3), spend time = {} @ {}'.format(iteration, (val_loss * 1000), time_spent, datetime.now()))
                # BUG FIX: compare against None explicitly so a legitimate best
                # loss of exactly 0.0 is not mistaken for "not set yet"
                if ((best_val_loss is None) or (val_loss <= best_val_loss)):
                    # BUG FIX: scale the previous best by 1000 as well, so both
                    # numbers in the message share the (e-3) unit
                    prev_scaled = None if (best_val_loss is None) else (best_val_loss * 1000)
                    write_log_file(self.log_file, '\tvalidation mse decreased ( {} ---> {} (e-3) ), and save the model ... '.format(prev_scaled, (val_loss * 1000)))
                    best_val_loss = val_loss
                    torch.save(self.model.state_dict(), self.best_model_path)
        write_log_file(self.log_file, '\tbest validation mse = {} (e-3)'.format((best_val_loss * 1000)))

    def testing(self):
        """Load the best checkpoint, re-check validation loss, and report test metrics."""
        self.model.load_state_dict(torch.load(self.best_model_path))
        self.model.eval()
        self.model.to(self.device)
        # merged the duplicated if/else unpack branches
        if (self.validation_data is None):
            self.validation_data = self.dataset.get_all_validation()
        (val_feature_1, val_adj_1, val_mask_1, val_feature_2, val_adj_2, val_mask_2, val_ged) = self.validation_data
        (val_loss, val_true_pred, time_spent) = self.val_batch_predication(val_feature_1, val_adj_1, val_mask_1, val_feature_2, val_adj_2, val_mask_2, val_ged)
        write_log_file(self.log_file, '\nDouble check validation, loss = {}(e-3) @ {}'.format((val_loss * 1000), datetime.now()))
        test_predictions = self.testing_prediction()
        test_mse = metrics_mean_square_error(self.dataset.ground_truth.flatten(), test_predictions.flatten())
        test_rho = metrics_spearmanr_rho(self.dataset.ground_truth.flatten(), test_predictions.flatten())
        test_tau = metrics_kendall_tau(self.dataset.ground_truth.flatten(), test_predictions.flatten())
        (ps, inclusive_true_ks, inclusive_pred_ks) = computing_precision_ks(trues=self.dataset.ground_truth, predictions=test_predictions, ks=[10, 20], inclusive=self.flag_inclusive, rm=0)
        test_results = {'mse': test_mse, 'rho': test_rho, 'tau': test_tau, 'test_p10': ps[0], 'test_p20': ps[1]}
        write_log_file(self.log_file, 'Test results:')
        for (k, v) in test_results.items():
            write_log_file(self.log_file, '\t {} = {}'.format(k, v))
|
class DenseGGNN(nn.Module):
    """Adapter that lets GatedGraphConv consume dense batched adjacency matrices."""

    def __init__(self, out_channels, num_layers=1):
        super(DenseGGNN, self).__init__()
        self.model = GatedGraphConv(out_channels=out_channels, num_layers=num_layers)

    def forward(self, x, adj, **kwargs):
        """
        :param x: node features, shape (batch, nodes, dim)
        :param adj: dense adjacency matrices, shape (batch, nodes, nodes)
        :return: updated node features, shape (batch, nodes, out_channels)
        """
        batch, nodes, dim = x.size()
        # convert each dense adjacency to sparse edge indices, offsetting the
        # node ids so all graphs form one big disconnected batch graph
        offset_edges = [(dense_to_sparse(adj[i])[0] + (i * nodes)) for i in range(batch)]
        edge_index = torch.cat(offset_edges, dim=1)
        flat_nodes = x.reshape(-1, dim)
        output = self.model(flat_nodes, edge_index)
        return output.reshape(batch, nodes, -1)
|
class MultiLevelGraphMatchNetwork(torch.nn.Module):
    # Multi-level graph matching network: encodes two graphs with stacked dense
    # graph convolutions, cross-attends their node embeddings, applies
    # multi-perspective node-graph matching, aggregates, and predicts either a
    # sigmoid regression score or a cosine similarity (classification).

    def __init__(self, node_init_dims, arguments, device):
        """
        :param node_init_dims: dimensionality of the initial node features
        :param arguments: parsed config (filters, conv, global_flag/global_agg,
                          perspectives, match, match_agg, hidden_size, task, dropout)
        :param device: torch device inputs are moved to in forward()
        """
        super(MultiLevelGraphMatchNetwork, self).__init__()
        self.node_init_dims = node_init_dims
        self.args = arguments
        self.device = device
        self.dropout = arguments.dropout
        # e.g. filters '100_100_64' -> one conv layer per listed width
        filters = self.args.filters.split('_')
        self.gcn_filters = [int(n_filter) for n_filter in filters]
        self.gcn_numbers = len(self.gcn_filters)
        self.gcn_last_filter = self.gcn_filters[(- 1)]
        # per-layer constructor kwargs for each supported conv type
        gcn_parameters = [dict(in_channels=self.gcn_filters[(i - 1)], out_channels=self.gcn_filters[i], bias=True) for i in range(1, self.gcn_numbers)]
        gcn_parameters.insert(0, dict(in_channels=node_init_dims, out_channels=self.gcn_filters[0], bias=True))
        gin_parameters = [dict(nn=nn.Linear(in_features=self.gcn_filters[(i - 1)], out_features=self.gcn_filters[i])) for i in range(1, self.gcn_numbers)]
        gin_parameters.insert(0, {'nn': nn.Linear(in_features=node_init_dims, out_features=self.gcn_filters[0])})
        ggnn_parameters = [dict(out_channels=self.gcn_filters[i]) for i in range(self.gcn_numbers)]
        conv_layer_constructor = {'gcn': dict(constructor=DenseGCNConv, kwargs=gcn_parameters), 'graphsage': dict(constructor=DenseSAGEConv, kwargs=gcn_parameters), 'gin': dict(constructor=DenseGINConv, kwargs=gin_parameters), 'ggnn': dict(constructor=DenseGGNN, kwargs=ggnn_parameters)}
        conv = conv_layer_constructor[self.args.conv]
        constructor = conv['constructor']
        # register conv layers as attributes gc1..gcN so they are tracked as submodules
        setattr(self, 'gc{}'.format(1), constructor(**conv['kwargs'][0]))
        for i in range(1, self.gcn_numbers):
            setattr(self, 'gc{}'.format((i + 1)), constructor(**conv['kwargs'][i]))
        self.global_flag = self.args.global_flag
        if (self.global_flag is True):
            # optional whole-graph aggregation branch appended to the match vector
            self.global_agg = self.args.global_agg
            if (self.global_agg.lower() == 'max_pool'):
                print('Only Max Pooling')
            elif (self.global_agg.lower() == 'fc_max_pool'):
                self.global_fc_agg = nn.Linear(self.gcn_last_filter, self.gcn_last_filter)
            elif (self.global_agg.lower() == 'mean_pool'):
                print('Only Mean Pooling')
            elif (self.global_agg.lower() == 'fc_mean_pool'):
                self.global_fc_agg = nn.Linear(self.gcn_last_filter, self.gcn_last_filter)
            elif (self.global_agg.lower() == 'lstm'):
                self.global_lstm_agg = nn.LSTM(input_size=self.gcn_last_filter, hidden_size=self.gcn_last_filter, num_layers=1, bidirectional=True, batch_first=True)
            else:
                raise NotImplementedError
        self.perspectives = self.args.perspectives
        if (self.args.match.lower() == 'node-graph'):
            # learnable multi-perspective weights: (perspectives, last conv width)
            self.mp_w = nn.Parameter(torch.rand(self.perspectives, self.gcn_last_filter))
            self.lstm_input_size = self.perspectives
        else:
            raise NotImplementedError
        self.hidden_size = self.args.hidden_size
        if (self.args.match_agg.lower() == 'bilstm'):
            self.agg_bilstm = nn.LSTM(input_size=self.lstm_input_size, hidden_size=self.hidden_size, num_layers=1, bidirectional=True, batch_first=True)
        elif ((self.args.match_agg.lower() == 'fc_avg') or (self.args.match_agg.lower() == 'fc_max')):
            self.fc_agg = nn.Linear(self.lstm_input_size, self.lstm_input_size)
        elif ((self.args.match_agg.lower() == 'avg') or (self.args.match_agg.lower() == 'max')):
            pass
        else:
            raise NotImplementedError
        if (self.args.task.lower() == 'regression'):
            # `factor` widens the prediction MLP to fit the concatenated
            # aggregation vectors (bidirectional LSTM doubles the width)
            if (self.global_flag is True):
                if (self.global_agg.lower() == 'lstm'):
                    factor_global = 2
                else:
                    factor_global = 1
            else:
                factor_global = 0
            if (self.args.match_agg == 'bilstm'):
                factor_match_agg = 2
            else:
                factor_match_agg = 1
            factor = (factor_match_agg + factor_global)
            self.predict_fc1 = nn.Linear(int(((self.hidden_size * 2) * factor)), int((self.hidden_size * factor)))
            self.predict_fc2 = nn.Linear(int((self.hidden_size * factor)), int(((self.hidden_size * factor) / 2)))
            self.predict_fc3 = nn.Linear(int(((self.hidden_size * factor) / 2)), int(((self.hidden_size * factor) / 4)))
            self.predict_fc4 = nn.Linear(int(((self.hidden_size * factor) / 4)), 1)
        elif (self.args.task.lower() == 'classification'):
            print('classification task')
        else:
            raise NotImplementedError

    def global_aggregation_info(self, v, agg_func_name):
        """
        Aggregate node embeddings into one vector per graph.

        :param v: node embeddings, (batch, len, dim)
        :param agg_func_name: one of max_pool / fc_max_pool / mean_pool /
                              fc_mean_pool / lstm
        :return: (batch, dim) pooled vector; for 'lstm' the bidirectional last
                 hidden states are flattened to (batch, 2 * dim)
        """
        if (agg_func_name.lower() == 'max_pool'):
            agg_v = torch.max(v, 1)[0]
        elif (agg_func_name.lower() == 'fc_max_pool'):
            agg_v = self.global_fc_agg(v)
            agg_v = torch.max(agg_v, 1)[0]
        elif (agg_func_name.lower() == 'mean_pool'):
            agg_v = torch.mean(v, dim=1)
        elif (agg_func_name.lower() == 'fc_mean_pool'):
            agg_v = self.global_fc_agg(v)
            agg_v = torch.mean(agg_v, dim=1)
        elif (agg_func_name.lower() == 'lstm'):
            # agg_v_last: (2, batch, dim) -> (batch, 2 * dim)
            (_, (agg_v_last, _)) = self.global_lstm_agg(v)
            agg_v = agg_v_last.permute(1, 0, 2).contiguous().view((- 1), (self.gcn_last_filter * 2))
        else:
            raise NotImplementedError
        return agg_v

    @staticmethod
    def div_with_small_value(n, d, eps=1e-08):
        # numerically safe division: denominators <= eps are clamped to eps
        d = ((d * (d > eps).float()) + (eps * (d <= eps).float()))
        return (n / d)

    def cosine_attention(self, v1, v2):
        """
        Pairwise cosine similarity between the nodes of two graphs.

        :param v1: (batch, len1, dim)
        :param v2: (batch, len2, dim)
        :return: (batch, len1, len2)
        """
        a = torch.bmm(v1, v2.permute(0, 2, 1))
        v1_norm = v1.norm(p=2, dim=2, keepdim=True)
        v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)
        d = (v1_norm * v2_norm)
        return self.div_with_small_value(a, d)

    def multi_perspective_match_func(self, v1, v2, w):
        """
        Multi-perspective cosine matching of aligned node vectors.

        :param v1: (batch, len, dim)
        :param v2: (batch, len, dim)
        :param w: learnable perspective weights, (perspectives, dim)
        :return: (batch, len, perspectives)
        """
        # broadcast w to (1, 1, dim, perspectives) and weight each perspective copy
        w = w.transpose(1, 0).unsqueeze(0).unsqueeze(0)
        v1 = (w * torch.stack(([v1] * self.perspectives), dim=3))
        v2 = (w * torch.stack(([v2] * self.perspectives), dim=3))
        return functional.cosine_similarity(v1, v2, dim=2)

    def forward_dense_gcn_layers(self, feat, adj):
        # stack of conv -> ReLU -> dropout; returns the last layer's node embeddings
        feat_in = feat
        for i in range(1, (self.gcn_numbers + 1)):
            feat_out = functional.relu(getattr(self, 'gc{}'.format(i))(x=feat_in, adj=adj, mask=None, add_loop=False), inplace=True)
            feat_out = functional.dropout(feat_out, p=self.dropout, training=self.training)
            feat_in = feat_out
        return feat_out

    def forward(self, batch_x_p, batch_x_h, batch_adj_p, batch_adj_h):
        """
        Score a batch of graph pairs (p = premise graph, h = hypothesis graph).

        :param batch_x_p: node features of graph p -- assumed (batch, nodes, feat);
                          TODO confirm against the data loaders
        :param batch_x_h: node features of graph h (same layout assumption)
        :param batch_adj_p: dense adjacency of graph p
        :param batch_adj_h: dense adjacency of graph h
        :return: (batch,) scores -- sigmoid outputs for 'regression',
                 cosine similarity in [-1, 1] for 'classification'
        """
        feature_p_init = torch.FloatTensor(batch_x_p).to(self.device)
        adj_p = torch.FloatTensor(batch_adj_p).to(self.device)
        feature_h_init = torch.FloatTensor(batch_x_h).to(self.device)
        adj_h = torch.FloatTensor(batch_adj_h).to(self.device)
        # node embeddings after the conv stack
        feature_p = self.forward_dense_gcn_layers(feat=feature_p_init, adj=adj_p)
        feature_h = self.forward_dense_gcn_layers(feat=feature_h_init, adj=adj_h)
        # cross-graph attention and attention-weighted mean alignments
        attention = self.cosine_attention(feature_p, feature_h)
        attention_h = (feature_h.unsqueeze(1) * attention.unsqueeze(3))
        attention_p = (feature_p.unsqueeze(2) * attention.unsqueeze(3))
        att_mean_h = self.div_with_small_value(attention_h.sum(dim=2), attention.sum(dim=2, keepdim=True))
        att_mean_p = self.div_with_small_value(attention_p.sum(dim=1), attention.sum(dim=1, keepdim=True).permute(0, 2, 1))
        if (self.args.match.lower() == 'node-graph'):
            # match each node against the other graph's aligned representation
            multi_p = self.multi_perspective_match_func(v1=feature_p, v2=att_mean_h, w=self.mp_w)
            multi_h = self.multi_perspective_match_func(v1=feature_h, v2=att_mean_p, w=self.mp_w)
        else:
            raise NotImplementedError
        match_p = multi_p
        match_h = multi_h
        if (self.args.match_agg.lower() == 'bilstm'):
            # NOTE(review): the explicit (h0, c0) are sized with gcn_last_filter,
            # while agg_bilstm was built with hidden_size -- these agree only
            # when the two widths are equal; confirm the intended configuration.
            p_agg_bilstm_h0 = torch.zeros((2 * 1), match_p.size(0), self.gcn_last_filter, dtype=torch.float32).to(self.device)
            p_agg_bilstm_c0 = torch.zeros((2 * 1), match_p.size(0), self.gcn_last_filter, dtype=torch.float32).to(self.device)
            h_agg_bilstm_h0 = torch.zeros((2 * 1), match_h.size(0), self.gcn_last_filter, dtype=torch.float32).to(self.device)
            h_agg_bilstm_c0 = torch.zeros((2 * 1), match_h.size(0), self.gcn_last_filter, dtype=torch.float32).to(self.device)
            (_, (agg_p_last, _)) = self.agg_bilstm(match_p, (p_agg_bilstm_h0, p_agg_bilstm_c0))
            agg_p = agg_p_last.permute(1, 0, 2).contiguous().view((- 1), (self.hidden_size * 2))
            (_, (agg_h_last, _)) = self.agg_bilstm(match_h, (h_agg_bilstm_h0, h_agg_bilstm_c0))
            agg_h = agg_h_last.permute(1, 0, 2).contiguous().view((- 1), (self.hidden_size * 2))
        elif (self.args.match_agg.lower() == 'avg'):
            agg_p = torch.mean(match_p, dim=1)
            agg_h = torch.mean(match_h, dim=1)
        elif (self.args.match_agg.lower() == 'fc_avg'):
            agg_p = torch.mean(self.fc_agg(match_p), dim=1)
            agg_h = torch.mean(self.fc_agg(match_h), dim=1)
        elif (self.args.match_agg.lower() == 'max'):
            agg_p = torch.max(match_p, dim=1)[0]
            agg_h = torch.max(match_h, dim=1)[0]
        elif (self.args.match_agg.lower() == 'fc_max'):
            agg_p = torch.max(self.fc_agg(match_p), dim=1)[0]
            agg_h = torch.max(self.fc_agg(match_h), dim=1)[0]
        else:
            raise NotImplementedError
        if (self.global_flag is True):
            # append the whole-graph aggregation to each side's match vector
            global_gcn_agg_p = self.global_aggregation_info(v=feature_p, agg_func_name=self.global_agg)
            global_gcn_agg_h = self.global_aggregation_info(v=feature_h, agg_func_name=self.global_agg)
            agg_p = torch.cat([agg_p, global_gcn_agg_p], dim=1)
            agg_h = torch.cat([agg_h, global_gcn_agg_h], dim=1)
        if (self.args.task.lower() == 'regression'):
            # 4-layer MLP head with dropout, squashed to (0, 1)
            x = torch.cat([agg_p, agg_h], dim=1)
            x = functional.dropout(x, p=self.dropout, training=self.training)
            x = functional.relu(self.predict_fc1(x))
            x = functional.dropout(x, p=self.dropout, training=self.training)
            x = functional.relu(self.predict_fc2(x))
            x = functional.dropout(x, p=self.dropout, training=self.training)
            x = functional.relu(self.predict_fc3(x))
            x = functional.dropout(x, p=self.dropout, training=self.training)
            x = self.predict_fc4(x)
            x = torch.sigmoid(x).squeeze((- 1))
            return x
        elif (self.args.task.lower() == 'classification'):
            sim = functional.cosine_similarity(agg_p, agg_h, dim=1).clamp(min=(- 1), max=1)
            return sim
        else:
            raise NotImplementedError
|
def train(model, train_loader, val_loader, batch_size, criterion, optimizer, target_accr=None, err_margin=(0.01, 0.01), best_accr=(0, 0), topk=(1, 5), lr_decay=0.1, saved_epoch=0, log='train.csv', pname='model.pth'):
    """Train `model` until the validation accuracies stop improving (or reach a target).

    Alternates: validate -> maybe checkpoint -> decide whether to stop -> train
    5 more epochs -> repeat.

    :param model: network to train (moved to CUDA by the caller)
    :param train_loader: training DataLoader
    :param val_loader: validation DataLoader
    :param batch_size: per-batch size, used only for progress/size estimates
    :param criterion: loss function
    :param optimizer: optimizer whose lr is decayed every 10 epochs
    :param target_accr: absolute accuracy targets per topk entry, or None to
        stop when improvement over the previous round falls below err_margin
    :param err_margin: per-metric slack used by the stopping criterion
    :param best_accr: initial per-metric thresholds for checkpointing
    :param topk: k values reported by `accuracy`
    :param lr_decay: base of the learning-rate decay factor
    :param saved_epoch: epoch counter to resume from
    :param log: CSV file appended with per-round validation results
    :param pname: checkpoint file path
    """
    meters = {}
    for i in topk:
        meters[i] = AverageMeter()
    with open(log, 'a') as f:
        f.write((time.strftime('%b/%d/%Y %H:%M:%S', time.localtime()) + '\n'))
        f.write((('epoches, ' + ','.join(['top{}'.format(i) for i in topk])) + '\n'))
    num_epoch = saved_epoch
    # Number of training epochs run between two validation rounds.
    epoch = 5
    num_data = (len(train_loader) * batch_size)
    if (target_accr is None):
        old_accr = best_accr
    while True:
        model.eval()
        result = tuple(validate(model, batch_size, val_loader, topk, True))
        # Checkpoint when every validation metric is >= its best_accr entry.
        # NOTE(review): best_accr is never updated afterwards, so once the
        # initial thresholds are met the model is re-saved every round.
        if (len(list(filter((lambda t: (t[0] > t[1])), zip(best_accr, result)))) == 0):
            torch.save({'params': model.state_dict(), 'optim': optimizer.state_dict(), 'epoch': num_epoch}, pname)
        with open(log, 'a') as f:
            f.write((((str(num_epoch) + ',') + ','.join([str(r) for r in result])) + '\n'))
        # Stopping criterion: `break` out of this for-loop means "keep
        # training" (some metric still improving / still below target); the
        # for-else branch (no break) logs a timestamp and ends training.
        for (i, r) in enumerate(result):
            if (target_accr is None):
                if ((r - old_accr[i]) > err_margin[i]):
                    break
            elif ((target_accr[i] - r) > err_margin[i]):
                break
        else:
            with open(log, 'a') as f:
                f.write((time.strftime('%b/%d/%Y %H:%M:%S', time.localtime()) + '\n'))
            break
        if (target_accr is None):
            old_accr = result
        model.train()
        for e in range(epoch):
            for i in topk:
                meters[i].reset()
            print('Training on {} data'.format(num_data))
            for (i, data) in enumerate(train_loader, 0):
                index = 0  # NOTE(review): unused; dead assignment
                (inputs, labels) = data
                (inputs, labels) = (Variable(inputs).cuda(), Variable(labels).cuda())
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                result = accuracy(outputs.data, labels.data, topk)
                for (j, k) in enumerate(meters.keys()):
                    meters[k].update(result[j][0], inputs.size(0))
                loss.backward()
                optimizer.step()
                if ((i % 20) == 0):
                    print('Progress {:2.1%}'.format(((i * batch_size) / num_data)), end='\r')
            print('Epoch: [{0}]\t'.format(e))
            for k in meters.keys():
                print(' * Prec@{i} {topi.avg:.3f}%'.format(i=k, topi=meters[k]))
            num_epoch += 1
            # Every 10 epochs, shrink the lr. NOTE(review): the factor
            # lr_decay ** (num_epoch / 10) itself grows with num_epoch, so
            # the decay compounds very aggressively — confirm intended.
            if ((num_epoch % 10) == 0):
                for p in optimizer.param_groups:
                    if (p['lr'] > 1e-07):
                        p['lr'] *= (lr_decay ** (num_epoch / 10))
                        print('change lr to {}'.format(p['lr']))
|
def validate(model, batch_size, val_loader, topk=(1, 5), cuda=True):
    """Evaluate `model` on `val_loader` and report top-k precision.

    :param model: network in eval mode after this call
    :param batch_size: used only for the progress estimate
    :param val_loader: validation DataLoader yielding (input, target)
    :param topk: k values to report
    :param cuda: move batches to GPU when True
    :return: a *generator* over the average precision of each k, in topk order
    """
    meters = {}
    for i in topk:
        meters[i] = AverageMeter()
    model.eval()
    start = time.time()
    num_data = (len(val_loader) * batch_size)
    print('Validating on {} data'.format(num_data))
    for (i, (input, target)) in enumerate(val_loader):
        if cuda:
            input = input.cuda()
            target = target.cuda(non_blocking=True)
        input_var = Variable(input)
        target_var = Variable(target)  # NOTE(review): unused; accuracy uses raw `target`
        output = model(input_var)
        result = accuracy(output.data, target, topk)
        for (j, k) in enumerate(meters.keys()):
            meters[k].update(result[j][0], input.size(0))
        if ((i % 20) == 0):
            print('Progress {:2.1%}'.format(((i * batch_size) / num_data)), end='\r')
    time_elapse = (time.time() - start)
    print('\ninference time:', str(timedelta(seconds=time_elapse)))
    for k in meters.keys():
        print(' * Prec@{i} {topi.avg:.3f}%'.format(i=k, topi=meters[k]))
    # Lazily evaluated generator; callers wrap it in tuple() (see train()).
    return (meters[k].avg for k in meters.keys())
|
class AverageMeter(object):
    """Tracks the latest value and running (weighted) average of a metric."""

    def __init__(self):
        # Begin from a clean slate.
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0    # most recently recorded value
        self.avg = 0    # running mean over all updates
        self.sum = 0    # weighted sum of recorded values
        self.count = 0  # total weight recorded so far

    def update(self, val, n=1):
        """Record `val` observed with weight `n` and refresh the average."""
        self.val = val
        self.sum = self.sum + (val * n)
        self.count = self.count + n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output: (batch, num_classes) score tensor
    :param target: (batch,) tensor of ground-truth class indices
    :param topk: iterable of k values to report
    :return: list of 1-element tensors, the percentage of samples whose
        target is among the top-k predictions, one per k in topk order
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Top-maxk class indices per sample, transposed to shape (maxk, batch).
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        # Fixed: use reshape(-1) instead of view(-1). `correct` can inherit
        # the transposed (non-contiguous) memory layout of pred.t(), and
        # Tensor.view raises a stride-compatibility error on such slices in
        # newer PyTorch; reshape copies only when necessary.
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
        res.append(correct_k.mul_((100.0 / batch_size)))
    return res
|
def gen_loaders(path, BATCH_SIZE, NUM_WORKERS):
    """Build ImageNet-style train/val DataLoaders from `path`/train and `path`/val.

    :param path: dataset root containing 'train' and 'val' ImageFolder dirs
    :param BATCH_SIZE: batch size for both loaders
    :param NUM_WORKERS: worker processes per loader
    :return: (train_loader, val_loader)

    NOTE(review): transforms.RandomSizedCrop and transforms.Scale are the
    legacy names (renamed RandomResizedCrop / Resize in modern torchvision)
    — confirm the pinned torchvision version still provides them.
    NOTE(review): the val loader uses shuffle=True, which is unusual for
    evaluation — confirm intended.
    """
    traindir = os.path.join(path, 'train')
    valdir = os.path.join(path, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, pin_memory=True)
    return (train_loader, val_loader)
|
def main():
    """Entry point: decompose a pretrained CNN (CP or Tucker) and fine-tune or evaluate it.

    Reads the module-level argparse `parser`:
      --decomp cp|<other>  decomposition method (CP vs Tucker)
      --arch resnet50|alexnet
      --model / --resume / --state  checkpoints to load
      --val  evaluation-only mode
      --path dataset root
    """
    global args
    args = parser.parse_args()
    use_cp = (args.decomp == 'cp')
    use_model = (args.model is not None)
    use_param = (args.resume is not None)
    use_state = (args.state is not None)
    eval_mode = args.val
    decomp_func = (torch_cp_decomp if use_cp else tucker_decomp)
    if (args.arch == 'resnet50'):
        decomp_arch = decomp_resnet
    elif (args.arch == 'alexnet'):
        decomp_arch = decomp_alexnet
    else:
        sys.exit('architecture not supported')
    rank_func = (est_rank if use_cp else tucker_rank)
    BATCH_SIZE = 128
    NUM_WORKERS = 8
    DATA_PATH = args.path
    if (DATA_PATH is None):
        sys.exit('Path to dataset cannot be empty')
    tl.set_backend('pytorch')
    (train_loader, val_loader) = gen_loaders(DATA_PATH, BATCH_SIZE, NUM_WORKERS)
    net = models.__dict__[args.arch](pretrained=True)
    if use_model:
        # Restore a previously decomposed architecture + its weights.
        checkpoint = torch.load(args.model)
        arch = checkpoint['arch']
        params = checkpoint['params']
        for (n, m) in net.named_children():
            setattr(net, n, arch[n])
        net.load_state_dict(params)
    else:
        # Decompose the pretrained network and snapshot the result.
        net = decomp_arch(net, rank_func, decomp_func)
        torch.save({'arch': dict(net.named_children()), 'params': net.state_dict()}, ('cp_round_model.pth' if use_cp else 'tucker_round_model.pth'))
    lr = (1e-06 if use_cp else 0.001)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    epoch = 0
    if use_param:
        # Resume training state; k[7:] presumably strips the 'module.'
        # prefix that nn.DataParallel adds to parameter names.
        checkpoint = torch.load(args.resume)
        old_state = checkpoint['params']
        opt = checkpoint['optim']
        epoch = checkpoint['epoch']
        new_state = OrderedDict()
        for (k, v) in old_state.items():
            name = k[7:]
            new_state[name] = v
        net.load_state_dict(new_state)
        optimizer.load_state_dict(opt)
        # Move restored optimizer tensors (momentum buffers) back to GPU.
        for state in optimizer.state.values():
            for (k, v) in state.items():
                if torch.is_tensor(v):
                    state[k] = v.cuda()
    if use_state:
        # Load bare weights saved from a DataParallel-wrapped model.
        old_state = torch.load(args.state)
        new_state = OrderedDict()
        for (k, v) in old_state.items():
            name = k[7:]
            new_state[name] = v
        net.load_state_dict(new_state)
    net = nn.DataParallel(net).cuda()
    # Reference top-1/top-5 accuracies to aim for.
    target = (76.15, 92.87)
    criterion = nn.CrossEntropyLoss().cuda()
    # train()/validate() are called positionally via *train_args.values(),
    # so insertion order below must match their signatures exactly.
    train_args = OrderedDict()
    if (not eval_mode):
        train_args['model'] = net
        train_args['trainloader'] = train_loader
        train_args['testloader'] = val_loader
        train_args['batch_size'] = BATCH_SIZE
        train_args['criterion'] = criterion
        train_args['optimizer'] = optimizer
        train_args['target_accr'] = target
        train_args['err_margin'] = (1.5, 1.5)
        train_args['best_acc'] = (0, 0)
        train_args['topk'] = (1, 5)
        train_args['lr_decay'] = 0.8
        train_args['saved_epoch'] = epoch
        train_args['log'] = ('cp_acc.csv' if use_cp else 'tucker_acc.csv')
        train_args['pname'] = ('cp_best.pth' if use_cp else 'tucker_best.pth')
        train(*train_args.values())
        torch.save(net.state_dict(), ('cp_state.pth' if use_cp else 'tucker_state.pth'))
    else:
        train_args['model'] = net
        train_args['batch_size'] = BATCH_SIZE
        train_args['testloader'] = val_loader
        validate(*train_args.values())
|
def tucker_rank(layer):
    """Estimate Tucker-2 ranks for a conv layer via EVBMF on its mode unfoldings.

    :param layer: nn.Conv2d whose weight is (out, in, kH, kW)
    :return: [rank_out, rank_in], each rounded up to a multiple of 16
        (presumably for GPU-friendly tensor sizes — confirm)
    """
    W = layer.weight.data
    # Unfold along the output- and input-channel modes.
    mode3 = tl.base.unfold(W, 0)
    mode4 = tl.base.unfold(W, 1)
    # EVBMF presumably returns a (truncated) diagonal matrix of retained
    # singular values; its dimensions give the estimated ranks — confirm.
    diag_0 = EVBMF(mode3)
    diag_1 = EVBMF(mode4)
    d1 = diag_0.shape[0]
    d2 = diag_1.shape[1]
    del mode3
    del mode4
    del diag_0
    del diag_1
    return [int((np.ceil((d1 / 16)) * 16)), int((np.ceil((d2 / 16)) * 16))]
|
def est_rank(layer):
    """Estimate a single CP rank for a conv layer via EVBMF on its mode unfoldings.

    :param layer: nn.Conv2d
    :return: max of the two EVBMF-estimated ranks, rounded up to a multiple of 16
    """
    W = layer.weight.data
    mode3 = tl.base.unfold(W, 0)
    mode4 = tl.base.unfold(W, 1)
    diag_0 = EVBMF(mode3)
    diag_1 = EVBMF(mode4)
    # NOTE(review): tucker_rank reads diag_1.shape[1] but this reads
    # diag_1.shape[0]; equivalent only if EVBMF's result is square — confirm.
    return int((np.ceil((max([diag_0.shape[0], diag_1.shape[0]]) / 16)) * 16))
|
def decomp_alexnet(net, rank_func, decomp_func):
    """Replace each Conv2d in net.features (from index 1 on) with its decomposition.

    :param net: model exposing a `features` nn.Sequential
    :param rank_func: callable(layer) -> rank estimate
    :param decomp_func: callable(layer, rank) -> list of replacement layers
    :return: the same net, with conv layers decomposed in place
    """
    idx = 1
    while idx < len(net.features):
        candidate = net.features[idx]
        # Only conv layers get decomposed; everything else is skipped.
        if not isinstance(candidate, nn.Conv2d):
            idx += 1
            continue
        rank = rank_func(candidate)
        print('rank of the {}th layer: {}'.format(idx, rank))
        print('begin decomposing layer {}'.format(idx))
        replacement = decomp_func(candidate, rank)
        print('finished decomposing layer {}'.format(idx))
        prefix = list(net.features[:idx])
        suffix = list(net.features[(idx + 1):])
        net.features = nn.Sequential(*(prefix + replacement + suffix))
        # Jump past the freshly inserted layers.
        idx += len(replacement)
    return net
|
def decomp_resnet(net, rank_func, decomp_func):
    """Decompose the 3x3 `conv2` of every bottleneck block in a ResNet.

    Only replaces conv2 when the decomposed parameter count (product of the
    estimated ranks) is smaller than in_channels * out_channels.

    :param net: torchvision-style ResNet
    :param rank_func: callable(layer) -> int or [int, int] rank estimate
    :param decomp_func: callable(layer, rank) -> list of replacement layers
    :return: the same net, modified in place
    """
    mulfunc = (lambda x, y: (x * y))
    for (n, m) in net.named_children():
        num_children = sum((1 for i in m.children()))
        # Children with sub-modules are the layer1..layer4 stages; leaves
        # (conv1, bn1, fc, ...) are skipped.
        if (num_children != 0):
            layer = getattr(net, n)
            for i in range(num_children):
                bottleneck = layer[i]
                conv2 = getattr(bottleneck, 'conv2')
                rank = rank_func(conv2)
                # CP gives a single int rank; Tucker gives a [r_out, r_in] pair.
                if (type(rank) == int):
                    reduced = (rank ** 2)
                else:
                    reduced = reduce(mulfunc, rank)
                # Only decompose when it actually shrinks the layer.
                if (reduced < reduce(mulfunc, [conv2.in_channels, conv2.out_channels])):
                    print('ranks for bottleneck {} in {}: {}'.format(i, n, rank))
                    new_layers = decomp_func(conv2, rank)
                    setattr(bottleneck, 'conv2', nn.Sequential(*new_layers))
                del conv2
                del bottleneck
            del layer
    return net
|
def torch_cp_decomp(layer, rank):
    """CP-decompose a Conv2d into pointwise -> depthwise -> pointwise layers.

    :param layer: nn.Conv2d with weight (T, S, kH, kW)
    :param rank: CP rank R
    :return: [1x1 S->R conv, depthwise KxK R->R conv, 1x1 R->T conv]

    NOTE(review): assumes parafac returns the four factor matrices directly
    (older tensorly API); newer tensorly returns (weights, factors) — confirm
    the pinned version.
    """
    W = layer.weight.data
    (last, first, vertical, horizontal) = parafac(W, rank=rank, init='random')
    pointwise_s_to_r_layer = nn.Conv2d(in_channels=first.shape[0], out_channels=first.shape[1], kernel_size=1, padding=0, bias=False)
    depthwise_r_to_r_layer = nn.Conv2d(in_channels=rank, out_channels=rank, kernel_size=vertical.shape[0], stride=layer.stride, padding=layer.padding, dilation=layer.dilation, groups=rank, bias=False)
    pointwise_r_to_t_layer = nn.Conv2d(in_channels=last.shape[1], out_channels=last.shape[0], kernel_size=1, padding=0, bias=True)
    # The original bias is carried entirely by the final pointwise conv.
    if (layer.bias is not None):
        pointwise_r_to_t_layer.bias.data = layer.bias.data
    # In-place t_()/unsqueeze_ reshape the factor matrices into conv weights
    # (note: this mutates the factors returned by parafac).
    sr = first.t_().unsqueeze_((- 1)).unsqueeze_((- 1))
    rt = last.unsqueeze_((- 1)).unsqueeze_((- 1))
    # One (kH, kW) kernel per rank component, as grouped/depthwise weights.
    rr = torch.stack([(vertical.narrow(1, i, 1) @ torch.t(horizontal).narrow(0, i, 1)) for i in range(rank)]).unsqueeze_(1)
    pointwise_s_to_r_layer.weight.data = sr
    pointwise_r_to_t_layer.weight.data = rt
    depthwise_r_to_r_layer.weight.data = rr
    new_layers = [pointwise_s_to_r_layer, depthwise_r_to_r_layer, pointwise_r_to_t_layer]
    return new_layers
|
def tucker_decomp(layer, rank):
    """Tucker-2 decompose a Conv2d into 1x1 -> core KxK -> 1x1 layers.

    :param layer: nn.Conv2d with weight (T, S, kH, kW)
    :param rank: [rank_out, rank_in] for the two channel modes
    :return: [1x1 S->r_in conv, KxK r_in->r_out core conv, 1x1 r_out->T conv]

    NOTE(review): partial_tucker's `ranks=` keyword was renamed `rank=` in
    newer tensorly — confirm the pinned version.
    """
    W = layer.weight.data
    (core, [last, first]) = partial_tucker(W, modes=[0, 1], ranks=rank, init='svd')
    first_layer = nn.Conv2d(in_channels=first.shape[0], out_channels=first.shape[1], kernel_size=1, padding=0, bias=False)
    core_layer = nn.Conv2d(in_channels=core.shape[1], out_channels=core.shape[0], kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation, bias=False)
    last_layer = nn.Conv2d(in_channels=last.shape[1], out_channels=last.shape[0], kernel_size=1, padding=0, bias=True)
    # The original bias moves to the final pointwise conv.
    if (layer.bias is not None):
        last_layer.bias.data = layer.bias.data
    # In-place reshape of the factor matrices into (out, in, 1, 1) conv
    # weights (mutates the tensors returned by partial_tucker).
    fk = first.t_().unsqueeze_((- 1)).unsqueeze_((- 1))
    lk = last.unsqueeze_((- 1)).unsqueeze_((- 1))
    first_layer.weight.data = fk
    last_layer.weight.data = lk
    core_layer.weight.data = core
    new_layers = [first_layer, core_layer, last_layer]
    return new_layers
|
def get_gm(acc, cs, ppl):
    """Geometric mean of accuracy, content similarity and inverse perplexity,
    with negative inputs clipped to zero."""
    clipped = (max(acc, 0), max(cs, 0), max((1 / ppl), 0))
    product = clipped[0] * clipped[1] * clipped[2]
    return product ** (1 / 3)
|
def tokenize(text, tags=False, lemmas=False):
    """Tokenize `text` via the module-level `process_pipeline`, skipping punctuation.

    The pipeline output is parsed as tab-separated rows (presumably CoNLL-U:
    column 1 = surface form, 2 = lemma, 3 = POS tag).

    :param text: raw input sentence
    :param tags: when True, append '_<POS>' to every token
    :param lemmas: when True, return lemmas instead of surface forms
    :return: list of token strings
    """
    processed = process_pipeline.process(text)
    body = [ln for ln in processed.split('\n') if not ln.startswith('#')]
    rows = [ln.split('\t') for ln in body if ln]
    tokens = []
    for row in rows:
        pos = row[3]
        if pos == 'PUNCT':
            continue
        token = row[2] if lemmas else row[1]
        if tags:
            token += ('_' + pos)
        tokens.append(token)
    return tokens
|
def get_sentence_vector(text):
    """Mean word vector of the sentence's lemmas, reshaped to (1, dim)."""
    vectors = []
    for lemma in tokenize(text, lemmas=True):
        vectors.append(model[lemma])
    return np.mean(vectors, axis=0).reshape(1, (- 1))
|
def get_cosine_sim(text1, text2):
    """Cosine similarity between the mean word vectors of two sentences.

    Best-effort: returns 0 when either sentence cannot be embedded
    (e.g. out-of-vocabulary tokens or an empty token list).

    :param text1: first sentence
    :param text2: second sentence
    :return: similarity from sklearn's cosine_similarity, or 0 on failure
    """
    try:
        return cosine_similarity(get_sentence_vector(text1), get_sentence_vector(text2))
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. The fallback itself is deliberate.
        return 0
|
def get_cosine_sim_corpus(original_sentences, transferred_sentences):
    """Average pairwise cosine similarity over index-aligned sentence lists.

    :param original_sentences: source sentences
    :param transferred_sentences: rewritten sentences, aligned by index
    :return: mean similarity (numpy scalar)
    """
    # zip over the aligned pairs replaces index-based iteration
    # (`for i in range(len(...))` anti-pattern); tqdm keeps the progress bar.
    pairs = list(zip(original_sentences, transferred_sentences))
    results = [get_cosine_sim(orig, trans) for (orig, trans) in tqdm(pairs)]
    return np.mean(results)
|
def get_word_overlap(text1, text2):
    """Jaccard overlap of the lemma sets of two sentences.

    :param text1: first sentence
    :param text2: second sentence
    :return: |intersection| / |union| of lemma sets; 0.0 when both
        sentences produce no tokens (the original raised
        ZeroDivisionError in that edge case)
    """
    tokens1 = set(tokenize(text1, lemmas=True))
    tokens2 = set(tokenize(text2, lemmas=True))
    union = tokens1 | tokens2
    if not union:
        # Both sentences empty after tokenization: define overlap as 0.
        return 0.0
    intersection = tokens1 & tokens2
    return (len(intersection) / len(union))
|
def get_word_overlap_corpus(original_sentences, transferred_sentences):
    """Average word-overlap (Jaccard) over index-aligned sentence lists.

    :param original_sentences: source sentences
    :param transferred_sentences: rewritten sentences, aligned by index
    :return: mean overlap (numpy scalar)
    """
    # zip replaces the `for i in range(len(...))` anti-pattern.
    pairs = list(zip(original_sentences, transferred_sentences))
    results = [get_word_overlap(orig, trans) for (orig, trans) in tqdm(pairs)]
    return np.mean(results)
|
def get_bleu_corpus(original_sentences, transferred_sentences):
    """Corpus BLEU (unigram only) of transferred sentences against the originals.

    Each original sentence is wrapped as a single-reference list, matching
    nltk's corpus_bleu input shape.
    """
    references = [[[sentence]] for sentence in original_sentences]
    hypothesises = [[sentence] for sentence in transferred_sentences]
    return corpus_bleu(references, hypothesises, weights=[1])
|
def calc_bleu(inputs, preds):
    """Mean sentence-level BLEU over index-aligned input/prediction pairs.

    Only pairs where both sides have more than 3 tokens are scored.

    :param inputs: list of reference token sequences
    :param preds: list of hypothesis token sequences, aligned by index
    :return: average BLEU as float; 0.0 when no pair qualifies (the
        original raised ZeroDivisionError in that case)
    """
    bleu_sim = 0
    counter = 0
    print('Calculating BLEU similarity')
    # zip replaces the `for i in range(len(...))` anti-pattern.
    for (inp, pred) in zip(inputs, preds):
        if ((len(inp) > 3) and (len(pred) > 3)):
            bleu_sim += sentence_bleu([inp], pred)
            counter += 1
    if (counter == 0):
        # No pair was long enough to score.
        return 0.0
    return float((bleu_sim / counter))
|
class Args():
    """Hard-coded configuration namespace for the GPT-2 perplexity scorer.

    Only model_type, model_name_or_path and device are read by the visible
    code (get_gpt2_ppl_corpus); the remaining fields are presumably
    generation/sampling parameters for other callers — confirm.
    """
    def __init__(self):
        self.model_type = 'gpt2'  # key into MODEL_CLASSES
        self.model_name_or_path = 'sberbank-ai/rugpt2large'  # HF hub model id
        self.prompt = ''
        self.length = 50
        self.stop_token = '</s>'
        self.k = 5
        self.p = 0.95
        self.temperature = 1
        self.repetition_penalty = 1
        self.num_return_sequences = 1
        self.device = 'cuda:0'  # device string used to place the model
        self.seed = 42
|
def get_gpt2_ppl_corpus(test_sentences):
    """Score fluency of sentences with a Russian GPT-2 language model.

    :param test_sentences: iterable of strings to score
    :return: length-weighted average of per-sentence LM losses.
        NOTE(review): despite the name this returns the mean negative
        log-likelihood, not exp() of it (i.e. not perplexity proper) —
        confirm downstream usage. Raises ZeroDivisionError if every
        sentence encodes to <= 1 token.
    """
    args = Args()
    args.model_name_or_path = 'sberbank-ai/rugpt2large'
    (model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    model.to(args.device)
    lls = []       # per-sentence losses
    weights = []   # per-sentence token counts (minus one), used as weights
    for text in tqdm(test_sentences):
        # The sentence is wrapped in newlines before encoding (kept as-is).
        encodings = tokenizer(f'''
{text}
''', return_tensors='pt')
        input_ids = encodings.input_ids.to(args.device)
        target_ids = input_ids.clone()
        # Number of predicted positions; 0-token edge case contributes 0.
        w = max(0, (len(input_ids[0]) - 1))
        if (w > 0):
            with torch.no_grad():
                # With labels == inputs, outputs[0] is the LM loss.
                outputs = model(input_ids, labels=target_ids)
                log_likelihood = outputs[0]
            ll = log_likelihood.item()
        else:
            ll = 0
        lls.append(ll)
        weights.append(w)
    return (np.dot(lls, weights) / sum(weights))
|
def classify_preds(args, preds):
    """Score predictions with a Russian toxicity classifier.

    :param args: namespace providing batch_size
    :param preds: list of strings to classify
    :return: list of floats, 1 - predicted class per sentence (so 1.0 means
        the classifier's class-0; presumably 1.0 == non-toxic — confirm
        against the model's label mapping)
    """
    print('Calculating style of predictions')
    results = []
    tokenizer = BertTokenizer.from_pretrained('SkolkovoInstitute/russian_toxicity_classifier')
    model = BertForSequenceClassification.from_pretrained('SkolkovoInstitute/russian_toxicity_classifier')
    for i in tqdm.tqdm(range(0, len(preds), args.batch_size)):
        batch = tokenizer(preds[i:(i + args.batch_size)], return_tensors='pt', padding=True)
        # argmax over logits -> predicted class id per sentence.
        result = model(**batch)['logits'].argmax(1).float().data.tolist()
        results.extend([(1 - item) for item in result])
    return results
|
class condBERT():
    """Conditional-BERT detoxifier for Russian text.

    Loads a masked LM plus pre-computed toxicity vocabularies (downloading
    both via gdown on first use), and wires them into a CondBertRewriter
    that masks toxic spans and re-fills them with toxicity-penalized
    predictions.
    """

    def __init__(self, device='cuda', from_pretrained=True):
        # Penalize candidate tokens proportionally to their toxicity when
        # the MLM ranks replacements (closure over token_toxicities below).
        def adjust_logits(logits):
            return (logits - (token_toxicities * 100))
        model_name = 'Geotrend/bert-base-ru-cased'
        tokenizer_ru = BertTokenizer.from_pretrained(model_name)
        model = BertForMaskedLM.from_pretrained(model_name)
        if from_pretrained:
            print('Loading fine-tuned weights.')
            if (not os.path.isdir(BERT_WEIGHTS.split('/')[0])):
                os.system('gdown --id 1z5UlXYpZPBC0hlP6W8EMdcgCZmpO5lPg && unzip ru_cond_bert_geotrend.zip')
            model_dict = torch.load(BERT_WEIGHTS, map_location=device)
            model.load_state_dict(model_dict, strict=False)
        model.to(device)
        if (not os.path.isdir(VOCAB_DIRNAME)):
            print('Loading pre-calculated toxicity vocabularies.')
            os.system('gdown --id 1BZTmXqvJe-R0MzYbY6QD7KaySLiM38Em && unzip ru_vocabularies_geotrend.zip')
        # Word lists: x[:-1] strips the trailing newline of each line.
        with open((VOCAB_DIRNAME + '/negative-words.txt'), 'r') as f:
            s = f.readlines()
        negative_words = list(map((lambda x: x[:(- 1)]), s))
        with open((VOCAB_DIRNAME + '/positive-words.txt'), 'r') as f:
            s = f.readlines()
        positive_words = list(map((lambda x: x[:(- 1)]), s))
        with open((VOCAB_DIRNAME + '/word2coef.pkl'), 'rb') as f:
            word2coef = pickle.load(f)
        token_toxicities = []
        with open((VOCAB_DIRNAME + '/token_toxicities.txt'), 'r') as f:
            for line in f.readlines():
                token_toxicities.append(float(line))
        token_toxicities = np.array(token_toxicities)
        # Convert probabilities to logits, clipped at 0 from below.
        token_toxicities = np.maximum(0, np.log((1 / ((1 / token_toxicities) - 1))))
        # Pin a fixed toxicity of 3 for a few punctuation tokens.
        # Fixed: the original wrote `token_toxicities[encode(tok)][1] = 3`,
        # which fancy-indexes a *copy* of the array and silently discards
        # the assignment; encode(tok) yields [CLS, tok_id, SEP], so the
        # intended target is the id at position 1.
        for tok in ['.', ',', '-']:
            token_toxicities[tokenizer_ru.encode(tok)[1]] = 3
        predictor = MaskedTokenPredictorBert(model, tokenizer_ru, max_len=250, device=device, label=0, contrast_penalty=0.0, logits_postprocessor=adjust_logits)
        self.editor = CondBertRewriter(model=model, tokenizer=tokenizer_ru, device=device, neg_words=negative_words, pos_words=positive_words, word2coef=word2coef, token_toxicities=token_toxicities, predictor=predictor)
        return

    def detoxify(self, text):
        """Rewrite `text` with toxic spans replaced; returns the new string."""
        return self.editor.translate(text, prnt=False)
|
def cosine(v1, v2):
    """Cosine similarity of two 1-D numpy vectors (epsilon-stabilized denominator)."""
    denominator = np.sqrt(((sum((v1 ** 2)) * sum((v2 ** 2))) + 1e-10))
    numerator = np.dot(v1, v2)
    return numerator / denominator
|
class EmbeddingSimilarityChooser():
    """Pick the replacement hypothesis that best balances model score and
    embedding similarity to the original span."""
    def __init__(self, sim_coef=100, tokenizer=None):
        # GloVe word embeddings from flair, used for similarity scoring.
        self.glove_embedding = WordEmbeddings('glove')
        self.sim_coef = sim_coef  # weight of similarity vs model score
        self.tokenizer = tokenizer  # optional BPE tokenizer for detokenizing
    def embed(self, text):
        """Mean GloVe vector over the tokens of `text`."""
        toks = self.glove_embedding.embed(Sentence(text))[0]
        return np.mean([t.embedding.cpu().numpy() for t in toks], axis=0)
    def decode(self, tokens):
        """Turn tokens (str or list of BPE pieces) back into a plain string."""
        if isinstance(tokens, str):
            return tokens
        if self.tokenizer:
            return self.tokenizer.convert_tokens_to_string(tokens)
        # Fallback: join and strip WordPiece continuation markers.
        return ' '.join(tokens).replace(' ##', '')
    def __call__(self, hypotheses, original=None, scores=None, **kwargs):
        """Return the hypothesis maximizing score + sim_coef * cosine(original, hyp)."""
        e = self.embed(self.decode(original))
        candidates = [(fill_words, score, cosine(e, self.embed(self.decode(fill_words)))) for (fill_words, score) in zip(hypotheses, scores)]
        candidates = sorted(candidates, key=(lambda x: (x[1] + (x[2] * self.sim_coef))), reverse=True)
        return candidates[0][0]
|
class RuEmbeddingSimilarityChooser(EmbeddingSimilarityChooser):
    """Russian-named variant of EmbeddingSimilarityChooser.

    The original body was a line-for-line copy of
    EmbeddingSimilarityChooser (same constructor parameters, same embed /
    decode / __call__ behavior); subclassing removes the duplication while
    keeping the public name and interface identical.

    NOTE(review): like the parent, this loads English 'glove'
    WordEmbeddings — confirm that is intended for Russian text.
    """
    pass
|
def group_by_first_token(texts, tokenizer):
    """Encode each text (no special tokens) and bucket the id sequences by
    their first token id.

    :param texts: iterable of strings
    :param tokenizer: tokenizer exposing encode(text, add_special_tokens=...)
    :return: defaultdict mapping first-token-id -> list of full id sequences
    """
    grouped = defaultdict(list)
    for text in texts:
        ids = tokenizer.encode(text, add_special_tokens=False)
        grouped[ids[0]].append(ids)
    return grouped
|
def default_chooser(hypotheses, original=None, **kwargs):
    """Trivial chooser: keep the top-ranked (first) hypothesis."""
    best = hypotheses[0]
    return best
|
class CondBertRewriter():
    """Masks toxic spans in a sentence and rewrites them with a masked LM.

    Toxic spans are found either via exact multi-token matches against a
    negative-word list or via per-word toxicity coefficients (word2coef);
    replacements come from `predictor` (a MaskedTokenPredictorBert) with a
    per-token toxicity penalty applied to the logits.
    """
    def __init__(self, model, tokenizer, device, neg_words, pos_words, word2coef, token_toxicities, predictor=None):
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        self.neg_words = neg_words
        self.pos_words = pos_words
        self.word2coef = word2coef  # word -> toxicity coefficient
        self.token_toxicities = token_toxicities
        self.predictor = predictor
        # Reverse vocab: token id -> token string.
        self.v = {v: k for (k, v) in tokenizer.vocab.items()}
        self.device_toxicities = torch.tensor(token_toxicities).to(self.device)
        # Word lists indexed by their first BPE token id for fast matching.
        self.neg_complex_tokens = group_by_first_token(neg_words, self.tokenizer)
        self.pos_complex_tokens = group_by_first_token(pos_words, self.tokenizer)
        self.mask_index = self.tokenizer.convert_tokens_to_ids('[MASK]')
    def toks_to_words(self, token_ids):
        ' Merge subword tokens into whole words '
        # Yields (subword indices, merged word). A word is flushed when the
        # next non-'##' token is seen; with add_special_tokens=True the
        # trailing [SEP] flushes the last real word (the [SEP]/trailing run
        # itself is never yielded).
        indices = []
        for (i, token_id) in enumerate(token_ids):
            token_text = self.v[token_id]
            if token_text.startswith('##'):
                indices.append(i)
            else:
                if indices:
                    toks = [self.v[token_ids[t]] for t in indices]
                    word = ''.join(([toks[0]] + [t[2:] for t in toks[1:]]))
                    (yield (indices, word))
                indices = [i]
    def get_mask_fast(self, inp: str, bad_words=None, min_bad_score=0, aggressive=True, max_score_margin=0.5):
        """Tokenize `inp` and mark toxic positions.

        :param inp: raw sentence
        :param bad_words: dict first-token-id -> list of bad id sequences
            (defaults to the negative-word index)
        :param min_bad_score: minimum word2coef score to consider a word toxic
        :param aggressive: also apply word2coef scoring even when exact
            bad-word matches were found
        :param max_score_margin: mask every word scoring at least this
            fraction of the sentence maximum
        :return: (token id tensor (1, len), 0/1 mask tensor of same shape)
        """
        if (bad_words is None):
            bad_words = self.neg_complex_tokens
        sentences = [self.tokenizer.encode(inp, add_special_tokens=True)]
        sentences_torch = torch.tensor(sentences)
        masks = torch.zeros_like(sentences_torch)
        for (sent_id, sent) in enumerate(sentences):
            # Pass 1: exact matches against the bad-word sequences, extended
            # over any '##' continuation subwords that follow the match.
            for (first_tok_id, tok) in enumerate(sent):
                for hypothesis in bad_words.get(tok, []):
                    n = len(hypothesis)
                    if (sent[first_tok_id:(first_tok_id + n)] == hypothesis):
                        for step in range(n):
                            masks[(sent_id, (first_tok_id + step))] = 1
                        for (offset, next_token) in enumerate(sent[(first_tok_id + n):]):
                            if self.tokenizer.convert_ids_to_tokens(next_token).startswith('##'):
                                masks[(sent_id, ((first_tok_id + n) + offset))] = 1
                            else:
                                break
            # Pass 2: word2coef scoring (always when aggressive, otherwise
            # only if pass 1 found nothing).
            if ((sum(masks[sent_id].numpy()) == 0) or aggressive):
                scored_words = []
                for (indices, word) in self.toks_to_words(sent):
                    score = self.word2coef.get(word)
                    if score:
                        scored_words.append([indices, word, score])
                if scored_words:
                    max_score = max((s[2] for s in scored_words))
                    if (max_score > min_bad_score):
                        for (indices, word, score) in scored_words:
                            if (score >= max(min_bad_score, (max_score * max_score_margin))):
                                masks[(sent_id, indices)] = 1
        return (sentences_torch, masks)
    def translate(self, ss, get_mask=None, label=0, prnt=True, raw=False, toxicity_penalty=15, contrast_penalty=0, mask_toxic=False, duplicate=False):
        """Single-pass rewrite: replace every masked position with the
        argmax MLM token (toxicity-penalized) and detokenize.

        Returns `ss` unchanged when no position is masked; returns raw
        logits when `raw` is True.
        """
        if (get_mask is None):
            get_mask = self.get_mask_fast
        if prnt:
            print(ss)
        if (label == 0):
            (input_ids, attn_mask) = get_mask(ss, bad_words=self.neg_complex_tokens)
        else:
            (input_ids, attn_mask) = get_mask(ss, bad_words=self.pos_complex_tokens)
        if (attn_mask.sum().numpy() == 0):
            return ss
        # NOTE(review): `masked` is built but never used afterwards — dead code.
        masked = (torch.ones_like(input_ids) * (- 100))
        for i in range(input_ids.shape[0]):
            masked[i][(attn_mask[i] == 1)] = input_ids[i][(attn_mask[i] == 1)]
        if duplicate:
            # Prepend an unmasked copy of the sentence as extra context.
            input_ids = torch.cat([input_ids, input_ids], axis=1)
            attn_mask = torch.cat([torch.zeros_like(attn_mask), attn_mask], axis=1)
        # NOTE(review): `i` here is leaked from the loop above (last row
        # index); correct only because the batch size is 1 — confirm.
        if mask_toxic:
            input_ids[i][(attn_mask[i] == 1)] = self.mask_index
        input_ids = input_ids.to(self.device)
        self.model.eval()
        # token_type_ids carries the style label (0 = non-toxic target).
        outputs = self.model(input_ids, token_type_ids=(torch.ones_like(input_ids) * label))
        if contrast_penalty:
            neg_outputs = self.model(input_ids, token_type_ids=(torch.ones_like(input_ids) * (1 - label)))
        else:
            neg_outputs = None
        if raw:
            return outputs[0]
        for i in range(input_ids.shape[0]):
            logits = outputs[(- 1)][i][(attn_mask[i] == 1)]
            if toxicity_penalty:
                logits -= (self.device_toxicities * toxicity_penalty)
            if contrast_penalty:
                # Contrast the two style conditions in probability space.
                neg_logits = neg_outputs[(- 1)][i][(attn_mask[i] == 1)]
                scores = (torch.softmax(logits, (- 1)) - (torch.softmax(neg_logits, (- 1)) * contrast_penalty))
            else:
                scores = logits
            input_ids[i][(attn_mask[i] == 1)] = scores.argmax(dim=1)
        # Detokenize, dropping [CLS]/[SEP]; in duplicate mode keep only the
        # rewritten second half.
        result = self.tokenizer.convert_tokens_to_string([self.tokenizer.convert_ids_to_tokens(i.item()) for i in input_ids[0][1:(- 1)]])
        return result.split('[SEP] [CLS] ')[(- 1)]
    def convert_mask(self, tok_ids, mask_ids, duplicate=False, start_from=0):
        """Locate the first masked word at/after `start_from`.

        :return: ([token list], [position of masked word], masked subword
            tokens); mask_pos is None when no mask is found
        """
        toks_tmp = [self.tokenizer.convert_ids_to_tokens(tok_ids[0])[1:(- 1)]]
        mask_pos = None
        toks = []
        mask_toks = []
        has_mask = False
        for (i, is_masked) in enumerate(mask_ids[0][1:(- 1)]):
            tok = toks_tmp[0][i]
            if (not has_mask):
                # Only start a mask at a word boundary (not a '##' piece).
                if (is_masked and (i >= start_from) and (not tok.startswith('##'))):
                    has_mask = True
                    mask_pos = [i]
                    mask_toks.append(tok)
                toks.append(tok)
            elif ((not is_masked) or (not tok.startswith('##'))):
                # Mask ended: copy the remainder verbatim and stop.
                toks.extend(toks_tmp[0][i:])
                break
            else:
                mask_toks.append(tok)
        toks = [toks]
        if duplicate:
            # Prepend the full original sentence as context, separated by [SEP].
            toks = [((toks_tmp[0] + ['[SEP]']) + toks[0])]
            mask_pos[0] += (len(toks_tmp[0]) + 1)
        return (toks, mask_pos, mask_toks)
    def replacement_loop(self, text, span_detector=None, predictor=None, verbose=True, chooser=default_chooser, n_tokens=(1, 2, 3), n_top=10, mask_token=False, **predictor_args):
        """Iteratively replace masked words left-to-right (at most 10 rounds).

        Each round: detect spans, take the first masked word after the last
        replacement, generate candidates with `predictor`, pick one with
        `chooser`, splice it in, and continue after it.
        """
        if (span_detector is None):
            span_detector = self.get_mask_fast
        if (predictor is None):
            predictor = self.predictor
        new_text = text
        look_from = 0
        for i in range(10):
            (tok_ids, mask_ids) = span_detector(new_text)
            if (not sum(mask_ids[0][(1 + look_from):])):
                break
            (toks, mask_pos, mask_toks) = self.convert_mask(tok_ids, mask_ids, duplicate=False, start_from=look_from)
            if (mask_pos is None):
                return new_text
            (texts, scores) = predictor.generate(toks, mask_pos, n_tokens=list(n_tokens), n_top=n_top, fix_multiunit=False, mask_token=mask_token, **predictor_args)
            replacement = chooser(hypotheses=texts[0], scores=scores[0], original=mask_toks)
            if isinstance(replacement, str):
                replacement = [replacement]
            if verbose:
                print(mask_toks, '->', replacement)
            new_toks = ((toks[0][:mask_pos[0]] + replacement) + toks[0][(mask_pos[0] + 1):])
            new_text = self.tokenizer.convert_tokens_to_string(new_toks)
            look_from = (mask_pos[0] + 1)
        return new_text
|
def bpe_tokenize(bpe_tokenizer, sentence):
    """BPE-tokenize a sentence of token objects, keeping source offsets.

    :param bpe_tokenizer: tokenizer exposing tokenize(text)
    :param sentence: iterable of tokens with .text, .begin, .end
    :return: (flat list of subword pieces, parallel list of (begin, end)
        offsets of the originating word, repeated per piece)
    """
    all_pieces = []
    all_offsets = []
    for word in sentence:
        pieces = bpe_tokenizer.tokenize(word.text)
        span = (word.begin, word.end)
        all_pieces.extend(pieces)
        all_offsets.extend([span] * len(pieces))
    return (all_pieces, all_offsets)
|
def nlargest_indexes(arr, n_top):
    """Indexes of the `n_top` largest entries of `arr`, largest first.

    Uses argpartition for an O(n + k log k) selection instead of a full sort.
    """
    candidate_ids = np.argpartition(arr, (- n_top))[(- n_top):]
    descending = np.argsort((- arr[candidate_ids]))
    return candidate_ids[descending]
|
def remove_masked_token_subwords(masked_position, bpe_tokens, bpe_offsets):
    '\n    If the masked token has been tokenied into multiple subwords: like dieting-->diet and ##ing\n    keep the first subword and remove others.\n    '
    logger.debug(f'bpe tokens: {bpe_tokens}')
    logger.debug(f'bpe offsets: {bpe_offsets}')
    sent_idx = masked_position[0]
    positions = masked_position[1]
    if len(positions) > 1:
        # Drop every subword after the first, in both tokens and offsets.
        start = positions[1]
        stop = positions[-1] + 1
        del bpe_tokens[sent_idx][start:stop]
        del bpe_offsets[sent_idx][start:stop]
        masked_position = (sent_idx, positions[0])
    logger.debug(f'bpe offsets: {str(bpe_tokens)}')
    logger.debug(f'bpe offsets: {str(bpe_offsets)}')
    return (masked_position, bpe_tokens, bpe_offsets)
|
def merge_sorted_results(objects_left, scores_left, objects_right, scores_right, max_elems):
    """Merge two score-descending (objects, scores) pairs, keeping at most
    `max_elems` entries.

    Standard two-pointer merge; on ties the right-hand entry wins,
    matching the original strict `>` comparison.

    :return: (merged_objects, merged_scores), both score-descending
    """
    merged_objects = []
    merged_scores = []
    li = 0
    ri = 0
    while len(merged_scores) < max_elems:
        left_done = (li == len(scores_left))
        right_done = (ri == len(scores_right))
        if left_done and right_done:
            break
        take_right = left_done or ((not right_done) and (scores_right[ri] >= scores_left[li]))
        if take_right:
            merged_objects.append(objects_right[ri])
            merged_scores.append(scores_right[ri])
            ri += 1
        else:
            merged_objects.append(objects_left[li])
            merged_scores.append(scores_left[li])
            li += 1
    return (merged_objects, merged_scores)
|
class MaskedTokenPredictorBert():
    def __init__(self, model, bpe_tokenizer, max_len=250, mask_in_multiunit=False, device=None, label=0, logits_postprocessor=None, contrast_penalty=0):
        """Wraps a BERT masked LM for masked-token replacement.

        :param model: masked LM taking (tokens, segments, attention mask)
        :param bpe_tokenizer: matching BPE/WordPiece tokenizer
        :param max_len: hard cap on sequence length (incl. [CLS]/[SEP])
        :param mask_in_multiunit: whether to mask the position when
            generating multi-unit tokens
        :param device: torch device; defaults to CUDA
        :param label: token_type (segment) id used as a style label
        :param logits_postprocessor: optional callable applied to logits
            before ranking (e.g. a toxicity penalty)
        :param contrast_penalty: weight for contrasting against the
            opposite-label distribution (0 disables)
        """
        self._model = model
        self._bpe_tokenizer = bpe_tokenizer
        self._max_len = max_len
        self._mask_in_multiunit = mask_in_multiunit
        self.device = (device or torch.device('cuda'))
        self.label = label
        self.logits_postprocessor = logits_postprocessor
        self.contrast_penalty = contrast_penalty
def __call__(self, sentences, masked_position, **kwargs):
if (type(masked_position) is not list):
bpe_tokens = [bpe_tokens]
masked_position = [masked_position]
b_masked_pos = []
b_bpe_tokens = []
for (sent, mask_pos) in zip(sentences, masked_position):
(bpe_tokens, bpe_offsets) = bpe_tokenize(self._bpe_tokenizer, sent)
masked_position = find_bpe_position_by_offset([bpe_offsets], (sent[mask_pos].begin, sent[mask_pos].end))
(masked_position, bpe_tokens, _) = remove_masked_token_subwords(masked_position, [bpe_tokens], [bpe_offsets])
bpe_tokens = bpe_tokens[0]
logger.debug(f'Bpe tokens: {bpe_tokens}')
b_bpe_tokens.append(bpe_tokens)
b_masked_pos.append(masked_position[1])
return self.generate(b_bpe_tokens, b_masked_pos, **kwargs)
def generate(self, b_bpe_tokens, b_masked_pos, mask_token=True, n_top=5, n_units=1, n_tokens=[1], fix_multiunit=True, beam_size=10, multiunit_lookup=100, max_multiunit=10, **kwargs):
result_preds = [[] for _ in range(len(b_bpe_tokens))]
result_scores = [[] for _ in range(len(b_bpe_tokens))]
if (1 in n_tokens):
(result_preds, result_scores) = self.predict_single_word(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, max_multiunit=max_multiunit)
for n_t in n_tokens:
if (n_t == 1):
continue
(pred_tokens, pred_scores) = self.predict_token_sequence(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, seq_len=n_t, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, beam_size=beam_size, max_multiunit=max_multiunit)
for i in range(len(b_bpe_tokens)):
(result_preds[i], result_scores[i]) = merge_sorted_results(result_preds[i], result_scores[i], pred_tokens[i], pred_scores[i], n_top)
return (result_preds, result_scores)
    def predict_single_unit(self, bpe_tokens, masked_position, mask_token, n_top):
        """Run the MLM once and return the top-n_top tokens per masked position.

        :param bpe_tokens: batch of BPE token lists (without [CLS]/[SEP])
        :param masked_position: per-sentence index of the masked token
        :param mask_token: overwrite the position with '[MASK]' when True
        :param n_top: candidates to return per sentence
        :return: (top tokens, top scores), both per-sentence, best first
        """
        # Deep copy: the batch is truncated/mutated below.
        bpe_tokens = copy.deepcopy(bpe_tokens)
        # +2 accounts for the [CLS]/[SEP] wrappers added below.
        max_len = min([(max((len(e) for e in bpe_tokens)) + 2), self._max_len])
        token_ids = []
        for i in range(len(bpe_tokens)):
            bpe_tokens[i] = bpe_tokens[i][:(max_len - 2)]
            if mask_token:
                bpe_tokens[i][masked_position[i]] = '[MASK]'
            bpe_tokens[i] = ((['[CLS]'] + bpe_tokens[i]) + ['[SEP]'])
            logger.debug(f'Masked BPE tokens: {bpe_tokens[i]}')
            token_ids.append(self._bpe_tokenizer.convert_tokens_to_ids(bpe_tokens[i]))
        # pad_sequences is the Keras-style padding util (project import).
        token_ids = pad_sequences(token_ids, maxlen=max_len, dtype='long', truncating='post', padding='post')
        attention_masks_tensor = torch.tensor((token_ids > 0)).long().to(self.device)
        tokens_tensor = torch.tensor(token_ids).to(self.device)
        # Segment ids carry the style label for every position.
        segments_ids = (np.ones_like(token_ids, dtype=int) * self.label)
        segments_tensor = torch.tensor(segments_ids).to(self.device)
        self._model.eval()
        with torch.no_grad():
            target_sent = self._model(tokens_tensor, segments_tensor, attention_masks_tensor)[0]
        if self.contrast_penalty:
            # Contrast against the opposite style label in probability space.
            with torch.no_grad():
                another = self._model(tokens_tensor, (1 - segments_tensor), attention_masks_tensor)[0]
            diff = (torch.softmax(target_sent, (- 1)) - (self.contrast_penalty * torch.softmax(another, (- 1))))
            target_sent = torch.log(torch.clamp(diff, 1e-20))
        target_sent = target_sent.detach().cpu().numpy()
        final_top_scores = []
        final_top_tokens = []
        for i in range(target_sent.shape[0]):
            row = target_sent[i]
            idx = masked_position[i]
            # +1 compensates for the prepended [CLS].
            logits = row[(idx + 1)]
            logits = self.adjust_logits(logits)
            top_ids = nlargest_indexes(logits, n_top)
            # NOTE(review): scores are re-read from the *unadjusted* row,
            # not from the postprocessed logits — confirm intended.
            top_scores = [target_sent[i][(masked_position[i] + 1)][j] for j in top_ids]
            top_tokens = self._bpe_tokenizer.convert_ids_to_tokens(top_ids)
            final_top_scores.append(top_scores)
            final_top_tokens.append(top_tokens)
        return (final_top_tokens, final_top_scores)
def adjust_logits(self, logits):
if self.logits_postprocessor:
return self.logits_postprocessor(logits)
return logits
    def predict_single_word(self, bpe_tokens, masked_position, mask_token, n_top, n_units, fix_multiunit, multiunit_lookup, max_multiunit):
        """Top-n_top single-word candidates, optionally augmented with
        multi-subword-unit candidates merged in by score.

        :return: (per-sentence candidate lists, per-sentence score lists),
            best first
        """
        (pred_tokens, scores) = self.predict_single_unit(bpe_tokens, masked_position, mask_token=mask_token, n_top=n_top)
        final_pred_tokens = []
        final_scores = []
        for j in range(len(pred_tokens)):
            if (n_units > 1):
                # Work on ascending-score lists so bisect.insort-style
                # insertion below keeps them sorted; reversed back at the end.
                pred_tokens[j] = list(reversed(pred_tokens[j][:multiunit_lookup]))
                scores[j] = list(reversed(scores[j][:multiunit_lookup]))
                seq_list = self.generate_multiunit_token(masked_position[j], bpe_tokens[j], n_top=multiunit_lookup, n_units=n_units)
                for seq in seq_list[:max_multiunit]:
                    (seq_pred, seq_scores) = seq
                    multiunit_token = '_'.join(seq_pred)
                    if fix_multiunit:
                        # Strip subword markers to produce a plain word.
                        multiunit_token = multiunit_token.replace('#', '')
                        multiunit_token = multiunit_token.replace('_', '')
                    # Harmonic mean of the per-unit scores as the sequence score.
                    multiunit_score = scipy.stats.hmean(seq_scores)
                    ind = bisect.bisect(scores[j], multiunit_score)
                    pred_tokens[j].insert(ind, multiunit_token)
                    scores[j].insert(ind, multiunit_score)
                pred_tokens[j] = list(reversed(pred_tokens[j]))
                scores[j] = list(reversed(scores[j]))
            logger.debug(f'Predicted words: {pred_tokens[j]}')
            final_pred_tokens.append(pred_tokens[j][:n_top])
            final_scores.append(scores[j][:n_top])
        return (final_pred_tokens, final_scores)
    def generate_multiunit_token(self, masked_position, bpe_tokens, n_top, n_units):
        """Compose words out of several BPE units at the masked position.

        Inserts an extra [MASK], collects continuation subwords ('#...'),
        predicts a head unit for each of them in batches, and optionally
        extends promising chains further via generate_from_tail.
        Returns a list of (subword_sequence, subword_scores) pairs.
        """
        final_result = []
        final_result_scores = []
        bpe_tokens = copy.deepcopy(bpe_tokens)
        bpe_tokens.insert(masked_position, '[MASK]')
        (predictions, scores) = self.predict_single_unit([bpe_tokens], [(masked_position + 1)], n_top=n_top, mask_token=self._mask_in_multiunit)
        predictions = predictions[0]
        scores = scores[0]
        good_preds = []
        b_bpe_tokens = []
        # Keep only continuation subwords (those starting with '#').
        for (i, pred) in (e for e in enumerate(predictions) if (e[1][0] == '#')):
            tmp = copy.deepcopy(bpe_tokens)
            tmp[(masked_position + 1)] = pred
            b_bpe_tokens.append(tmp)
            good_preds.append((i, pred))
        if (not good_preds):
            return []
        # Batch the follow-up head-unit predictions for efficiency.
        loader = DataLoader(b_bpe_tokens, batch_size=10, collate_fn=(lambda _: _))
        preds = []
        pred_scores = []
        for batch in loader:
            (bb_preds, bb_pred_scores) = self.predict_single_unit(batch, [masked_position for _ in range(len(batch))], mask_token=False, n_top=n_top)
            preds += bb_preds
            pred_scores += bb_pred_scores
        for i in range(len(preds)):
            # Chain = best head prediction + its continuation subword ...
            result = [preds[i][0], good_preds[i][1]]
            result_scores = [pred_scores[i][0], scores[good_preds[i][0]]]
            # ... optionally extended leftwards while heads stay subwords.
            (tail, tail_scores) = self.generate_from_tail(preds[i][0], b_bpe_tokens[i], masked_position, max_subunits=(n_units - 2), n_top=n_top)
            result = (tail + result)
            result_scores = (tail_scores + result_scores)
            final_result.append(result)
            final_result_scores.append(result_scores)
        return list(zip(final_result, final_result_scores))
    def generate_from_tail(self, pred, bpe_tokens, masked_position, max_subunits, n_top):
        """Keep prepending subword units while the newest head is itself a
        continuation ('#...') unit, up to max_subunits extra units.

        Mutates `bpe_tokens` in place while searching.  Returns (units, scores)
        in left-to-right reading order.
        """
        result = []
        result_scores = []
        it = 0
        while ((pred[0] == '#') and (it < max_subunits)):
            bpe_tokens[masked_position] = pred
            bpe_tokens.insert(masked_position, '[MASK]')
            (preds, pred_scores) = self.predict_single_unit([bpe_tokens], [masked_position], n_top=n_top, mask_token=False)
            pred = preds[0][0]
            result.append(pred)
            result_scores.append(pred_scores[0][0])
            it += 1
        # Units were collected right-to-left; reverse into reading order.
        return (list(reversed(result)), list(reversed(result_scores)))
    def generate_variants(self, bpe_tokens, mask_pos, gen_tokens, gen_scores, seq_len):
        """Yield beam hypotheses spliced into copies of the token sequences.

        Each yielded tuple is (token_lists, hypothesis_scores,
        hypothesis_tokens, new_mask_positions).  With an empty beam the
        untouched input is yielded once with zero scores.
        """
        batch_size = len(bpe_tokens)
        if (not gen_tokens):
            (yield (bpe_tokens, ([0.0] * batch_size), [[] for _ in range(batch_size)], mask_pos))
            return
        for var_num in range(len(gen_tokens[0])):
            # Skip beam slots that are still empty.
            if (not gen_tokens[0][var_num]):
                continue
            variant = []
            new_mask = []
            var_t = []
            var_s = []
            for i in range(batch_size):
                new_bpe = copy.deepcopy(bpe_tokens[i])
                # Write the already-generated tokens over the [MASK] slots.
                for seq_num in range(len(gen_tokens[i][var_num])):
                    new_bpe[(mask_pos[i] + seq_num)] = gen_tokens[i][var_num][seq_num]
                var_t.append(gen_tokens[i][var_num])
                var_s.append(gen_scores[i][var_num])
                # The next prediction happens right after the generated prefix.
                new_mask.append((mask_pos[i] + len(gen_tokens[i][var_num])))
                variant.append(new_bpe)
            (yield (variant, var_s, var_t, new_mask))
    def update_beam(self, prev_tokens, prev_score, new_scores, new_tokens, gen_scores, gen_tokens):
        """Merge candidate continuations into the beam (in place).

        `new_scores`/`new_tokens` are ascending-by-score fixed-size beams;
        each candidate is inserted with bisect and the weakest entry dropped,
        keeping the beam size constant.
        """
        for i in range(len(gen_scores)):
            final_gen_score = (prev_score + gen_scores[i])
            insert_pos = bisect.bisect(new_scores, final_gen_score)
            new_scores.insert(insert_pos, final_gen_score)
            # Drop the weakest hypothesis to keep the beam size constant.
            del new_scores[0]
            new_tokens.insert(insert_pos, (prev_tokens + [gen_tokens[i]]))
            if (len(new_tokens) > len(new_scores)):
                del new_tokens[0]
    def predict_token_sequence(self, bpe_tokens, masked_pos, mask_token, n_top, seq_len, beam_size, n_units, fix_multiunit, multiunit_lookup, max_multiunit):
        """Beam-search a seq_len-token phrase for each masked position.

        Inserts seq_len - 1 extra [MASK] slots, then repeatedly expands every
        beam hypothesis by one predicted word.  Scores are summed per step and
        finally length-normalized.  Returns (tokens, scores) per input,
        best-first.
        """
        bpe_tokens = copy.deepcopy(bpe_tokens)
        batch_size = len(bpe_tokens)
        for i in range(batch_size):
            # Reserve room for the remaining seq_len - 1 tokens.
            for seq_num in range((seq_len - 1)):
                bpe_tokens[i].insert((masked_pos[i] + 1), '[MASK]')
        gen_scores = []
        gen_tokens = []
        for seq_num in range(seq_len):
            # Fresh (ascending-order) beams for this step.
            gen_scores_seq = [[0.0 for __ in range(beam_size)] for _ in range(batch_size)]
            gen_tokens_seq = [[[] for __ in range(beam_size)] for _ in range(batch_size)]
            for (variant, variant_score, prev_tokens, new_mask) in self.generate_variants(bpe_tokens, masked_pos, gen_tokens, gen_scores, seq_len=seq_len):
                (top_tokens, top_scores) = self.predict_single_word(variant, new_mask, mask_token=True, n_top=n_top, n_units=n_units, fix_multiunit=fix_multiunit, multiunit_lookup=multiunit_lookup, max_multiunit=max_multiunit)
                for i in range(batch_size):
                    self.update_beam(prev_tokens[i], variant_score[i], gen_scores_seq[i], gen_tokens_seq[i], top_scores[i], top_tokens[i])
            gen_tokens = gen_tokens_seq
            gen_scores = gen_scores_seq
        # Length-normalize so results for different seq_len are comparable.
        gen_scores = [[(e / seq_len) for e in l] for l in gen_scores]
        # Beams are ascending; reverse to best-first.
        return ([list(reversed(e)) for e in gen_tokens], [list(reversed(e)) for e in gen_scores])
|
def get_masked_tokens_from_tagged_text(tagged_text):
    """Strip `__...__` markers from a tagged text.

    Returns (masks, clean_text) where masks is a list of (begin, end)
    character offsets of the previously tagged chunks inside clean_text.
    """
    masks = []
    pieces = []
    offset = 0
    for chunk_num, chunk in enumerate(tagged_text.split('__')):
        if chunk_num % 2:  # odd chunks sat between a pair of `__` markers
            masks.append((offset, offset + len(chunk)))
        offset += len(chunk)
        pieces.append(chunk)
    return (masks, ''.join(pieces))
|
def preprocess_tagged_text(t_text, ppl):
    """Turn a tagged text into (masked token position, sentence holding it)."""
    masked_tokens, clean_text = get_masked_tokens_from_tagged_text(t_text)
    logger.debug(f'Clean text: {clean_text}')
    annotation = ppl(clean_text)
    sentences = [CSentence(annotation['tokens'], sent) for sent in annotation['sentences']]
    if not masked_tokens:
        # No tag in the text: default to the first token of the first sentence.
        masked_pos = (0, [0])
    else:
        word_offsets = [[(word.begin, word.end) for word in sent] for sent in sentences]
        masked_pos = find_bpe_position_by_offset(word_offsets, masked_tokens[0])
    return (masked_pos[1][0], sentences[masked_pos[0]])
|
def process_batch(b_text, predictor, ppl, *args, **kwargs):
    """Run the predictor over a batch of tagged texts.

    Returns (predicted tokens, scores, original masked tokens).
    """
    positions = []
    token_lists = []
    masked_tokens = []
    for text in b_text:
        pos, tokens = preprocess_tagged_text(text, ppl)
        positions.append(pos)
        token_lists.append(tokens)
        masked_tokens.append(tokens[pos])
        logger.info(f'Masked token: {tokens[pos]}')
    pred_tokens, scores = predictor(token_lists, positions, *args, **kwargs)
    return (pred_tokens, scores, masked_tokens)
|
def analyze_tagged_text(tagged_text, predictor, ppl, batch_size=10, progress_bar=None, n_units=0, n_top=5, fix_multiunit=True, mask_token=True, n_tokens=[1], max_multiunit=10, multiunit_lookup=100, contexts=None):
    """Predict replacements for the `__token__` spans of one or more tagged texts.

    `tagged_text` may be a single string or a list of strings; for a single
    string the per-text result lists are unwrapped before returning.
    `progress_bar` may be tqdm/tqdm_notebook or None (no bar).
    Returns (pred_tokens, scores, masked_tokens).
    """
    if type(tagged_text) is not list:
        tagged_text = [tagged_text]
    all_preds = []
    all_scores = []
    all_masked = []
    wrap = progress_bar if progress_bar is not None else (lambda a: a)
    for start in wrap(range(0, len(tagged_text), batch_size)):
        batch = tagged_text[start:start + batch_size]
        batch_contexts = contexts[start:start + batch_size] if contexts else None
        b_preds, b_scores, b_masked = process_batch(batch, predictor, ppl, n_units=n_units, n_top=n_top, fix_multiunit=fix_multiunit, mask_token=mask_token, n_tokens=n_tokens, multiunit_lookup=multiunit_lookup, max_multiunit=max_multiunit, Cs=batch_contexts)
        all_preds += b_preds
        all_scores += b_scores
        all_masked += b_masked
        if start % 5 == 0:
            # Periodically release cached GPU memory between batches.
            gc.collect()
            torch.cuda.empty_cache()
    if len(all_preds) == 1:
        return (all_preds[0], all_scores[0], all_masked[0])
    return (all_preds, all_scores, all_masked)
|
def find_bpe_position_by_offset(bpe_offsets, target_offset):
    """Locate the sentence and token indices covering a character offset span.

    :param bpe_offsets: list of sentences, each a list of (begin, end)
        character offsets in document order
    :param target_offset: (begin, end) character span of the target token
    :return: (sentence_index, [indices of tokens inside the span])
    """
    for sent_num, sent in enumerate(bpe_offsets):
        # Skip sentences that end before the target span starts.
        if sent[-1][0] < target_offset[0]:
            continue
        bpe_nums = [bpe_num for bpe_num, bpe in enumerate(sent)
                    if (target_offset[0] <= bpe[0]) and (bpe[1] <= target_offset[1])]
        if bpe_nums:
            # Bug fix: the original never broke out of the loop after the
            # match, so `sent_num` always ended up pointing at the LAST
            # sentence instead of the matching one.
            return (sent_num, bpe_nums)
    # No match found: preserve the original fallback (last sentence, empty list).
    return (len(bpe_offsets) - 1, [])
|
def generate_seq_indexes(indexes):
    """Yield every index sequence from the cartesian product of `indexes`."""
    if not indexes:
        yield []
        return
    head, tail = indexes[0], indexes[1:]
    for ind in head:
        for rest in generate_seq_indexes(tail):
            yield [ind] + rest
|
def bpe_tokenize(bpe_tokenizer, sentence):
    """BPE-tokenize a sentence of word tokens.

    Each subword inherits its source word's character offsets.
    Returns (bpe_tokens, bpe_offsets) with one (begin, end) pair per subword.
    """
    all_tokens = []
    all_offsets = []
    for word in sentence:
        subwords = bpe_tokenizer.tokenize(word.text)
        all_tokens.extend(subwords)
        all_offsets.extend((word.begin, word.end) for _ in subwords)
    return (all_tokens, all_offsets)
|
def nlargest_indexes(arr, n_top):
    """Indices of the n_top largest values of `arr`, in descending value order."""
    candidate_ids = np.argpartition(arr, -n_top)[-n_top:]
    order = np.argsort(-arr[candidate_ids])
    return candidate_ids[order]
|
def remove_masked_token_subwords(masked_position, bpe_tokens, bpe_offsets):
    """
    If the masked token has been tokenized into multiple subwords (like
    dieting --> diet + ##ing), keep the first subword and remove the others.
    Mutates `bpe_tokens`/`bpe_offsets` in place and collapses the position's
    index list to its first element when trimming happened.
    """
    logger.debug(f'bpe tokens: {bpe_tokens}')
    logger.debug(f'bpe offsets: {bpe_offsets}')
    sent_num, subword_ids = masked_position
    if len(subword_ids) > 1:
        extra = subword_ids[1:]
        del bpe_tokens[sent_num][extra[0]:extra[-1] + 1]
        del bpe_offsets[sent_num][extra[0]:extra[-1] + 1]
        masked_position = (sent_num, subword_ids[0])
    logger.debug(f'bpe offsets: {str(bpe_tokens)}')
    logger.debug(f'bpe offsets: {str(bpe_offsets)}')
    return (masked_position, bpe_tokens, bpe_offsets)
|
def merge_sorted_results(objects_left, scores_left, objects_right, scores_right, max_elems):
    """Merge two score-descending result lists, keeping at most max_elems items.

    Returns (merged_objects, merged_scores), still sorted by descending score.
    """
    merged_objects = []
    merged_scores = []
    li = 0
    ri = 0
    while len(merged_scores) < max_elems:
        room = max_elems - len(merged_scores)
        if li == len(scores_left):
            # Left side exhausted: take what fits from the right.
            merged_objects += objects_right[ri:ri + room]
            merged_scores += scores_right[ri:ri + room]
            break
        if ri == len(scores_right):
            # Right side exhausted: take what fits from the left.
            merged_objects += objects_left[li:li + room]
            merged_scores += scores_left[li:li + room]
            break
        if scores_left[li] > scores_right[ri]:
            merged_objects.append(objects_left[li])
            merged_scores.append(scores_left[li])
            li += 1
        else:
            merged_objects.append(objects_right[ri])
            merged_scores.append(scores_right[ri])
            ri += 1
    return (merged_objects, merged_scores)
|
class MaskedTokenPredictorBert():
    """Predicts replacements for masked tokens with a BERT-style masked LM.

    Supports single-unit predictions, multi-unit (wordpiece-composed) words
    and beam-searched multi-token sequences.
    """

    def __init__(self, model, bpe_tokenizer, max_len=250, mask_in_multiunit=False, device=None, label=0, logits_postprocessor=None, contrast_penalty=0):
        self._model = model
        self._bpe_tokenizer = bpe_tokenizer
        self._max_len = max_len  # hard cap on the BPE sequence length fed to the model
        self._mask_in_multiunit = mask_in_multiunit
        self.device = (device or torch.device('cuda'))
        self.label = label  # segment id (token_type_ids) fed to the model
        self.logits_postprocessor = logits_postprocessor
        self.contrast_penalty = contrast_penalty  # if non-zero, penalize the opposite-segment distribution

    def __call__(self, sentences, masked_position, **kwargs):
        """BPE-tokenize the sentences, map word-level mask positions to BPE
        positions and delegate to generate()."""
        if type(masked_position) is not list:
            # Bug fix: the original wrapped the undefined name `bpe_tokens`
            # here, raising NameError for non-batched input; wrap the actual
            # arguments instead.
            sentences = [sentences]
            masked_position = [masked_position]
        b_masked_pos = []
        b_bpe_tokens = []
        for (sent, mask_pos) in zip(sentences, masked_position):
            (bpe_tokens, bpe_offsets) = bpe_tokenize(self._bpe_tokenizer, sent)
            masked_position = find_bpe_position_by_offset([bpe_offsets], (sent[mask_pos].begin, sent[mask_pos].end))
            # If the masked word was split into several subwords, keep only the first.
            (masked_position, bpe_tokens, _) = remove_masked_token_subwords(masked_position, [bpe_tokens], [bpe_offsets])
            bpe_tokens = bpe_tokens[0]
            logger.debug(f'Bpe tokens: {bpe_tokens}')
            b_bpe_tokens.append(bpe_tokens)
            b_masked_pos.append(masked_position[1])
        return self.generate(b_bpe_tokens, b_masked_pos, **kwargs)

    def generate(self, b_bpe_tokens, b_masked_pos, mask_token=True, n_top=5, n_units=1, n_tokens=[1], fix_multiunit=True, beam_size=10, multiunit_lookup=100, max_multiunit=10, **kwargs):
        """Produce up to n_top candidates per input, mixing single-word and
        multi-token (lengths in n_tokens) predictions merged by score."""
        result_preds = [[] for _ in range(len(b_bpe_tokens))]
        result_scores = [[] for _ in range(len(b_bpe_tokens))]
        if (1 in n_tokens):
            (result_preds, result_scores) = self.predict_single_word(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, max_multiunit=max_multiunit)
        for n_t in n_tokens:
            if (n_t == 1):
                continue
            (pred_tokens, pred_scores) = self.predict_token_sequence(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, seq_len=n_t, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, beam_size=beam_size, max_multiunit=max_multiunit)
            for i in range(len(b_bpe_tokens)):
                (result_preds[i], result_scores[i]) = merge_sorted_results(result_preds[i], result_scores[i], pred_tokens[i], pred_scores[i], n_top)
        return (result_preds, result_scores)

    def predict_single_unit(self, bpe_tokens, masked_position, mask_token, n_top):
        """Run one masked-LM forward pass; return n_top (tokens, scores) per input."""
        bpe_tokens = copy.deepcopy(bpe_tokens)
        max_len = min([(max((len(e) for e in bpe_tokens)) + 2), self._max_len])  # +2 for [CLS]/[SEP]
        token_ids = []
        for i in range(len(bpe_tokens)):
            bpe_tokens[i] = bpe_tokens[i][:(max_len - 2)]
            if mask_token:
                bpe_tokens[i][masked_position[i]] = '[MASK]'
            bpe_tokens[i] = ((['[CLS]'] + bpe_tokens[i]) + ['[SEP]'])
            logger.debug(f'Masked BPE tokens: {bpe_tokens[i]}')
            token_ids.append(self._bpe_tokenizer.convert_tokens_to_ids(bpe_tokens[i]))
        token_ids = pad_sequences(token_ids, maxlen=max_len, dtype='long', truncating='post', padding='post')
        attention_masks_tensor = torch.tensor((token_ids > 0)).long().to(self.device)
        tokens_tensor = torch.tensor(token_ids).to(self.device)
        # Every position gets the configured segment/style label.
        segments_ids = (np.ones_like(token_ids, dtype=int) * self.label)
        segments_tensor = torch.tensor(segments_ids).to(self.device)
        self._model.eval()
        with torch.no_grad():
            target_sent = self._model(tokens_tensor, segments_tensor, attention_masks_tensor)[0]
        if self.contrast_penalty:
            # Contrast with the opposite segment label so that tokens likely
            # under BOTH labels are discounted.
            with torch.no_grad():
                another = self._model(tokens_tensor, (1 - segments_tensor), attention_masks_tensor)[0]
            diff = (torch.softmax(target_sent, (- 1)) - (self.contrast_penalty * torch.softmax(another, (- 1))))
            target_sent = torch.log(torch.clamp(diff, 1e-20))
        target_sent = target_sent.detach().cpu().numpy()
        final_top_scores = []
        final_top_tokens = []
        for i in range(target_sent.shape[0]):
            # masked_position[i] + 1 compensates for the prepended [CLS].
            logits = target_sent[i][(masked_position[i] + 1)]
            logits = self.adjust_logits(logits)
            top_ids = nlargest_indexes(logits, n_top)
            top_scores = [target_sent[i][(masked_position[i] + 1)][j] for j in top_ids]
            top_tokens = self._bpe_tokenizer.convert_ids_to_tokens(top_ids)
            final_top_scores.append(top_scores)
            final_top_tokens.append(top_tokens)
        return (final_top_tokens, final_top_scores)

    def adjust_logits(self, logits):
        """Apply the configured logits post-processor, if any."""
        if self.logits_postprocessor:
            return self.logits_postprocessor(logits)
        return logits

    def predict_single_word(self, bpe_tokens, masked_position, mask_token, n_top, n_units, fix_multiunit, multiunit_lookup, max_multiunit):
        """Predict the best single words per input; when n_units > 1 also mix
        in multi-unit (subword-chained) candidates, merged by score."""
        (pred_tokens, scores) = self.predict_single_unit(bpe_tokens, masked_position, mask_token=mask_token, n_top=n_top)
        final_pred_tokens = []
        final_scores = []
        for j in range(len(pred_tokens)):
            if (n_units > 1):
                # Switch to ascending-score order so bisect insertion works.
                pred_tokens[j] = list(reversed(pred_tokens[j][:multiunit_lookup]))
                scores[j] = list(reversed(scores[j][:multiunit_lookup]))
                seq_list = self.generate_multiunit_token(masked_position[j], bpe_tokens[j], n_top=multiunit_lookup, n_units=n_units)
                for seq in seq_list[:max_multiunit]:
                    (seq_pred, seq_scores) = seq
                    multiunit_token = '_'.join(seq_pred)
                    if fix_multiunit:
                        # Strip subword markers so the token reads as a plain word.
                        multiunit_token = multiunit_token.replace('#', '')
                        multiunit_token = multiunit_token.replace('_', '')
                    # Harmonic mean penalizes chains with one weak link.
                    multiunit_score = scipy.stats.hmean(seq_scores)
                    ind = bisect.bisect(scores[j], multiunit_score)
                    pred_tokens[j].insert(ind, multiunit_token)
                    scores[j].insert(ind, multiunit_score)
                # Back to descending order for the final ranking.
                pred_tokens[j] = list(reversed(pred_tokens[j]))
                scores[j] = list(reversed(scores[j]))
            logger.debug(f'Predicted words: {pred_tokens[j]}')
            final_pred_tokens.append(pred_tokens[j][:n_top])
            final_scores.append(scores[j][:n_top])
        return (final_pred_tokens, final_scores)

    def generate_multiunit_token(self, masked_position, bpe_tokens, n_top, n_units):
        """Compose words out of several BPE units at the masked position.

        Returns a list of (subword_sequence, subword_scores) pairs.
        """
        final_result = []
        final_result_scores = []
        bpe_tokens = copy.deepcopy(bpe_tokens)
        bpe_tokens.insert(masked_position, '[MASK]')
        (predictions, scores) = self.predict_single_unit([bpe_tokens], [(masked_position + 1)], n_top=n_top, mask_token=self._mask_in_multiunit)
        predictions = predictions[0]
        scores = scores[0]
        good_preds = []
        b_bpe_tokens = []
        # Keep only continuation subwords (those starting with '#').
        for (i, pred) in (e for e in enumerate(predictions) if (e[1][0] == '#')):
            tmp = copy.deepcopy(bpe_tokens)
            tmp[(masked_position + 1)] = pred
            b_bpe_tokens.append(tmp)
            good_preds.append((i, pred))
        if (not good_preds):
            return []
        # Batch the follow-up head-unit predictions for efficiency.
        loader = DataLoader(b_bpe_tokens, batch_size=10, collate_fn=(lambda _: _))
        preds = []
        pred_scores = []
        for batch in loader:
            (bb_preds, bb_pred_scores) = self.predict_single_unit(batch, [masked_position for _ in range(len(batch))], mask_token=False, n_top=n_top)
            preds += bb_preds
            pred_scores += bb_pred_scores
        for i in range(len(preds)):
            # Chain = best head prediction + its continuation subword ...
            result = [preds[i][0], good_preds[i][1]]
            result_scores = [pred_scores[i][0], scores[good_preds[i][0]]]
            # ... optionally extended leftwards while heads stay subwords.
            (tail, tail_scores) = self.generate_from_tail(preds[i][0], b_bpe_tokens[i], masked_position, max_subunits=(n_units - 2), n_top=n_top)
            result = (tail + result)
            result_scores = (tail_scores + result_scores)
            final_result.append(result)
            final_result_scores.append(result_scores)
        return list(zip(final_result, final_result_scores))

    def generate_from_tail(self, pred, bpe_tokens, masked_position, max_subunits, n_top):
        """Keep prepending subword units while the newest head is itself a
        continuation ('#...') unit, up to max_subunits extra units."""
        result = []
        result_scores = []
        it = 0
        while ((pred[0] == '#') and (it < max_subunits)):
            bpe_tokens[masked_position] = pred
            bpe_tokens.insert(masked_position, '[MASK]')
            (preds, pred_scores) = self.predict_single_unit([bpe_tokens], [masked_position], n_top=n_top, mask_token=False)
            pred = preds[0][0]
            result.append(pred)
            result_scores.append(pred_scores[0][0])
            it += 1
        # Units were collected right-to-left; reverse into reading order.
        return (list(reversed(result)), list(reversed(result_scores)))

    def generate_variants(self, bpe_tokens, mask_pos, gen_tokens, gen_scores, seq_len):
        """Yield beam hypotheses spliced into copies of the token sequences as
        (token_lists, hypothesis_scores, hypothesis_tokens, new_mask_positions)."""
        batch_size = len(bpe_tokens)
        if (not gen_tokens):
            # Empty beam: yield the untouched input once with zero scores.
            (yield (bpe_tokens, ([0.0] * batch_size), [[] for _ in range(batch_size)], mask_pos))
            return
        for var_num in range(len(gen_tokens[0])):
            if (not gen_tokens[0][var_num]):
                continue
            variant = []
            new_mask = []
            var_t = []
            var_s = []
            for i in range(batch_size):
                new_bpe = copy.deepcopy(bpe_tokens[i])
                # Write the already-generated tokens over the [MASK] slots.
                for seq_num in range(len(gen_tokens[i][var_num])):
                    new_bpe[(mask_pos[i] + seq_num)] = gen_tokens[i][var_num][seq_num]
                var_t.append(gen_tokens[i][var_num])
                var_s.append(gen_scores[i][var_num])
                # The next prediction happens right after the generated prefix.
                new_mask.append((mask_pos[i] + len(gen_tokens[i][var_num])))
                variant.append(new_bpe)
            (yield (variant, var_s, var_t, new_mask))

    def update_beam(self, prev_tokens, prev_score, new_scores, new_tokens, gen_scores, gen_tokens):
        """Merge candidate continuations into the (ascending, fixed-size) beam
        in place, dropping the weakest entry per insertion."""
        for i in range(len(gen_scores)):
            final_gen_score = (prev_score + gen_scores[i])
            insert_pos = bisect.bisect(new_scores, final_gen_score)
            new_scores.insert(insert_pos, final_gen_score)
            del new_scores[0]
            new_tokens.insert(insert_pos, (prev_tokens + [gen_tokens[i]]))
            if (len(new_tokens) > len(new_scores)):
                del new_tokens[0]

    def predict_token_sequence(self, bpe_tokens, masked_pos, mask_token, n_top, seq_len, beam_size, n_units, fix_multiunit, multiunit_lookup, max_multiunit):
        """Beam-search a seq_len-token phrase per masked position; returns
        (tokens, scores), best-first, with length-normalized scores."""
        bpe_tokens = copy.deepcopy(bpe_tokens)
        batch_size = len(bpe_tokens)
        for i in range(batch_size):
            # Reserve room for the remaining seq_len - 1 tokens.
            for seq_num in range((seq_len - 1)):
                bpe_tokens[i].insert((masked_pos[i] + 1), '[MASK]')
        gen_scores = []
        gen_tokens = []
        for seq_num in range(seq_len):
            # Fresh (ascending-order) beams for this step.
            gen_scores_seq = [[0.0 for __ in range(beam_size)] for _ in range(batch_size)]
            gen_tokens_seq = [[[] for __ in range(beam_size)] for _ in range(batch_size)]
            for (variant, variant_score, prev_tokens, new_mask) in self.generate_variants(bpe_tokens, masked_pos, gen_tokens, gen_scores, seq_len=seq_len):
                (top_tokens, top_scores) = self.predict_single_word(variant, new_mask, mask_token=True, n_top=n_top, n_units=n_units, fix_multiunit=fix_multiunit, multiunit_lookup=multiunit_lookup, max_multiunit=max_multiunit)
                for i in range(batch_size):
                    self.update_beam(prev_tokens[i], variant_score[i], gen_scores_seq[i], gen_tokens_seq[i], top_scores[i], top_tokens[i])
            gen_tokens = gen_tokens_seq
            gen_scores = gen_scores_seq
        # Length-normalize so results for different seq_len are comparable.
        gen_scores = [[(e / seq_len) for e in l] for l in gen_scores]
        # Beams are ascending; reverse to best-first.
        return ([list(reversed(e)) for e in gen_tokens], [list(reversed(e)) for e in gen_scores])
|
def add_sys_path(p):
    """Append the absolute form of path `p` to sys.path (deduplicated)."""
    abs_path = os.path.abspath(p)
    print(abs_path)
    if abs_path not in sys.path:
        sys.path.append(abs_path)
|
def create_parsers():
    """Build the NLP pipeline: NLTK English tokenizer + sentence splitter.

    Bug fix: the original had an unreachable `return ppl` (with `ppl`
    undefined) after the first return; it has been removed.
    """
    return PipelineCommon([(ProcessorTokenizerNltkEn(), ['text'], {0: 'tokens'}),
                           (ProcessorSentenceSplitter(), ['tokens'], {0: 'sentences'})])
|
def embed(text):
    """Mean GloVe embedding of the tokens of `text`, as a numpy vector."""
    tokens = glove_embedding.embed(Sentence(text))[0]
    vectors = [tok.embedding.cpu().numpy() for tok in tokens]
    return np.mean(vectors, axis=0)
|
def cosine(v1, v2):
    """Cosine similarity of two vectors (epsilon-stabilized against zero norms)."""
    denom = np.sqrt(sum(v1 ** 2) * sum(v2 ** 2) + 1e-10)
    return np.dot(v1, v2) / denom
|
def group_by_first_token(texts):
    """Tokenize each text and group the id sequences by their first token id."""
    grouped = defaultdict(list)
    for text in texts:
        ids = tokenizer.encode(text, add_special_tokens=False)
        grouped[ids[0]].append(ids)
    return grouped
|
def get_mask_fast(inp: str, bad_words=neg_complex_tokens, min_bad_score=0, aggressive=True):
    """Mark the tokens of `inp` that should be masked for rewriting.

    Returns (token_ids, masks): a 1 x seq_len tensor of token ids and a
    same-shaped 0/1 tensor flagging tokens to replace.  Two passes:
      1. exact-match multi-token phrases from `bad_words` (dict keyed by
         first token id);
      2. if nothing matched (or `aggressive`), whole words whose score from
         the module-level `word2coef` is above `min_bad_score` and at least
         half of the sentence maximum.
    NOTE(review): `bad_words=neg_complex_tokens` is evaluated at definition
    time — the module-level dict must already exist.
    """
    sentences = [tokenizer.encode(inp, add_special_tokens=True)]
    sentences_torch = torch.tensor(sentences)
    masks = torch.zeros_like(sentences_torch)
    for (sent_id, sent) in enumerate(sentences):
        # Pass 1: exact-match known bad phrases starting at each position.
        for (first_tok_id, tok) in enumerate(sent):
            for hypothesis in bad_words.get(tok, []):
                if (sent[first_tok_id:(first_tok_id + len(hypothesis))] == hypothesis):
                    for step in range(len(hypothesis)):
                        masks[(sent_id, (first_tok_id + step))] = 1
        # Pass 2: score whole words with the per-word coefficients.
        if ((sum(masks[sent_id].numpy()) == 0) or aggressive):
            scored_words = []
            for (indices, word) in toks_to_words(sent):
                score = word2coef.get(word)
                if score:  # NOTE(review): a score of exactly 0 is skipped by truthiness
                    scored_words.append([indices, word, score])
            if scored_words:
                max_score = max((s[2] for s in scored_words))
                if (max_score > min_bad_score):
                    # Mask every word scoring at least half the maximum.
                    for (indices, word, score) in scored_words:
                        if (score >= max(min_bad_score, (max_score * 0.5))):
                            masks[(sent_id, indices)] = 1
    return (sentences_torch, masks)
|
def toks_to_words(token_ids):
    """ Merge subword tokens into whole words.

    Yields (indices, word) pairs where `indices` are the positions of the
    subwords composing `word` (with the '##' continuation markers stripped).
    Relies on the module-level vocabulary `v` mapping id -> token text.
    NOTE(review): the final accumulated word is never flushed after the loop;
    for encodings with special tokens this only drops the trailing [SEP],
    but for raw sequences the last word would be lost — confirm callers.
    """
    indices = []
    for (i, token_id) in enumerate(token_ids):
        token_text = v[token_id]
        if token_text.startswith('##'):
            # Continuation subword: extend the current word.
            indices.append(i)
        else:
            if indices:
                # A new word starts: emit the previously accumulated one.
                toks = [v[token_ids[t]] for t in indices]
                word = ''.join(([toks[0]] + [t[2:] for t in toks[1:]]))
                (yield (indices, word))
            indices = [i]
|
def convert_mask(tok_ids, mask_ids, tokenizer, duplicate=False):
    """Collapse a masked span to its first subword and report its position.

    Returns (toks, mask_pos, mask_toks): the token list (without the special
    first/last tokens) with the masked span reduced to one token, the
    position of that token, and the original masked subwords.  With
    duplicate=True the untouched tokens are prepended, separated by [SEP],
    and mask_pos is shifted accordingly.
    """
    original = tokenizer.convert_ids_to_tokens(tok_ids[0])[1:-1]
    mask_flags = mask_ids[0][1:-1]
    collapsed = []
    mask_subwords = []
    mask_pos = None
    seen_mask = False
    for pos, flagged in enumerate(mask_flags):
        if flagged:
            mask_subwords.append(original[pos])
        if not seen_mask:
            if flagged:
                seen_mask = True
                mask_pos = [pos]
            collapsed.append(original[pos])
        elif not flagged:
            # First clean token after the masked span: keep the rest verbatim.
            collapsed.extend(original[pos:])
            break
    toks = [collapsed]
    if duplicate:
        toks = [original + ['[SEP]'] + collapsed]
        mask_pos[0] += len(original) + 1
    return (toks, mask_pos, mask_subwords)
|
def get_hypotheses(text, top=10, duplicate=False, mask_token=False, reorder=True, sim_coef=30):
    """Generate replacement candidates for the toxic span of `text`.

    Each candidate is (fill_words, model_score, similarity-to-original);
    with reorder=True candidates are sorted by score + sim_coef * similarity.
    """
    tokenizer = predictor._bpe_tokenizer
    tok_ids, mask_ids = get_mask_fast(text)
    toks, mask_pos, mask_toks = convert_mask(tok_ids, mask_ids, tokenizer=tokenizer, duplicate=duplicate)
    mask_text = tokenizer.convert_tokens_to_string(mask_toks)
    texts, scores = predictor.generate(toks, mask_pos, n_top=top, n_units=1, n_tokens=[1, 2, 3], max_multiunit=50, fix_multiunit=False, mask_token=mask_token, multiunit_lookup=200)
    texts = texts[0]
    scores = scores[0]
    reference = embed(mask_text)
    candidates = []
    for fill_words, score in zip(texts, scores):
        similarity = cosine(reference, embed(' '.join(fill_words)))
        candidates.append((fill_words, score, similarity))
    if reorder:
        candidates.sort(key=lambda c: c[1] + c[2] * sim_coef, reverse=True)
    return candidates
|
def get_masked_tokens_from_tagged_text(tagged_text):
    """Strip `__...__` markers from a tagged text.

    Returns (masks, clean_text) where masks holds the (begin, end) character
    offsets of the previously tagged chunks inside clean_text.
    """
    masks = []
    clean_parts = []
    position = 0
    for idx, part in enumerate(tagged_text.split('__')):
        if idx % 2 == 1:  # parts at odd indices were wrapped in `__` markers
            masks.append((position, position + len(part)))
        position += len(part)
        clean_parts.append(part)
    return (masks, ''.join(clean_parts))
|
def preprocess_tagged_text(t_text, ppl):
    """Turn a tagged text into (masked token position, sentence holding it)."""
    masked_tokens, clean_text = get_masked_tokens_from_tagged_text(t_text)
    logger.debug(f'Clean text: {clean_text}')
    ann = ppl(clean_text)
    sentences = [CSentence(ann['tokens'], sent) for sent in ann['sentences']]
    if not masked_tokens:
        # No tag present: default to the first token of the first sentence.
        masked_pos = (0, [0])
    else:
        offsets = [[(word.begin, word.end) for word in sent] for sent in sentences]
        masked_pos = find_bpe_position_by_offset(offsets, masked_tokens[0])
    return (masked_pos[1][0], sentences[masked_pos[0]])
|
def process_batch(b_text, predictor, ppl, *args, **kwargs):
    """Run the predictor over a batch of tagged texts.

    Returns (predicted tokens, scores, original masked tokens).
    """
    mask_positions = []
    sentence_tokens = []
    original_masked = []
    for tagged in b_text:
        pos, sent = preprocess_tagged_text(tagged, ppl)
        mask_positions.append(pos)
        sentence_tokens.append(sent)
        original_masked.append(sent[pos])
        logger.info(f'Masked token: {sent[pos]}')
    pred_tokens, scores = predictor(sentence_tokens, mask_positions, *args, **kwargs)
    return (pred_tokens, scores, original_masked)
|
def analyze_tagged_text(tagged_text, predictor, ppl, batch_size=10, progress_bar=None, n_units=0, n_top=5, fix_multiunit=True, mask_token=True, n_tokens=[1], max_multiunit=10, multiunit_lookup=100, contexts=None):
    """Predict replacements for the `__token__` spans of one or more tagged texts.

    `tagged_text` may be a single string or a list; a single string's
    per-text result lists are unwrapped before returning.  `progress_bar`
    may be tqdm/tqdm_notebook or None for no bar.
    Returns (pred_tokens, scores, masked_tokens).
    """
    if type(tagged_text) is not list:
        tagged_text = [tagged_text]
    pred_acc = []
    score_acc = []
    masked_acc = []
    bar = progress_bar if progress_bar is not None else (lambda a: a)
    for offset in bar(range(0, len(tagged_text), batch_size)):
        chunk = tagged_text[offset:offset + batch_size]
        chunk_ctx = contexts[offset:offset + batch_size] if contexts else None
        preds, scores, masked = process_batch(chunk, predictor, ppl, n_units=n_units, n_top=n_top, fix_multiunit=fix_multiunit, mask_token=mask_token, n_tokens=n_tokens, multiunit_lookup=multiunit_lookup, max_multiunit=max_multiunit, Cs=chunk_ctx)
        pred_acc += preds
        score_acc += scores
        masked_acc += masked
        if offset % 5 == 0:
            # Periodically release cached GPU memory between batches.
            gc.collect()
            torch.cuda.empty_cache()
    if len(pred_acc) == 1:
        return (pred_acc[0], score_acc[0], masked_acc[0])
    return (pred_acc, score_acc, masked_acc)
|
def find_bpe_position_by_offset(bpe_offsets, target_offset):
    """Locate the sentence and token indices covering a character offset span.

    :param bpe_offsets: list of sentences, each a list of (begin, end)
        character offsets in document order
    :param target_offset: (begin, end) character span of the target token
    :return: (sentence_index, [indices of tokens inside the span])
    """
    for sent_num, sent in enumerate(bpe_offsets):
        # Skip sentences that end before the target span starts.
        if sent[-1][0] < target_offset[0]:
            continue
        bpe_nums = [bpe_num for bpe_num, bpe in enumerate(sent)
                    if (target_offset[0] <= bpe[0]) and (bpe[1] <= target_offset[1])]
        if bpe_nums:
            # Bug fix: the original never broke out of the loop after the
            # match, so `sent_num` always ended up pointing at the LAST
            # sentence instead of the matching one.
            return (sent_num, bpe_nums)
    # No match found: preserve the original fallback (last sentence, empty list).
    return (len(bpe_offsets) - 1, [])
|
def generate_seq_indexes(indexes):
    """Yield every index sequence from the cartesian product of `indexes`."""
    if not indexes:
        yield []
        return
    for first in indexes[0]:
        for suffix in generate_seq_indexes(indexes[1:]):
            yield [first] + suffix
|
class Args():
    """Default generation settings for the GPT-2 based detoxifier."""

    def __init__(self):
        # Model selection.
        self.model_type = 'gpt2'
        self.model_name_or_path = 'sberbank-ai/rugpt3large_based_on_gpt2'
        # Prompt and output shape.
        self.prompt = ''
        self.length = 50
        self.stop_token = '</s>'
        # Sampling parameters.
        self.k = 5
        self.p = 0.95
        self.temperature = 1
        self.repetition_penalty = 1
        self.num_return_sequences = 1
        self.seed = 42
|
class detoxGPT():
    """Detoxifies text with a fine-tuned ruGPT-3 model prompted as
    '<toxic text> >>> <neutral rewrite>'."""

    def __init__(self, device='cuda', model_path='rugpt3_large_200'):
        self.args = Args()
        self.args.device = device
        self.args.model_name_or_path = model_path
        if (not os.path.isdir(self.args.model_name_or_path)):
            # NOTE(review): downloads and unpacks fine-tuned weights from a
            # hard-coded Google Drive id via a shell command.
            print('Loading fine-tuned weights.')
            os.system(('gdown --id 1RYUku5_MWXZF2xlIpOTZmi9_DH-SG0lz && mkdir rugpt3_large_200 && unzip rugpt3_large_200.zip -d ' + self.args.model_name_or_path))
        (model_class, tokenizer_class) = MODEL_CLASSES[self.args.model_type]
        self.tokenizer = tokenizer_class.from_pretrained(self.args.model_name_or_path)
        self.model = model_class.from_pretrained(self.args.model_name_or_path)
        self.model.to(self.args.device)

    def generate_sequences(self, prompt_text, delimiter='>>>'):
        """Sample continuations of `prompt_text` (or of a .txt file's
        contents) with the prompt prefix and anything after
        `delimiter`/the first newline stripped."""
        self.args.prompt_text = prompt_text
        if prompt_text.endswith('.txt'):
            # A path was passed instead of raw text: read the prompt from file.
            with open(prompt_text, 'r') as f:
                prompt_text = f.read()
        encoded_prompt = self.tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors='pt')
        encoded_prompt = encoded_prompt.to(self.args.device)
        output_sequences = self.model.generate(input_ids=encoded_prompt, max_length=(self.args.length + len(encoded_prompt[0])), temperature=self.args.temperature, top_k=self.args.k, top_p=self.args.p, repetition_penalty=self.args.repetition_penalty, do_sample=True, num_return_sequences=self.args.num_return_sequences)
        if (len(output_sequences.shape) > 2):
            output_sequences.squeeze_()
        generated_sequences = []
        for (generated_sequence_idx, generated_sequence) in enumerate(output_sequences):
            text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
            # Cut at the stop token, then drop the decoded prompt prefix.
            text = text[:(text.find(self.args.stop_token) if self.args.stop_token else None)]
            text = text[len(self.tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
            if (delimiter in text):
                text = text.split(delimiter)[0].rstrip()
            else:
                text = text.split('\n')[0].rstrip()
            generated_sequences.append(text)
        return generated_sequences

    def detoxify(self, text, num_return_sequences=10, k=3, p=0.5, temperature=10.0):
        """Return one detoxified rewrite of `text`, truncated to the
        generation length."""
        results = []
        self.args.num_return_sequences = num_return_sequences
        self.args.k = k
        self.args.p = p
        self.args.temperature = temperature
        self.args.length = (len(text) + 10)
        generated_sequences = self.generate_sequences((text + ' >>> '))
        results.append([re.sub('<pad>', '', x) for x in generated_sequences])
        # NOTE(review): only the first generated sequence is returned; the
        # rest are produced and discarded.
        return results[0][0][:self.args.length]
|
def get_pooling_types_dict():
    """Get dictionary mapping pooling type number to type name."""
    desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
    return {value.number: name for name, value in desc.values_by_name.items()}
|
def get_edge_label(layer):
    """Define edge label based on layer type."""
    if layer.type == 'Data':
        return 'Batch ' + str(layer.data_param.batch_size)
    if layer.type in ('Convolution', 'Deconvolution'):
        return str(layer.convolution_param.num_output)
    if layer.type == 'InnerProduct':
        return str(layer.inner_product_param.num_output)
    # Other layer types get an empty (quoted) label.
    return '""'
|
def get_layer_label(layer, rankdir):
    """Build the pydot node label for `layer`.

    Parameters
    ----------
    layer : caffe layer message
    rankdir : {'LR', 'TB', 'BT'}
        Direction of graph layout.

    Returns
    -------
    string
        A label for the current layer.
    """
    # Vertical layouts separate fields with spaces; horizontal layouts stack
    # them with (escaped) newlines.
    separator = ' ' if rankdir in ('TB', 'BT') else '\\n'
    if layer.type in ('Convolution', 'Deconvolution'):
        fields = (layer.name, separator, layer.type, separator,
                  layer.convolution_param.kernel_size, separator,
                  layer.convolution_param.stride, separator,
                  layer.convolution_param.pad)
        return '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' % fields
    if layer.type == 'Pooling':
        pooling_types_dict = get_pooling_types_dict()
        fields = (layer.name, separator,
                  pooling_types_dict[layer.pooling_param.pool], layer.type, separator,
                  layer.pooling_param.kernel_size, separator,
                  layer.pooling_param.stride, separator,
                  layer.pooling_param.pad)
        return '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' % fields
    return '"%s%s(%s)"' % (layer.name, separator, layer.type)
|
def choose_color_by_layertype(layertype):
    """Return the fill color for a node of the given layer type.

    (De)convolution, pooling and inner-product layers get distinctive
    colors; every other type falls back to the default blue.
    """
    colors = {
        'Convolution': '#FF5050',
        'Deconvolution': '#FF5050',
        'Pooling': '#FF9900',
        'InnerProduct': '#CC33FF',
    }
    return colors.get(layertype, '#6495ED')
|
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
    """Create a pydot graph which represents the `caffe_net`.

    Parameters
    ----------
    caffe_net : object
        A caffe.proto.caffe_pb2.NetParameter protocol buffer.
    rankdir : {'LR', 'TB', 'BT'}
        Direction of graph layout.
    label_edges : boolean, optional
        Label the edges (default is True).

    Returns
    -------
    pydot graph object
        One node per layer and per blob, with an edge for every
        bottom/top connection.
    """
    pydot_graph = pydot.Dot(caffe_net.name, graph_type='digraph', rankdir=rankdir)
    pydot_nodes = {}
    pydot_edges = []
    for layer in caffe_net.layer:
        node_label = get_layer_label(layer, rankdir)
        node_name = ('%s_%s' % (layer.name, layer.type))
        if ((len(layer.bottom) == 1) and (len(layer.top) == 1)
                and (layer.bottom[0] == layer.top[0])):
            # In-place layer (bottom blob == top blob): use the neuron style.
            pydot_nodes[node_name] = pydot.Node(node_label, **NEURON_LAYER_STYLE)
        else:
            # BUGFIX: copy the default style before customizing it. The
            # original assigned `layer_style = LAYER_STYLE_DEFAULT` and then
            # mutated it, so the fillcolor chosen for one layer leaked into
            # the shared module-level dict and every subsequent call.
            layer_style = dict(LAYER_STYLE_DEFAULT)
            layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
            pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
        for bottom_blob in layer.bottom:
            pydot_nodes[(bottom_blob + '_blob')] = pydot.Node(('%s' % bottom_blob), **BLOB_STYLE)
            edge_label = '""'
            pydot_edges.append({'src': (bottom_blob + '_blob'),
                                'dst': node_name,
                                'label': edge_label})
        for top_blob in layer.top:
            pydot_nodes[(top_blob + '_blob')] = pydot.Node(('%s' % top_blob))
            if label_edges:
                edge_label = get_edge_label(layer)
            else:
                edge_label = '""'
            pydot_edges.append({'src': node_name,
                                'dst': (top_blob + '_blob'),
                                'label': edge_label})
    # Add all nodes first, then edges, so every edge endpoint exists.
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edge in pydot_edges:
        pydot_graph.add_edge(pydot.Edge(pydot_nodes[edge['src']],
                                        pydot_nodes[edge['dst']],
                                        label=edge['label']))
    return pydot_graph
|
def draw_net(caffe_net, rankdir, ext='png'):
    """Draw a caffe net and return the image string encoded using the
    given extension.

    Parameters
    ----------
    caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
    rankdir : {'LR', 'TB', 'BT'}
        Direction of graph layout.
    ext : string, optional
        The image extension (the default is 'png').

    Returns
    -------
    string :
        Rendered representation of the graph.
    """
    graph = get_pydot_graph(caffe_net, rankdir)
    return graph.create(format=ext)
|
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
    """Draw a caffe net and save it to file, using the file extension as
    the output format. Use '.raw' to output raw text that you can feed to
    graphviz manually.

    Parameters
    ----------
    caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
    filename : string
        The path to a file where the network's visualization will be stored.
    rankdir : {'LR', 'TB', 'BT'}
        Direction of graph layout.
    """
    # Everything after the last dot selects the output format.
    extension = filename[(filename.rfind('.') + 1):]
    with open(filename, 'wb') as out:
        out.write(draw_net(caffe_net, rankdir, extension))
|
def param_name_dict():
    """Find out the correspondence between layer type names and parameter
    field names (e.g. 'Convolution' -> 'convolution')."""
    layer = caffe_pb2.LayerParameter()
    mapping = {}
    for field in dir(layer):
        # Parameter fields all follow the '<name>_param' naming scheme.
        if not field.endswith('_param'):
            continue
        # The corresponding message type is named '<TypeName>Parameter'.
        type_name = type(getattr(layer, field)).__name__
        mapping[type_name[:-len('Parameter')]] = field[:-len('_param')]
    return mapping
|
def to_proto(*tops):
    """Generate a NetParameter that contains all layers needed to compute
    all of the given Tops."""
    collected = OrderedDict()
    name_counter = Counter()
    for top in tops:
        # Each Top walks its producing Function's dependency graph and
        # records every required layer in `collected`.
        top.fn._to_proto(collected, {}, name_counter)
    net = caffe_pb2.NetParameter()
    net.layer.extend(collected.values())
    return net
|
def assign_proto(proto, name, val):
    """Assign a Python object to a protobuf message field, recursively.

    Lists become repeated fields/messages (a list of dicts is treated as
    a repeated message field, anything else as a repeated scalar field),
    dicts become sub-messages, and other types are assigned directly.

    Parameters
    ----------
    proto : protobuf message
        Target message whose field `name` is filled in.
    name : str
        Field name on `proto`.
    val : list | dict | scalar
        Value to assign.
    """
    if isinstance(val, list):
        # Guard against empty lists: the original indexed val[0]
        # unconditionally and raised IndexError for [].
        if val and isinstance(val[0], dict):
            # Repeated message field: add one sub-message per dict.
            for item in val:
                proto_item = getattr(proto, name).add()
                for (k, v) in six.iteritems(item):
                    assign_proto(proto_item, k, v)
        else:
            # Repeated scalar field; extending with [] is a harmless no-op.
            getattr(proto, name).extend(val)
    elif isinstance(val, dict):
        # Sub-message: assign each entry recursively.
        for (k, v) in six.iteritems(val):
            assign_proto(getattr(proto, name), k, v)
    else:
        setattr(proto, name, val)
|
class Top(object):
    """A single output blob of a layer (a layer may produce several)."""

    def __init__(self, fn, n):
        # fn: the Function (layer) producing this blob; n: its output index.
        self.fn = fn
        self.n = n

    def to_proto(self):
        """Generate a NetParameter that contains all layers needed to
        compute this top."""
        return to_proto(self)

    def _to_proto(self, layers, names, autonames):
        # Delegate to the producing Function, which owns the real logic.
        return self.fn._to_proto(layers, names, autonames)
|
class Function(object):
'A Function specifies a layer, its parameters, and its inputs (which\n are Tops from other layers).'
def __init__(self, type_name, inputs, params):
self.type_name = type_name
self.inputs = inputs
self.params = params
self.ntop = self.params.get('ntop', 1)
if ('ntop' in self.params):
del self.params['ntop']
self.in_place = self.params.get('in_place', False)
if ('in_place' in self.params):
del self.params['in_place']
self.tops = tuple((Top(self, n) for n in range(self.ntop)))
def _get_name(self, names, autonames):
if ((self not in names) and (self.ntop > 0)):
names[self] = self._get_top_name(self.tops[0], names, autonames)
elif (self not in names):
autonames[self.type_name] += 1
names[self] = (self.type_name + str(autonames[self.type_name]))
return names[self]
def _get_top_name(self, top, names, autonames):
if (top not in names):
autonames[top.fn.type_name] += 1
names[top] = (top.fn.type_name + str(autonames[top.fn.type_name]))
return names[top]
def _to_proto(self, layers, names, autonames):
if (self in layers):
return
bottom_names = []
for inp in self.inputs:
inp._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_top_name(top, names, autonames))
layer.name = self._get_name(names, autonames)
for (k, v) in six.iteritems(self.params):
if k.endswith('param'):
assign_proto(layer, k, v)
else:
try:
assign_proto(getattr(layer, (_param_names[self.type_name] + '_param')), k, v)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
|
class NetSpec(object):
    """A set of Tops, assigned directly as attributes.

    Calling NetSpec.to_proto generates a NetParameter containing all of
    the layers needed to produce every assigned Top, using the assigned
    attribute names.
    """

    def __init__(self):
        # Bypass our own __setattr__ so 'tops' lives in the instance dict
        # instead of being recorded as a top itself.
        super(NetSpec, self).__setattr__('tops', OrderedDict())

    def __setattr__(self, name, value):
        self.tops[name] = value

    def __getattr__(self, name):
        return self.tops[name]

    def to_proto(self):
        """Serialize every assigned Top into a single NetParameter."""
        # Invert the tops mapping so each Top knows its assigned name.
        names = {top: name for (name, top) in six.iteritems(self.tops)}
        autonames = Counter()
        layers = OrderedDict()
        for (name, top) in six.iteritems(self.tops):
            top._to_proto(layers, names, autonames)
        net = caffe_pb2.NetParameter()
        net.layer.extend(layers.values())
        return net
|
class Layers(object):
    """A pseudo-module which generates functions that specify layers.

    E.g. Layers().Convolution(bottom, kernel_size=3) will produce a Top
    specifying a 3x3 convolution applied to bottom.
    """

    def __getattr__(self, name):
        def layer_fn(*args, **kwargs):
            fn = Function(name, args, kwargs)
            # Return the Function itself when it has no tops, a single Top
            # when it has exactly one, and the tuple of Tops otherwise.
            if fn.ntop == 0:
                return fn
            return fn.tops[0] if fn.ntop == 1 else fn.tops
        return layer_fn
|
class Parameters(object):
    """A pseudo-module which generates constants used in layer parameters.

    E.g. Parameters().Pooling.MAX is the value used to specify max pooling.
    """

    def __getattr__(self, name):
        class Param():
            def __getattr__(self, param_name):
                # Look the constant up on the caffe_pb2 '<name>Parameter'
                # message type.
                return getattr(getattr(caffe_pb2, (name + 'Parameter')), param_name)
        return Param()
|
class TestLayerTypeList(unittest.TestCase):
    """Sanity check for caffe.layer_type_list()."""

    def test_standard_types(self):
        # A few layer types that every caffe build should register.
        for type_name in ('Data', 'Convolution', 'InnerProduct'):
            self.assertIn(type_name, caffe.layer_type_list(),
                          ('%s not in layer_type_list()' % type_name))
|
def simple_net_file(num_output):
    """Write a simple net prototxt (based on test_net.cpp) to a temporary
    file and return its name.

    Parameters
    ----------
    num_output : int
        num_output of the final InnerProduct ('ip') layer.

    Returns
    -------
    str
        Path of the temporary prototxt file; the caller is responsible
        for removing it.
    """
    # delete=False so the file survives close(); callers hand the path to
    # caffe.Net and delete it themselves. The context manager guarantees
    # the handle is closed even if write() fails.
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write((("name: 'testnet' force_backward: true\n layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'\n dummy_data_param { num: 5 channels: 2 height: 3 width: 4\n num: 5 channels: 1 height: 1 width: 1\n data_filler { type: 'gaussian' std: 1 }\n data_filler { type: 'constant' } } }\n layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'\n convolution_param { num_output: 11 kernel_size: 2 pad: 3\n weight_filler { type: 'gaussian' std: 1 }\n bias_filler { type: 'constant' value: 2 } }\n param { decay_mult: 1 } param { decay_mult: 0 }\n }\n layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip'\n inner_product_param { num_output: " + str(num_output)) + "\n weight_filler { type: 'gaussian' std: 2.5 }\n bias_filler { type: 'constant' value: -3 } } }\n layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label'\n top: 'loss' }"))
        return f.name
|
class TestNet(unittest.TestCase):
    """Exercise basic caffe.Net behavior on a small generated network."""

    def setUp(self):
        self.num_output = 13
        net_file = simple_net_file(self.num_output)
        self.net = caffe.Net(net_file, caffe.TRAIN)
        # Fill the label blob with random class indices so the loss is
        # well-defined during forward/backward.
        self.net.blobs['label'].data[...] = np.random.randint(
            self.num_output, size=self.net.blobs['label'].data.shape)
        os.remove(net_file)

    def test_memory(self):
        """Check that holding onto blob data beyond the life of a Net is OK"""
        params = []
        for blob_vec in six.itervalues(self.net.params):
            params.extend(blob_vec)
        blobs = self.net.blobs.values()
        del self.net
        # Touch every buffer; this would crash if the underlying storage
        # had been freed along with the net.
        total = 0
        for param in params:
            total += (param.data.sum() + param.diff.sum())
        for blob in blobs:
            total += (blob.data.sum() + blob.diff.sum())

    def test_forward_backward(self):
        self.net.forward()
        self.net.backward()

    def test_inputs_outputs(self):
        self.assertEqual(self.net.inputs, [])
        self.assertEqual(self.net.outputs, ['loss'])

    def test_save_and_read(self):
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.close()
        self.net.save(f.name)
        # Reload the saved weights into a second, identically-shaped net
        # and check the parameters round-tripped exactly.
        net_file = simple_net_file(self.num_output)
        net2 = caffe.Net(net_file, f.name, caffe.TRAIN)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for idx in range(len(self.net.params[name])):
                self.assertEqual(
                    abs((self.net.params[name][idx].data
                         - net2.params[name][idx].data)).sum(), 0)
|
# NOTE: the three lines below are web-page scraping residue (dataset-site
# boilerplate), not source code; commented out to keep the file parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.