code stringlengths 17 6.64M |
|---|
def format_metric(metric):
    """
    Convert evaluation metric value(s) into a comma-separated string.

    Floats are rendered with six decimal places, ints as plain integers;
    values of any other type (including bool, as before) are skipped.

    :param metric: a single number, or a tuple/list of numbers
    :return: comma-joined string representation
    """
    if not isinstance(metric, (tuple, list)):
        metric = [metric]
    format_str = []
    for m in metric:
        # np.float / np.int were removed in NumPy >= 1.20; the abstract scalar
        # base classes np.floating / np.integer cover all sized variants.
        if isinstance(m, (float, np.floating)):
            format_str.append('%.6f' % m)
        elif isinstance(m, (int, np.integer)) and not isinstance(m, bool):
            # bool is excluded to match the original `type(m) is int` behavior.
            format_str.append('%d' % m)
    return ','.join(format_str)
|
def shuffle_in_unison_scary(data):
    """
    Shuffle every array of a dataset dict in place, using the same
    permutation for all of them (rows stay aligned across arrays).

    :param data: dict mapping column name -> numpy array
    :return: the same dict, with its arrays shuffled in unison
    """
    # Capture the RNG state once, and restore it before every shuffle so each
    # array receives the identical permutation.
    saved_state = np.random.get_state()
    for array in data.values():
        np.random.set_state(saved_state)
        np.random.shuffle(array)
    return data
|
def best_result(metric, results_list):
    """
    Pick the best value from a list of results for the given metric.

    Metrics in LOWER_METRIC_LIST are better when lower, so their minimum is
    returned; for every other metric the maximum is returned.

    :param metric: metric name, or a tuple/list whose first entry is used
    :param results_list: list of result values
    :return: the best result according to the metric's direction
    """
    if type(metric) in (list, tuple):
        metric = metric[0]
    chooser = min if (metric in LOWER_METRIC_LIST) else max
    return chooser(results_list)
|
def strictly_increasing(l):
    """
    Test whether the sequence is strictly monotonically increasing.

    :param l: sequence of comparable values
    :return: True if every element is strictly smaller than its successor;
        vacuously True for sequences of length < 2
    """
    for left, right in zip(l, l[1:]):
        if left >= right:
            return False
    return True
|
def strictly_decreasing(l):
    """
    Test whether the sequence is strictly monotonically decreasing.

    :param l: sequence of comparable values
    :return: True if every element is strictly greater than its successor;
        vacuously True for sequences of length < 2
    """
    for left, right in zip(l, l[1:]):
        if left <= right:
            return False
    return True
|
def non_increasing(l):
    """
    Test whether the sequence is monotonically non-increasing.

    :param l: sequence of comparable values
    :return: True if no element is smaller than its successor;
        vacuously True for sequences of length < 2
    """
    for left, right in zip(l, l[1:]):
        if left < right:
            return False
    return True
|
def non_decreasing(l):
    """
    Test whether the sequence is monotonically non-decreasing.

    :param l: sequence of comparable values
    :return: True if no element is greater than its successor;
        vacuously True for sequences of length < 2
    """
    for left, right in zip(l, l[1:]):
        if left > right:
            return False
    return True
|
def monotonic(l):
    """
    Test whether the sequence is monotonic in either direction.

    :param l: sequence of comparable values
    :return: True if the sequence is non-decreasing or non-increasing
    """
    return non_decreasing(l) or non_increasing(l)
|
def numpy_to_torch(d, gpu=True, requires_grad=True):
    """
    Convert a numpy array to a pytorch tensor; move it to GPU when requested
    and available.

    :param d: numpy array
    :param gpu: whether to put the tensor on the GPU (if one is available)
    :param requires_grad: whether a floating-point tensor requires grad
    :return: torch tensor
    """
    t = torch.from_numpy(d)
    # The original check `d.dtype is np.float` could never be True (a numpy
    # dtype object is not the builtin float type), so requires_grad was never
    # applied — and np.float itself was removed in NumPy >= 1.20.
    # np.issubdtype matches every floating dtype; only floating tensors may
    # require gradients in pytorch.
    if np.issubdtype(d.dtype, np.floating):
        t.requires_grad = requires_grad
    if gpu:
        t = tensor_to_gpu(t)
    return t
|
def tensor_to_gpu(t):
    """
    Move a tensor onto the GPU when one is available; otherwise return it
    unchanged.

    :param t: torch tensor
    :return: the tensor, on GPU if torch sees any CUDA device
    """
    if torch.cuda.device_count() == 0:
        return t
    return t.cuda()
|
def get_init_paras_dict(class_name, paras_dict):
    """
    Collect the __init__ parameter names found anywhere along the MRO of a
    class and pull the corresponding values out of paras_dict.

    :param class_name: class whose constructors are inspected
    :param paras_dict: dict that must contain a value for every collected name
    :return: dict {parameter name: value}, keys in sorted order, 'self' excluded
    """
    names = set()
    for klass in inspect.getmro(class_name):
        names.update(inspect.getfullargspec(klass.__init__).args)
    names.discard('self')
    # KeyError here (as in the original) means a required parameter is missing.
    return {name: paras_dict[name] for name in sorted(names)}
|
def check_dir_and_mkdir(path):
    """
    Ensure the directory implied by `path` exists, creating it if necessary.

    `path` is treated as a directory itself when its last component contains
    no '.' or when it ends with '/'; otherwise its parent directory is used.

    :param path: file or directory path
    :return: None
    """
    if (os.path.basename(path).find('.') == -1) or path.endswith('/'):
        dirname = path
    else:
        dirname = os.path.dirname(path)
    # A bare filename like 'file.txt' yields dirname == '' — the original
    # crashed on os.makedirs(''); there is nothing to create in that case.
    if dirname and not os.path.exists(dirname):
        print('make dirs:', dirname)
        # exist_ok guards against the directory appearing between the exists()
        # check above and this call (TOCTOU race).
        os.makedirs(dirname, exist_ok=True)
    return
|
class DataLoader(object):
    """
    Only responsible for loading the dataset files and recording information
    about the dataset (per-column value ranges, user/item counts, per-user
    interaction histories).
    """

    @staticmethod
    def parse_data_args(parser):
        """
        Add the dataset-related command-line arguments of the data loader.

        :param parser: argparse.ArgumentParser
        :return: the same parser with the arguments added
        """
        parser.add_argument('--path', type=str, default=DATASET_DIR, help='Input data dir.')
        parser.add_argument('--dataset', type=str, default='ml100k-1-5', help='Choose a dataset.')
        parser.add_argument('--sep', type=str, default=SEP, help='sep of csv file.')
        parser.add_argument('--seq_sep', type=str, default=SEQ_SEP, help='sep of sequences in csv file.')
        parser.add_argument('--label', type=str, default=LABEL, help='name of dataset label column.')
        parser.add_argument('--drop_neg', type=int, default=1, help='whether drop all negative samples when ranking')
        return parser

    def __init__(self, path, dataset, label=LABEL, load_data=True, sep=SEP, seq_sep=SEQ_SEP):
        """
        Initialization.

        :param path: directory containing all datasets
        :param dataset: name of the dataset (also its sub-directory name)
        :param label: name of the label column
        :param load_data: whether to load the data files themselves;
            otherwise only the dataset info file is loaded
        :param sep: separator token of the csv files
        :param seq_sep: internal separator of variable-length columns,
            e.g. a user history record could be '1,2,4'
        """
        self.dataset = dataset
        self.path = os.path.join(path, dataset)
        # Pre-computed paths of every dataset file; the *_pos/*_neg files hold
        # per-user positive/negative interaction histories.
        self.train_file = os.path.join(self.path, (dataset + TRAIN_SUFFIX))
        self.validation_file = os.path.join(self.path, (dataset + VALIDATION_SUFFIX))
        self.test_file = os.path.join(self.path, (dataset + TEST_SUFFIX))
        self.info_file = os.path.join(self.path, (dataset + INFO_SUFFIX))
        self.user_file = os.path.join(self.path, (dataset + USER_SUFFIX))
        self.item_file = os.path.join(self.path, (dataset + ITEM_SUFFIX))
        self.train_pos_file = os.path.join(self.path, (dataset + TRAIN_POS_SUFFIX))
        self.validation_pos_file = os.path.join(self.path, (dataset + VALIDATION_POS_SUFFIX))
        self.test_pos_file = os.path.join(self.path, (dataset + TEST_POS_SUFFIX))
        self.train_neg_file = os.path.join(self.path, (dataset + TRAIN_NEG_SUFFIX))
        self.validation_neg_file = os.path.join(self.path, (dataset + VALIDATION_NEG_SUFFIX))
        self.test_neg_file = os.path.join(self.path, (dataset + TEST_NEG_SUFFIX))
        (self.sep, self.seq_sep) = (sep, seq_sep)
        self.load_data = load_data
        self.label = label
        (self.train_df, self.validation_df, self.test_df) = (None, None, None)
        # Order matters: histories and info are derived from the loaded frames.
        self._load_user_item()
        self._load_data()
        self._load_his()
        self._load_info()
        # Re-write the info file whenever the data was (re)loaded or the file
        # is missing.
        if ((not os.path.exists(self.info_file)) or self.load_data):
            self._save_info()

    def _load_user_item(self):
        """
        Load the csv feature files of users and items, when present.
        """
        (self.user_df, self.item_df) = (None, None)
        # NOTE(review): user/item files are read with a hard-coded tab
        # separator rather than self.sep — confirm this is intentional.
        if (os.path.exists(self.user_file) and self.load_data):
            logging.info('load user csv...')
            self.user_df = pd.read_csv(self.user_file, sep='\t')
        if (os.path.exists(self.item_file) and self.load_data):
            logging.info('load item csv...')
            self.item_df = pd.read_csv(self.item_file, sep='\t')

    def _load_data(self):
        """
        Load the training, validation and testing csv files (when present),
        logging their sizes and label value distributions.
        """
        if (os.path.exists(self.train_file) and self.load_data):
            logging.info('load train csv...')
            self.train_df = pd.read_csv(self.train_file, sep=self.sep)
            logging.info(('size of train: %d' % len(self.train_df)))
            if (self.label in self.train_df):
                logging.info(('train label: ' + str(dict(Counter(self.train_df[self.label]).most_common()))))
        if (os.path.exists(self.validation_file) and self.load_data):
            logging.info('load validation csv...')
            self.validation_df = pd.read_csv(self.validation_file, sep=self.sep)
            logging.info(('size of validation: %d' % len(self.validation_df)))
            if (self.label in self.validation_df):
                logging.info(('validation label: ' + str(dict(Counter(self.validation_df[self.label]).most_common()))))
        if (os.path.exists(self.test_file) and self.load_data):
            logging.info('load test csv...')
            self.test_df = pd.read_csv(self.test_file, sep=self.sep)
            logging.info(('size of test: %d' % len(self.test_df)))
            if (self.label in self.test_df):
                logging.info(('test label: ' + str(dict(Counter(self.test_df[self.label]).most_common()))))

    def _save_info(self):
        """
        Persist the per-column max/min dicts as two json lines in the info
        file (read back by _load_info).
        """
        def json_type(o):
            # Column statistics computed by pandas come back as np.int64,
            # which json cannot serialize by default.
            if isinstance(o, np.int64):
                return int(o)
            raise TypeError
        max_json = json.dumps(self.column_max, default=json_type)
        min_json = json.dumps(self.column_min, default=json_type)
        # NOTE(review): the file handle is never closed explicitly, and
        # writing os.linesep in text mode produces '\r\r\n' on Windows —
        # consider `with open(...)` and a plain '\n'.
        out_f = open(self.info_file, 'w')
        out_f.write(((max_json + os.linesep) + min_json))
        logging.info(('Save dataset info to ' + self.info_file))

    def _load_info(self):
        """
        Load the dataset info (per-column max/min and derived counts); when
        the info file is missing, or the data files were just loaded, compute
        the statistics from the loaded dataframes instead.
        """
        (max_dict, min_dict) = ({}, {})
        if ((not os.path.exists(self.info_file)) or self.load_data):
            # Aggregate per-column max/min across every available dataframe.
            for df in [self.train_df, self.validation_df, self.test_df, self.user_df, self.item_df]:
                if (df is None):
                    continue
                for c in df.columns:
                    if (c not in max_dict):
                        max_dict[c] = df[c].max()
                    else:
                        max_dict[c] = max(df[c].max(), max_dict[c])
                    if (c not in min_dict):
                        min_dict[c] = df[c].min()
                    else:
                        min_dict[c] = min(df[c].min(), min_dict[c])
        else:
            # First line holds the max dict, second line the min dict
            # (see _save_info). NOTE(review): the file handle is not closed
            # explicitly.
            lines = open(self.info_file, 'r').readlines()
            max_dict = json.loads(lines[0])
            min_dict = json.loads(lines[1])
        self.column_max = max_dict
        self.column_min = min_dict
        self.label_max = self.column_max[self.label]
        self.label_min = self.column_min[self.label]
        # NOTE(review): the '%d' format truncates non-integer label bounds in
        # this log line.
        logging.info(('label: %d-%d' % (self.label_min, self.label_max)))
        # ids are assumed to be 0-based, so count = max id + 1.
        (self.user_num, self.item_num) = (0, 0)
        if (UID in self.column_max):
            self.user_num = (self.column_max[UID] + 1)
        if (IID in self.column_max):
            self.item_num = (self.column_max[IID] + 1)
        logging.info(('# of users: %d' % self.user_num))
        logging.info(('# of items: %d' % self.item_num))
        # Feature columns are identified by name prefix:
        # 'u_' = user, 'i_' = item, 'c_' = context.
        self.user_features = [f for f in self.column_max.keys() if f.startswith('u_')]
        logging.info(('# of user features: %d' % len(self.user_features)))
        self.item_features = [f for f in self.column_max.keys() if f.startswith('i_')]
        logging.info(('# of item features: %d' % len(self.item_features)))
        self.context_features = [f for f in self.column_max.keys() if f.startswith('c_')]
        logging.info(('# of context features: %d' % len(self.context_features)))
        self.features = ((self.context_features + self.user_features) + self.item_features)
        logging.info(('# of features: %d' % len(self.features)))

    def _load_his(self):
        """
        Load the per-user interaction histories of the dataset merged by uid
        — two columns, 'uid' and 'iids' — creating the positive/negative
        history csv files first when they are missing.
        """
        # Histories only make sense when the train df with uid/iid was loaded.
        if ((not self.load_data) or (UID not in self.train_df) or (IID not in self.train_df)):
            return
        if (not os.path.exists(self.train_pos_file)):
            logging.info('building train pos history csv...')
            train_pos_df = group_user_interactions_df(self.train_df, pos_neg=1, label=self.label, seq_sep=self.seq_sep)
            train_pos_df.to_csv(self.train_pos_file, index=False, sep=self.sep)
        if (not os.path.exists(self.validation_pos_file)):
            logging.info('building validation pos history csv...')
            validation_pos_df = group_user_interactions_df(self.validation_df, pos_neg=1, label=self.label, seq_sep=self.seq_sep)
            validation_pos_df.to_csv(self.validation_pos_file, index=False, sep=self.sep)
        if (not os.path.exists(self.test_pos_file)):
            logging.info('building test pos history csv...')
            test_pos_df = group_user_interactions_df(self.test_df, pos_neg=1, label=self.label, seq_sep=self.seq_sep)
            test_pos_df.to_csv(self.test_pos_file, index=False, sep=self.sep)
        if (not os.path.exists(self.train_neg_file)):
            logging.info('building train neg history csv...')
            train_neg_df = group_user_interactions_df(self.train_df, pos_neg=0, label=self.label, seq_sep=self.seq_sep)
            train_neg_df.to_csv(self.train_neg_file, index=False, sep=self.sep)
        if (not os.path.exists(self.validation_neg_file)):
            logging.info('building validation neg history csv...')
            validation_neg_df = group_user_interactions_df(self.validation_df, pos_neg=0, label=self.label, seq_sep=self.seq_sep)
            validation_neg_df.to_csv(self.validation_neg_file, index=False, sep=self.sep)
        if (not os.path.exists(self.test_neg_file)):
            logging.info('building test neg history csv...')
            test_neg_df = group_user_interactions_df(self.test_df, pos_neg=0, label=self.label, seq_sep=self.seq_sep)
            test_neg_df.to_csv(self.test_neg_file, index=False, sep=self.sep)

        def build_his(his_df, seqs_sep):
            # Turn a 'uid'/'iids' frame into {uid: [iid, iid, ...]}.
            uids = his_df[UID].tolist()
            iids = his_df[IIDS].astype(str).str.split(seqs_sep).values
            iids = [[int(j) for j in i] for i in iids]
            user_his = dict(zip(uids, iids))
            return user_his
        (self.train_pos_df, self.train_user_pos) = (None, None)
        (self.validation_pos_df, self.validation_user_pos) = (None, None)
        (self.test_pos_df, self.test_user_pos) = (None, None)
        (self.train_neg_df, self.train_user_neg) = (None, None)
        (self.validation_neg_df, self.validation_user_neg) = (None, None)
        (self.test_neg_df, self.test_user_neg) = (None, None)
        if self.load_data:
            logging.info('load history csv...')
            self.train_pos_df = pd.read_csv(self.train_pos_file, sep=self.sep)
            self.train_user_pos = build_his(self.train_pos_df, self.seq_sep)
            self.validation_pos_df = pd.read_csv(self.validation_pos_file, sep=self.sep)
            self.validation_user_pos = build_his(self.validation_pos_df, self.seq_sep)
            self.test_pos_df = pd.read_csv(self.test_pos_file, sep=self.sep)
            self.test_user_pos = build_his(self.test_pos_df, self.seq_sep)
            self.train_neg_df = pd.read_csv(self.train_neg_file, sep=self.sep)
            self.train_user_neg = build_his(self.train_neg_df, self.seq_sep)
            self.validation_neg_df = pd.read_csv(self.validation_neg_file, sep=self.sep)
            self.validation_user_neg = build_his(self.validation_neg_df, self.seq_sep)
            self.test_neg_df = pd.read_csv(self.test_neg_file, sep=self.sep)
            self.test_user_neg = build_his(self.test_neg_df, self.seq_sep)

    def feature_info(self, include_id=True, include_item_features=True, include_user_features=True, include_context_features=True):
        """
        Generate the feature information needed by the model (number of
        features, total dimension); features are eventually converted to a
        multi-hot sparse representation in DataProcessor.

        e.g. with uid(0-2), iid(0-2), u_age(0-2), i_xx(0-1),
        uid=0, iid=1, u_age=1, i_xx=0 becomes 100 010 010 10,
        i.e. the sparse indices 0,4,7,9.

        :param include_id: whether uid/iid are treated as ordinary features
            and folded into the multi-hot embedding with the others
            (otherwise they stay as two separate columns)
        :param include_item_features: whether item features are included
        :param include_user_features: whether user features are included
        :param include_context_features: whether context features are included
        :return: (all feature names, e.g. ['uid', 'iid', 'u_age', 'i_xx'];
                  total multi-hot dimension, e.g. 11;
                  per-feature minimum multi-hot index, e.g. [0, 3, 6, 9];
                  per-feature maximum multi-hot index, e.g. [2, 5, 8, 10])
        """
        features = []
        if include_id:
            if (UID in self.column_max):
                features.append(UID)
            if (IID in self.column_max):
                features.append(IID)
        if include_user_features:
            features.extend(self.user_features)
        if include_item_features:
            features.extend(self.item_features)
        if include_context_features:
            features.extend(self.context_features)
        feature_dims = 0
        (feature_min, feature_max) = ([], [])
        for f in features:
            # Each feature occupies a contiguous index range of size max+1.
            feature_min.append(feature_dims)
            feature_dims += int((self.column_max[f] + 1))
            feature_max.append((feature_dims - 1))
        logging.info(('Model # of features %d' % len(features)))
        logging.info(('Model # of feature dims %d' % feature_dims))
        return (features, feature_dims, feature_min, feature_max)

    def append_his(self, all_his=1, max_his=10, neg_his=0, neg_column=0):
        """
        Append a per-row interaction-history column (C_HISTORY, and optionally
        C_HISTORY_NEG) to the train/validation/test dataframes, in place.

        Histories are encoded as comma-joined id strings such as '1,2,4'.

        :param all_his: 1 = every row gets the user's complete history
            accumulated from the train split; otherwise each row gets only the
            interactions that precede it (built sequentially, train rows only)
        :param max_his: cap on per-row history length (<= 0 means unlimited);
            only applied in the sequential (all_his != 1) case
        :param neg_his: 1 = negative interactions (label <= 0) are recorded
            too; merged into the positive history as negated ids when
            neg_column == 0
        :param neg_column: 1 = keep the negative history in its own column
            C_HISTORY_NEG instead of merging it into C_HISTORY
        :return: None (dataframes are modified in place)
        """
        assert (not ((all_his == 1) and (self.train_df is None)))
        (his_dict, neg_dict) = ({}, {})
        for df in [self.train_df, self.validation_df, self.test_df]:
            # Skip missing frames and frames that already have a history column.
            if ((df is None) or (C_HISTORY in df)):
                continue
            (history, neg_history) = ([], [])
            if ((all_his != 1) or (df is self.train_df)):
                (uids, iids, labels) = (df[UID].tolist(), df[IID].tolist(), df[self.label].tolist())
                for (i, uid) in enumerate(uids):
                    (iid, label) = (iids[i], labels[i])
                    if (uid not in his_dict):
                        his_dict[uid] = []
                    if (uid not in neg_dict):
                        neg_dict[uid] = []
                    # History seen so far, truncated to the last max_his items;
                    # str(list) with brackets stripped yields '1,2,4'.
                    tmp_his = (his_dict[uid] if (max_his <= 0) else his_dict[uid][(- max_his):])
                    tmp_neg = (neg_dict[uid] if (max_his <= 0) else neg_dict[uid][(- max_his):])
                    history.append(str(tmp_his).replace(' ', '')[1:(- 1)])
                    neg_history.append(str(tmp_neg).replace(' ', '')[1:(- 1)])
                    # Update the running histories with the current interaction.
                    if ((label <= 0) and (neg_his == 1) and (neg_column == 0)):
                        his_dict[uid].append((- iid))
                    elif ((label <= 0) and (neg_column == 1)):
                        neg_dict[uid].append(iid)
                    elif (label > 0):
                        his_dict[uid].append(iid)
            if (all_his == 1):
                # Whole-history mode: discard the sequential lists and give
                # every row the full accumulated history (no max_his cap here).
                (history, neg_history) = ([], [])
                for uid in df[UID].tolist():
                    if (uid in his_dict):
                        history.append(str(his_dict[uid]).replace(' ', '')[1:(- 1)])
                    else:
                        history.append('')
                    if (uid in neg_dict):
                        neg_history.append(str(neg_dict[uid]).replace(' ', '')[1:(- 1)])
                    else:
                        neg_history.append('')
            df[C_HISTORY] = history
            if ((neg_his == 1) and (neg_column == 1)):
                df[C_HISTORY_NEG] = neg_history

    def drop_neg(self, train=True, validation=True, test=True):
        """
        For top-n recommendation keep only the positive examples (label > 0);
        negatives are obtained by sampling during training instead.

        :return: None (dataframes are filtered in place)
        """
        logging.info('Drop Neg Samples...')
        if (train and (self.train_df is not None)):
            self.train_df = self.train_df[(self.train_df[self.label] > 0)].reset_index(drop=True)
        if (validation and (self.validation_df is not None)):
            self.validation_df = self.validation_df[(self.validation_df[self.label] > 0)].reset_index(drop=True)
        if (test and (self.test_df is not None)):
            self.test_df = self.test_df[(self.test_df[self.label] > 0)].reset_index(drop=True)
        logging.info(('size of train: %d' % len(self.train_df)))
        logging.info(('size of validation: %d' % len(self.validation_df)))
        logging.info(('size of test: %d' % len(self.test_df)))

    def label_01(self, train=True, validation=True, test=True):
        """
        Convert the label column to binary 0/1 values (1 iff label > 0) and
        update label_min/label_max accordingly.

        :return: None (dataframes are modified in place)
        """
        logging.info('Transform label to 0-1')
        if (train and (self.train_df is not None) and (self.label in self.train_df)):
            self.train_df[self.label] = self.train_df[self.label].apply((lambda x: (1 if (x > 0) else 0)))
            logging.info(('train label: ' + str(dict(Counter(self.train_df[self.label]).most_common()))))
        if (validation and (self.validation_df is not None) and (self.label in self.validation_df)):
            self.validation_df[self.label] = self.validation_df[self.label].apply((lambda x: (1 if (x > 0) else 0)))
            logging.info(('validation label: ' + str(dict(Counter(self.validation_df[self.label]).most_common()))))
        if (test and (self.test_df is not None) and (self.label in self.test_df)):
            self.test_df[self.label] = self.test_df[self.label].apply((lambda x: (1 if (x > 0) else 0)))
            logging.info(('test label: ' + str(dict(Counter(self.test_df[self.label]).most_common()))))
        self.label_min = 0
        self.label_max = 1
|
class ProLogicDL(DataLoader):
    """
    DataLoader variant that additionally loads a per-dataset 'variable' file
    and records the variable id range alongside the standard dataset info.
    """

    def __init__(self, path, dataset, *args, **kwargs):
        """
        Initialization.

        :param path: directory containing all datasets
        :param dataset: name of the dataset (also its sub-directory name)
        Remaining positional/keyword arguments are forwarded to
        DataLoader.__init__ (label, load_data, sep, seq_sep).
        """
        # The variable file must be read BEFORE DataLoader.__init__, because
        # the base constructor calls self._load_info(), which is overridden
        # below and reads self.variable_df.
        self.variable_file = os.path.join(os.path.join(path, dataset), (dataset + VARIABLE_SUFFIX))
        self.variable_df = pd.read_csv(self.variable_file, sep='\t')
        DataLoader.__init__(self, *args, path=path, dataset=dataset, **kwargs)

    def _load_info(self):
        """
        Load the dataset info and extend it with the 'variable' column range
        and the derived variable count.
        """
        DataLoader._load_info(self)
        self.column_max['variable'] = self.variable_df['variable'].max()
        self.column_min['variable'] = self.variable_df['variable'].min()
        # Variable ids are assumed 0-based, so count = max id + 1.
        self.variable_num = (self.column_max['variable'] + 1)
        logging.info(('# of variables: %d' % self.variable_num))
|
class DataProcessor(object):
    """
    Turns the dataframes of a DataLoader into data dicts and batches:
    negative sampling, multi-hot feature encoding and tensor conversion.
    """
    # Columns that are converted to torch tensors and fed to the model
    # (see get_feed_dict).
    data_columns = [UID, IID, X]
    # Bookkeeping columns carried along with each batch but not tensorized.
    info_columns = [SAMPLE_ID, TIME]
@staticmethod
def parse_dp_args(parser):
'\n 数据处理生成batch的命令行参数\n :param parser:\n :return:\n \n Command-line parameters to generate batches in data processing\n :param parser:\n :return:\n '
parser.add_argument('--test_sample_n', type=int, default=100, help='Negative sample num for each instance in test/validation set when ranking.')
parser.add_argument('--train_sample_n', type=int, default=1, help='Negative sample num for each instance in train set when ranking.')
parser.add_argument('--sample_un_p', type=float, default=1.0, help='Sample from neg/pos with 1-p or unknown+neg/pos with p.')
parser.add_argument('--unlabel_test', type=int, default=0, help='If the label of test is unknown, do not sample neg of test set.')
return parser
@staticmethod
def batch_to_gpu(batch):
if (torch.cuda.device_count() > 0):
new_batch = {}
for c in batch:
if (type(batch[c]) is torch.Tensor):
new_batch[c] = batch[c].cuda()
else:
new_batch[c] = batch[c]
return new_batch
return batch
    def __init__(self, data_loader, rank, train_sample_n, test_sample_n, sample_un_p, unlabel_test=0):
        """
        Initialization.

        :param data_loader: DataLoader object
        :param rank: 1 = top-n recommendation, 0 = rating/click prediction
        :param train_sample_n: negatives sampled per positive in the train set
            when ranking
        :param test_sample_n: negatives sampled per positive in the
            validation/test sets when ranking (positive : negative = 1 : n)
        :param sample_un_p: probability of sampling from unknown+neg/pos
            instead of only neg/pos
        :param unlabel_test: 1 = test labels are unknown, so no negative
            sampling is done on the test set
        """
        self.data_loader = data_loader
        self.rank = rank
        (self.train_data, self.validation_data, self.test_data) = (None, None, None)
        self.test_sample_n = test_sample_n
        self.train_sample_n = train_sample_n
        self.sample_un_p = sample_un_p
        self.unlabel_test = unlabel_test
        if (self.rank == 1):
            # Cache each user's known positive/negative item sets per split —
            # presumably consulted by _sample_neg_from_uid_list to avoid
            # sampling already-interacted items (confirm against that method).
            self.train_history_pos = defaultdict(set)
            for uid in data_loader.train_user_pos.keys():
                self.train_history_pos[uid] = set(data_loader.train_user_pos[uid])
            self.validation_history_pos = defaultdict(set)
            for uid in data_loader.validation_user_pos.keys():
                self.validation_history_pos[uid] = set(data_loader.validation_user_pos[uid])
            self.test_history_pos = defaultdict(set)
            for uid in data_loader.test_user_pos.keys():
                self.test_history_pos[uid] = set(data_loader.test_user_pos[uid])
            self.train_history_neg = defaultdict(set)
            for uid in data_loader.train_user_neg.keys():
                self.train_history_neg[uid] = set(data_loader.train_user_neg[uid])
            self.validation_history_neg = defaultdict(set)
            for uid in data_loader.validation_user_neg.keys():
                self.validation_history_neg[uid] = set(data_loader.validation_user_neg[uid])
            self.test_history_neg = defaultdict(set)
            for uid in data_loader.test_user_neg.keys():
                self.test_history_neg[uid] = set(data_loader.test_user_neg[uid])
        # Buffer of prepared batch lists for validation/test (and eval-on-train)
        # data, keyed by '<split>_<batch_size>_<model>' (see _check_vt_buffer).
        self.vt_batches_buffer = {}
def get_train_data(self, epoch, model):
'\n 将dataloader中的训练集Dataframe转换为所需要的字典后返回,每一轮都要shuffle\n 该字典会被用来生成batches\n :param epoch: <0则不shuffle\n :param model: Model类\n :return: 字典dict\n \n Convert the training dataset Dataframe in the dataloader into the needed dict and return, need to shuffle in every round\n This dict will be used to generate batches\n :param epoch: if < 0 then no shuffling\n :param model: Model class\n :return: dict\n '
if (self.train_data is None):
logging.info('Prepare Train Data...')
self.train_data = self.format_data_dict(self.data_loader.train_df, model)
self.train_data[SAMPLE_ID] = np.arange(0, len(self.train_data[Y]))
if (epoch >= 0):
utils.shuffle_in_unison_scary(self.train_data)
return self.train_data
def get_validation_data(self, model):
'\n 将dataloader中的验证集Dataframe转换为所需要的字典后返回\n 如果是topn推荐则每个正例对应采样test_sample_n个负例\n 该字典会被用来生成batches\n :param model: Model类\n :return: 字典dict\n \n Convert the validate datasest Dataframe in the dataloader into the needed dict and return\n If doing topn recommendation then for each positive example sample test_sample_n number of negative examples\n This dict will be used to generate batches\n :param model: Model class\n :return: dict\n '
if (self.validation_data is None):
logging.info('Prepare Validation Data...')
df = self.data_loader.validation_df
if (self.rank == 1):
tmp_df = df.rename(columns={self.data_loader.label: Y})
tmp_df = tmp_df.drop(tmp_df[(tmp_df[Y] <= 0)].index)
neg_df = self.generate_neg_df(inter_df=tmp_df, feature_df=df, sample_n=self.test_sample_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.validation_data = self.format_data_dict(df, model)
self.validation_data[SAMPLE_ID] = np.arange(0, len(self.validation_data[Y]))
return self.validation_data
def get_test_data(self, model):
'\n 将dataloader中的测试集Dataframe转换为所需要的字典后返回\n 如果是topn推荐则每个正例对应采样test_sample_n个负例\n 该字典会被用来生成batches\n :param model: Model类\n :return: 字典dict\n \n Convert the testing datasest Dataframe in the dataloader into the needed dict and return\n If doing topn recommendation then for each positive example sample test_sample_n number of negative examples\n This dict will be used to generate batches\n :param model: Model class\n :return: dict\n '
if (self.test_data is None):
logging.info('Prepare Test Data...')
df = self.data_loader.test_df
if ((self.rank == 1) and (self.unlabel_test == 0)):
tmp_df = df.rename(columns={self.data_loader.label: Y})
tmp_df = tmp_df.drop(tmp_df[(tmp_df[Y] <= 0)].index)
neg_df = self.generate_neg_df(inter_df=tmp_df, feature_df=df, sample_n=self.test_sample_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.test_data = self.format_data_dict(df, model)
self.test_data[SAMPLE_ID] = np.arange(0, len(self.test_data[Y]))
return self.test_data
    def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None, special_cols=None):
        """
        Produce one batch (feed dict) from a data dict.

        When training a ranking model, train_sample_n negatives are appended
        per positive, so the first real_batch_size entries of every column are
        positives and the rest are the matching negatives.

        :param data: data dict produced by self.get_*_data() /
            self.format_data_dict()
        :param batch_start: index at which the batch starts
        :param batch_size: batch size
        :param train: training or evaluation batch
        :param neg_data: data dict of pre-generated negative examples
        :param special_cols: columns that must NOT be converted to tensors here
        :return: feed dict of the batch
        """
        total_data_num = len(data[SAMPLE_ID])
        batch_end = min(len(data[self.data_columns[0]]), (batch_start + batch_size))
        real_batch_size = (batch_end - batch_start)
        total_batch_size = ((real_batch_size * (self.train_sample_n + 1)) if ((self.rank == 1) and train) else real_batch_size)
        feed_dict = {TRAIN: train, RANK: self.rank, REAL_BATCH_SIZE: real_batch_size, TOTAL_BATCH_SIZE: total_batch_size}
        if (Y in data):
            feed_dict[Y] = utils.numpy_to_torch(data[Y][batch_start:(batch_start + real_batch_size)], gpu=False)
        for c in (self.info_columns + self.data_columns):
            if ((c not in data) or (data[c].size <= 0)):
                continue
            d = data[c][batch_start:(batch_start + real_batch_size)]
            if ((self.rank == 1) and train):
                # neg_data holds train_sample_n stacked copies of the data's
                # negatives; take the slice aligned with this batch from each
                # copy and append them after the positives.
                neg_d = np.concatenate([neg_data[c][((total_data_num * i) + batch_start):(((total_data_num * i) + batch_start) + real_batch_size)] for i in range(self.train_sample_n)])
                d = np.concatenate([d, neg_d])
            feed_dict[c] = d
        for c in self.data_columns:
            if (c not in feed_dict):
                continue
            # Leave 'special' columns as numpy for the caller to handle.
            if ((special_cols is not None) and (c in special_cols)):
                continue
            feed_dict[c] = utils.numpy_to_torch(feed_dict[c], gpu=False)
        return feed_dict
def _check_vt_buffer(self, data, batch_size, train, model):
buffer_key = ''
if ((data is self.train_data) and (not train)):
buffer_key = '_'.join(['train', str(batch_size), str(model)])
elif (data is self.validation_data):
buffer_key = '_'.join(['validation', str(batch_size), str(model)])
elif (data is self.test_data):
buffer_key = '_'.join(['test', str(batch_size), str(model)])
if ((buffer_key != '') and (buffer_key in self.vt_batches_buffer)):
return self.vt_batches_buffer[buffer_key]
return buffer_key
    def prepare_batches(self, data, batch_size, train, model):
        """
        Convert a whole data dict into a list of batches.

        :param data: dict generated by self.get_*_data() /
            self.format_data_dict()
        :param batch_size: batch size
        :param train: training or evaluation
        :param model: Model class
        :return: list of batches (feed dicts), possibly served from the
            validation/test buffer
        """
        # Serve buffered batches when available; a non-str return is the
        # cached batch list itself.
        buffer_key = self._check_vt_buffer(data=data, batch_size=batch_size, train=train, model=model)
        if (type(buffer_key) != str):
            return buffer_key
        if (data is None):
            return None
        num_example = len(data[Y])
        # Ceiling division: the last batch may be smaller than batch_size.
        total_batch = int((((num_example + batch_size) - 1) / batch_size))
        assert (num_example > 0)
        neg_data = None
        if (train and (self.rank == 1)):
            # Ranking training needs train_sample_n negatives per positive.
            neg_data = self.generate_neg_data(data, self.data_loader.train_df, sample_n=self.train_sample_n, train=True, model=model)
        batches = []
        for batch in tqdm(range(total_batch), leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
            batches.append(self.get_feed_dict(data=data, batch_start=(batch * batch_size), batch_size=batch_size, train=train, neg_data=neg_data))
        if (buffer_key != ''):
            self.vt_batches_buffer[buffer_key] = batches
        return batches
    def format_data_dict(self, df, model):
        """
        Convert a train/validation/test dataframe of the dataloader into the
        data dict used for batching.

        :param df: pandas DataFrame; in recommendation problems it usually
            contains the columns UID, IID and the label
        :param model: Model class (its include_* flags select which feature
            groups are joined in)
        :return: data dict with UID/IID/TIME arrays, the label array Y and the
            multi-hot-encoded feature matrix X
        """
        data_loader = self.data_loader
        data = {}
        out_columns = []
        if (UID in df):
            out_columns.append(UID)
            data[UID] = df[UID].values
        if (IID in df):
            out_columns.append(IID)
            data[IID] = df[IID].values
        if (TIME in df):
            data[TIME] = df[TIME].values
        # Label column -> Y; an all-zero placeholder when the label is absent.
        if (data_loader.label in df.columns):
            data[Y] = np.array(df[data_loader.label], dtype=np.float32)
        else:
            logging.warning(('No Labels In Data: ' + data_loader.label))
            data[Y] = np.zeros(len(df), dtype=np.float32)
        ui_id = df[out_columns]
        out_df = ui_id
        # Join the user/item/context feature columns selected by the model.
        if ((data_loader.user_df is not None) and model.include_user_features):
            out_df = pd.merge(out_df, data_loader.user_df, on=UID, how='left')
        if ((data_loader.item_df is not None) and model.include_item_features):
            out_df = pd.merge(out_df, data_loader.item_df, on=IID, how='left')
        if (model.include_context_features and (len(data_loader.context_features) > 0)):
            context = df[data_loader.context_features]
            out_df = pd.concat([out_df, context], axis=1, ignore_index=True)
        out_df = out_df.fillna(0)
        if (not model.include_id):
            out_df = out_df.drop(columns=out_columns)
        '\n 把特征全部转换为multi-hot向量\n 例:uid(0-2),iid(0-2),u_age(0-2),i_xx(0-1),\n 那么uid=0,iid=1,u_age=1,i_xx=0会转换为100 010 010 10的稀疏表示0,4,7,9\n \n Convert all features into multi-hot vectors\n e.g., uid(0-2),iid(0-2),u_age(0-2),i_xx(0-1),\n then uid=0,iid=1,u_age=1,i_xx=0 will be converted to the sparse representation of 100 010 010 10, i.e., 0,4,7,9\n '
        base = 0
        for feature in out_df.columns:
            # Shift each column's values by the running base so every feature
            # occupies its own contiguous index range; `base` is read when the
            # lambda is applied (immediately), so the per-column value is used.
            out_df[feature] = out_df[feature].apply((lambda x: (x + base)))
            base += int((data_loader.column_max[feature] + 1))
        data[X] = out_df.values.astype(int)
        assert (len(data[X]) == len(data[Y]))
        return data
def generate_neg_data(self, data, feature_df, sample_n, train, model):
    """Build the negative-example data dict for a positive data dict.

    Usually used by prepare_batches_rk when train=True.

    :param data: positive data dict (contains UID/IID/Y and SAMPLE_ID)
    :param feature_df: dataframe supplying the feature columns of samples
    :param sample_n: number of negatives per positive
    :param train: whether sampling for the training set
    :param model: Model class (drives feature formatting)
    :return: negative data dict, SAMPLE_IDs continuing after the positives'
    """
    interactions = pd.DataFrame()
    for column in [UID, IID, Y, TIME]:
        if column in data:
            interactions[column] = data[column]
        else:
            # only TIME is allowed to be absent from the data dict
            assert column == TIME
    neg_df = self.generate_neg_df(
        inter_df=interactions, feature_df=feature_df, sample_n=sample_n, train=train)
    neg_data = self.format_data_dict(neg_df, model)
    # negatives get sample ids appended right after the positive ones
    offset = len(data[SAMPLE_ID])
    neg_data[SAMPLE_ID] = np.arange(0, len(neg_data[Y])) + offset
    return neg_data
def generate_neg_df(self, inter_df, feature_df, sample_n, train):
    """Generate negative examples for the uids/labels of an interaction frame.

    :param inter_df: interactions with at least UID and Y columns
    :param feature_df: frame whose feature columns (and column order) the
        result must match
    :param sample_n: number of negatives per interaction
    :param train: sampling for the training set vs. validation/test set
    :return: DataFrame shaped like feature_df with label column forced to 0
    """
    carry_columns = [c for c in inter_df.columns if c not in [UID, Y]]
    neg_df = self._sample_neg_from_uid_list(
        uids=inter_df[UID].tolist(),
        labels=inter_df[Y].tolist(),
        sample_n=sample_n,
        train=train,
        other_infos=inter_df[carry_columns].to_dict('list'))
    # attach features of the sampled items, then swap the sampled iid in
    neg_df = pd.merge(neg_df, feature_df, on=[UID] + carry_columns, how='left')
    neg_df = neg_df.drop(columns=[IID]).rename(columns={'iid_neg': IID})
    neg_df = neg_df[feature_df.columns]
    neg_df[self.data_loader.label] = 0
    return neg_df
def _sample_neg_from_uid_list(self, uids, labels, sample_n, train, other_infos=None):
    """Sample `sample_n` negative item ids for every uid in `uids`.

    For a positive example (label > 0) negatives are drawn from items the
    user has no positive interaction with; for a negative example
    (training only) the roles of the pos/neg histories are swapped.
    With probability (1 - self.sample_un_p) a "known" item of the opposite
    label may be used instead of an entirely unseen one.

    :param uids: uid list
    :param labels: labels aligned with uids
    :param sample_n: number of negatives per uid
    :param train: training-set sampling (avoid train history only) vs.
        evaluation sampling (avoid train + validation + test histories)
    :param other_infos: extra aligned columns to replicate besides
        uid/iid/label (e.g. the original positive iid in generate_neg_df)
    :return: DataFrame with columns [UID, 'iid_neg'] + other_infos keys;
        still needs self.format_data_dict() to become a data dict
    """
    if other_infos is None:
        other_infos = {}
    iid_list = []
    other_info_list = {}
    for info in other_infos:
        other_info_list[info] = []
    item_num = self.data_loader.item_num
    for index, uid in enumerate(uids):
        if labels[index] > 0:
            train_history = self.train_history_pos
            validation_history, test_history = self.validation_history_pos, self.test_history_pos
            known_train = self.train_history_neg
        else:
            # evaluation splits only sample against positive examples
            assert train
            train_history = self.train_history_neg
            validation_history, test_history = self.validation_history_neg, self.test_history_neg
            known_train = self.train_history_pos
        if train:
            inter_iids = train_history[uid]
        else:
            inter_iids = (train_history[uid] | validation_history[uid]) | test_history[uid]
        remain_iids_num = item_num - len(inter_iids)
        # must have enough never-interacted items to sample from
        assert remain_iids_num >= sample_n
        remain_iids = None
        if (1.0 * remain_iids_num) / item_num < 0.2:
            # few candidates left: enumerate them and sample without replacement
            remain_iids = [i for i in range(1, item_num) if i not in inter_iids]
        sampled = set()
        if remain_iids is None:
            # plenty of candidates: rejection sampling is cheap
            unknown_iid_list = []
            for i in range(sample_n):
                iid = np.random.randint(1, self.data_loader.item_num)
                while (iid in inter_iids) or (iid in sampled):
                    iid = np.random.randint(1, self.data_loader.item_num)
                unknown_iid_list.append(iid)
                sampled.add(iid)
        else:
            # FIX: np.random.choice returns an ndarray, but the code below
            # concatenates it with a list and calls .pop(0) on it, which
            # raised (ValueError / AttributeError) whenever this dense
            # branch was taken during training with sample_un_p < 1.
            unknown_iid_list = list(np.random.choice(remain_iids, sample_n, replace=False))
        if train and (self.sample_un_p < 1):
            # mix in known opposite-label items with prob (1 - sample_un_p)
            known_iid_list = (list(np.random.choice(
                list(known_train[uid]), min(sample_n, len(known_train[uid])),
                replace=False)) if len(known_train[uid]) != 0 else [])
            # append unknowns so popping falls back to them when knowns run out
            known_iid_list = known_iid_list + unknown_iid_list
            tmp_iid_list = []
            sampled = set()
            for i in range(sample_n):
                p = np.random.rand()
                if (p < self.sample_un_p) or (len(known_iid_list) == 0):
                    iid = unknown_iid_list.pop(0)
                    while iid in sampled:
                        iid = unknown_iid_list.pop(0)
                else:
                    iid = known_iid_list.pop(0)
                    while iid in sampled:
                        iid = known_iid_list.pop(0)
                tmp_iid_list.append(iid)
                sampled.add(iid)
            iid_list.append(tmp_iid_list)
        else:
            iid_list.append(unknown_iid_list)
    all_uid_list, all_iid_list = [], []
    # flatten so that every uid's k-th negative precedes any (k+1)-th one
    for i in range(sample_n):
        for index, uid in enumerate(uids):
            all_uid_list.append(uid)
            all_iid_list.append(iid_list[index][i])
            for info in other_infos:
                other_info_list[info].append(other_infos[info][index])
    neg_df = pd.DataFrame(data=list(zip(all_uid_list, all_iid_list)), columns=[UID, 'iid_neg'])
    for info in other_infos:
        neg_df[info] = other_info_list[info]
    return neg_df
|
class HistoryDP(DataProcessor):
    """Data processor that additionally feeds each sample's interaction history."""
    # tensor columns handed to the model / bookkeeping columns kept alongside
    data_columns = [UID, IID, X, C_HISTORY, C_HISTORY_NEG]
    info_columns = [SAMPLE_ID, TIME, C_HISTORY_LENGTH, C_HISTORY_NEG_LENGTH]

    @staticmethod
    def parse_dp_args(parser):
        """Command-line parameters to generate batches in data processing."""
        parser.add_argument('--all_his', type=int, default=0, help='Append all history in the training set')
        parser.add_argument('--max_his', type=int, default=(- 1), help='Max history length. All his if max_his <= 0')
        parser.add_argument('--neg_his', type=int, default=0, help='Whether keep negative interactions in the history')
        parser.add_argument('--neg_column', type=int, default=0, help='Whether keep negative interactions in the history as a single column')
        parser.add_argument('--sparse_his', type=int, default=0, help='Whether use sparse representation of user history.')
        parser.add_argument('--sup_his', type=int, default=0, help='If sup_his > 0, supplement history list with 0')
        parser.add_argument('--drop_first', type=int, default=1, help='If drop_first > 0, drop the first user interacted item with no previous history')
        return DataProcessor.parse_dp_args(parser)

    def __init__(self, max_his, sup_his, sparse_his, drop_first, *args, **kwargs):
        # max_his: cap on history length; sparse_his: sparse vs. padded-dense
        # batch encoding; drop_first: drop samples with empty history.
        # NOTE(review): sup_his is stored but not read in this chunk —
        # presumably consumed elsewhere (e.g. by the data loader); confirm.
        self.max_his = max_his
        self.sparse_his = sparse_his
        self.sup_his = sup_his
        self.drop_first = drop_first
        DataProcessor.__init__(self, *args, **kwargs)

    def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None, special_cols=None):
        """Produce one batch; during training each positive is paired with a
        sampled negative (first half of the batch positive, second half the
        matching negatives).

        :param data: data dict produced by self.get_*_data() / self.format_data_dict()
        :param batch_start: starting index of the batch
        :param batch_size: batch size
        :param train: training or testing
        :param neg_data: pre-built negative data dict, reused if provided
        :param special_cols: extra columns that need special treatment
        :return: feed dict of the batch (history encoded sparse or padded-dense)
        """
        feed_dict = DataProcessor.get_feed_dict(self, data, batch_start, batch_size, train, neg_data=neg_data, special_cols=([C_HISTORY, C_HISTORY_NEG] if (special_cols is None) else ([C_HISTORY, C_HISTORY_NEG] + special_cols)))
        (his_cs, his_ls) = ([C_HISTORY], [C_HISTORY_LENGTH])
        if (C_HISTORY_NEG in feed_dict):
            his_cs.append(C_HISTORY_NEG)
            his_ls.append(C_HISTORY_NEG_LENGTH)
        for (i, c) in enumerate(his_cs):
            (lc, d) = (his_ls[i], feed_dict[c])
            if (self.sparse_his == 1):
                # Encode the batch history as one sparse (batch, item_num)
                # tensor: value +1 for positive, -1 for negative interactions.
                (x, y, v) = ([], [], [])
                for (idx, iids) in enumerate(d):
                    x.extend(([idx] * len(iids)))
                    y.extend([abs(iid) for iid in iids])
                    v.extend([(1.0 if (iid > 0) else ((- 1.0) if (iid < 0) else 0)) for iid in iids])
                if (len(x) <= 0):
                    # empty history in the whole batch: a single zero entry
                    # keeps the sparse tensor well-formed.
                    # NOTE: `i` (the loop index) is shadowed by the index
                    # tensor here; harmless since the for-statement rebinds it.
                    i = utils.numpy_to_torch(np.array([[0], [0]]), gpu=False)
                    v = utils.numpy_to_torch(np.array([0.0], dtype=np.float32), gpu=False)
                else:
                    i = utils.numpy_to_torch(np.array([x, y]), gpu=False)
                    v = utils.numpy_to_torch(np.array(v, dtype=np.float32), gpu=False)
                history = torch.sparse.FloatTensor(i, v, torch.Size([len(d), self.data_loader.item_num]))
                feed_dict[c] = history
                feed_dict[lc] = [len(iids) for iids in d]
            else:
                # dense encoding: right-pad each history with 0 to the
                # longest history in the batch
                lengths = [len(iids) for iids in d]
                max_length = max(lengths)
                new_d = np.array([(x + ([0] * (max_length - len(x)))) for x in d])
                feed_dict[c] = utils.numpy_to_torch(new_d, gpu=False)
                feed_dict[lc] = lengths
        return feed_dict

    def format_data_dict(self, df, model):
        """Besides the usual uid/iid/label/user/item/context features, parse
        the interaction-history column(s).

        :param df: training, validation, or testing dataframe
        :param model: Model class
        :return: data dict with C_HISTORY (and optionally C_HISTORY_NEG) lists
        """
        assert (C_HISTORY in df)
        his_cs = [C_HISTORY]
        if (C_HISTORY_NEG in df):
            his_cs.append(C_HISTORY_NEG)
        if (self.drop_first == 1):
            # drop samples whose history string is empty (first interaction)
            for c in his_cs:
                df = df[df[c].apply((lambda x: (len(x) > 0)))]
        data_dict = DataProcessor.format_data_dict(self, df, model)
        for c in his_cs:
            # History is stored as a comma-separated string of (signed) iids.
            # NOTE(review): eval() executes the cell text — fine for trusted
            # dataset files only; consider a split/int parse instead.
            his = df[c].apply((lambda x: eval((('[' + x) + ']'))))
            data_dict[c] = his.values
        return data_dict
|
class ProLogicDP(DataProcessor):
    """Data processor for propositional-logic sentences.

    Each sample's X is a DNF structure: a list of or-clauses, each a list of
    signed variable ids (negative = negated). Batches are padded to rectangular
    (max_or, max_and) shape.
    """
    data_columns = [X]
    info_columns = [SAMPLE_ID]

    @staticmethod
    def parse_dp_args(parser):
        """Command-line parameters to generate batches in data processing."""
        parser.add_argument('--shuffle_or', type=int, default=1, help='whether shuffle the or-list of each sent during training.')
        parser.add_argument('--shuffle_and', type=int, default=1, help='whether shuffle the and-list of each sent during training.')
        return DataProcessor.parse_dp_args(parser)

    def __init__(self, shuffle_or, shuffle_and, *args, **kwargs):
        # shuffle_or / shuffle_and: randomly permute clause / literal order
        # during training (data augmentation for order-invariant logic ops)
        self.shuffle_or = shuffle_or
        self.shuffle_and = shuffle_and
        DataProcessor.__init__(self, *args, **kwargs)
        # this processor only supports the rating/click (non-ranking) task
        assert (self.rank == 0)

    def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None, special_cols=None):
        """Build one batch; X padded to a rectangular (or, and) block."""
        feed_dict = DataProcessor.get_feed_dict(self, data, batch_start, batch_size, train, neg_data=neg_data, special_cols=[X])
        x = [[i for i in s] for s in feed_dict[X]]
        if train:
            if self.shuffle_and:
                x = [[list(np.random.choice(o, size=len(o), replace=False)) for o in s] for s in x]
            if self.shuffle_or:
                for s in x:
                    np.random.shuffle(s)
        # pad the or-dimension with empty clauses, then the and-dimension with 0
        max_or_length = max([len(s) for s in x])
        for s in x:
            while (len(s) < max_or_length):
                s.append([])
        max_and_length = max([max([len(s[i]) for s in x]) for i in range(max_or_length)])
        x = [[(s[i] + ([0] * (max_and_length - len(s[i])))) for i in range(max_or_length)] for s in x]
        # NOTE(review): or_length is computed AFTER padding, so it is always
        # [max_and_length] * max_or_length — confirm this is the intended
        # meaning of K_OR_LENGTH (pre-padding lengths seem more natural).
        or_length = [len(i) for i in x[0]]
        feed_dict[X] = utils.numpy_to_torch(np.array(x), gpu=False)
        feed_dict[K_OR_LENGTH] = or_length
        return feed_dict

    def format_data_dict(self, df, model):
        """Parse C_SENT strings like '1^~2v3' into nested signed-int lists."""
        data_loader = self.data_loader
        data = {}
        if (data_loader.label in df.columns):
            data[Y] = np.array(df[data_loader.label], dtype=np.float32)
        else:
            logging.warning(('No Labels In Data: ' + data_loader.label))
            # FIX: this branch previously wrote the placeholder labels under
            # the string literal key 'Y' instead of the Y constant, so
            # data[Y] was missing for unlabeled data (every other processor,
            # e.g. RNNLogicDP.format_data_dict, uses data[Y]).
            data[Y] = np.zeros(len(df), dtype=np.float32)
        # 'v' separates or-clauses, '^' separates literals, '~' marks negation
        or_list = df[C_SENT].apply((lambda x: x.split('v')))
        x_list = or_list.apply((lambda x: [i.split('^') for i in x]))
        x_list = x_list.apply((lambda x: [[int(v.replace('~', '-')) for v in i] for i in x]))
        data[X] = x_list.values
        return data
|
class ProLogicRecDP(HistoryDP):
    """HistoryDP variant for logic-based recommendation: dense histories only,
    with an optional history-order shuffle during training."""

    @staticmethod
    def parse_dp_args(parser):
        """Command-line parameters to generate batches in data processing."""
        parser.add_argument('--shuffle_his', type=int, default=0, help='whether shuffle the his-list of each sent during training.')
        return HistoryDP.parse_dp_args(parser)

    def __init__(self, shuffle_his, *args, **kwargs):
        self.shuffle_his = shuffle_his
        HistoryDP.__init__(self, *args, **kwargs)
        # sparse history encoding is not supported by this processor
        assert self.sparse_his == 0

    def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None, special_cols=None):
        """Build one batch; the (positive-only) history column is optionally
        shuffled during training and right-padded with zeros.

        :param data: data dict produced by self.get_*_data() / self.format_data_dict()
        :param batch_start: starting index of the batch
        :param batch_size: batch size
        :param train: training or testing
        :param neg_data: pre-built negative data dict, reused if provided
        :param special_cols: extra columns that need special treatment
        :return: feed dict of the batch
        """
        extra_cols = [C_HISTORY, C_HISTORY_NEG] if special_cols is None \
            else [C_HISTORY, C_HISTORY_NEG] + special_cols
        feed_dict = DataProcessor.get_feed_dict(
            self, data, batch_start, batch_size, train,
            neg_data=neg_data, special_cols=extra_cols)
        # negative histories are not expected here
        assert C_HISTORY_NEG not in feed_dict
        histories = [list(h) for h in feed_dict[C_HISTORY]]
        if train and self.shuffle_his:
            histories = [list(np.random.choice(h, len(h), replace=False)) if len(h) != 0 else []
                         for h in histories]
        his_lengths = [len(h) for h in histories]
        pad_to = max(his_lengths)
        padded = np.array([h + [0] * (pad_to - len(h)) for h in histories])
        feed_dict[C_HISTORY] = utils.numpy_to_torch(padded, gpu=False)
        feed_dict[C_HISTORY_LENGTH] = his_lengths
        return feed_dict
|
class RNNLogicDP(DataProcessor):
    """Data processor that flattens logic sentences into operator-token
    sequences for RNN consumption."""
    data_columns = [X]
    info_columns = [SAMPLE_ID]

    def __init__(self, *args, **kwargs):
        DataProcessor.__init__(self, *args, **kwargs)
        # only the rating/click (non-ranking) task is supported
        assert self.rank == 0

    def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None, special_cols=None):
        """Build one batch; token sequences are right-padded with zeros."""
        feed_dict = DataProcessor.get_feed_dict(
            self, data, batch_start, batch_size, train,
            neg_data=neg_data, special_cols=[X])
        seq_lengths = [len(seq) for seq in feed_dict[X]]
        pad_to = max(seq_lengths)
        padded = np.array([seq + [0] * (pad_to - len(seq)) for seq in feed_dict[X]])
        feed_dict[X] = utils.numpy_to_torch(padded, gpu=False)
        feed_dict[K_S_LENGTH] = seq_lengths
        return feed_dict

    def format_data_dict(self, df, model):
        """Tokenize C_SENT strings: variables keep their ids, operators get
        the ids right after the variable range (AND, OR, NOT)."""
        data_loader = self.data_loader
        data = {}
        if data_loader.label in df.columns:
            data[Y] = np.array(df[data_loader.label], dtype=np.float32)
        else:
            logging.warning(('No Labels In Data: ' + data_loader.label))
            data[Y] = np.zeros(len(df), dtype=np.float32)
        and_token = data_loader.variable_num
        or_token, not_token = and_token + 1, and_token + 2

        def tokenize(sent):
            # e.g. with and=10,or=11,not=12: '1^~2v3' -> [1,10,12,2,11,3]
            flat = sent.replace('^', ',%d,' % and_token)
            flat = flat.replace('v', ',%d,' % or_token)
            flat = flat.replace('~', '%d,' % not_token)
            return [int(tok) for tok in flat.split(',')]

        data[X] = df[C_SENT].apply(tokenize).values
        assert len(data[X]) == len(data[Y])
        return data
|
def format_5core(in_json, out_csv, label01=True):
    """Convert an Amazon 5-core review json-lines file into a tab-separated CSV.

    :param in_json: path to the raw json-lines review file
    :param out_csv: output csv path
    :param label01: if True, binarize ratings (overall > 3 -> 1, else 0)
    :return: the formatted DataFrame
    """
    # FIX: the input file was opened without ever being closed (resource
    # leak); use a context manager.
    with open(in_json, 'r') as f:
        records = [json.loads(line) for line in f]
    out_df = pd.DataFrame()
    out_df[UID] = [r['reviewerID'] for r in records]
    out_df[IID] = [r['asin'] for r in records]
    out_df[LABEL] = [r['overall'] for r in records]
    out_df[TIME] = [r['unixReviewTime'] for r in records]
    # sort chronologically and keep the first interaction per (user, item)
    out_df = out_df.sort_values(by=[TIME, UID, IID])
    out_df = out_df.drop_duplicates([UID, IID]).reset_index(drop=True)
    # re-index users and items to contiguous ids starting at 1 (0 reserved)
    uids = sorted(out_df[UID].unique())
    uid_dict = dict(zip(uids, range(1, len(uids) + 1)))
    out_df[UID] = out_df[UID].apply(lambda x: uid_dict[x])
    iids = sorted(out_df[IID].unique())
    iid_dict = dict(zip(iids, range(1, len(iids) + 1)))
    out_df[IID] = out_df[IID].apply(lambda x: iid_dict[x])
    if label01:
        out_df[LABEL] = out_df[LABEL].apply(lambda x: 1 if x > 3 else 0)
    print('label:', out_df[LABEL].min(), out_df[LABEL].max())
    print(Counter(out_df[LABEL]))
    out_df.to_csv(out_csv, sep='\t', index=False)
    return out_df
|
def main():
    """Build the Electronics 5-core dataset and split it by interaction time."""
    csv_path = os.path.join(DATA_DIR, 'reviews_Electronics01_5.csv')
    json_path = os.path.join(RAW_DATA, 'reviews_Electronics_5.json')
    format_5core(in_json=json_path, out_csv=csv_path, label01=True)
    leave_out_by_time_csv(csv_path, '5Electronics01-1-5', leave_n=1, warm_n=5)
    return
|
def format_user_feature(out_file):
    """Read the raw '|'-separated user file and write encoded user features.

    Keeps uid, age (bucketed), gender (M=1, F=2, other=0) and occupation
    (dense integer codes, 'none'=0, 'other'=1).

    :param out_file: path of the tab-separated output file
    :return: the formatted DataFrame
    """
    print('format_user_feature', USERS_FILE)
    user_df = pd.read_csv(USERS_FILE, sep='|', header=None)
    user_df = user_df[[0, 1, 2, 3]]
    user_df.columns = [UID, 'u_age', 'u_gender', 'u_occupation']
    min_age, max_age = 10, 60

    def age_bucket(age):
        # < min_age -> bucket 1; min_age..max_age -> 5-year buckets;
        # > max_age -> one bucket above the top.
        # FIX: the original lambda carried an unreachable `else 0` branch
        # (the preceding conditions already cover all ages); removed.
        if age < min_age:
            return 1
        if age <= max_age:
            return int(age / 5)
        return int(max_age / 5) + 1

    user_df['u_age'] = user_df['u_age'].apply(age_bucket)
    # hoisted: the original rebuilt these defaultdicts for every row
    gender_map = defaultdict(int, {'M': 1, 'F': 2})
    user_df['u_gender'] = user_df['u_gender'].apply(lambda g: gender_map[g])
    occupation = {'none': 0, 'other': 1}
    for o in user_df['u_occupation'].unique():
        if o not in occupation:
            occupation[o] = len(occupation)
    occupation_map = defaultdict(int, occupation)
    user_df['u_occupation'] = user_df['u_occupation'].apply(lambda o: occupation_map[o])
    user_df.to_csv(out_file, index=False, sep='\t')
    return user_df
|
def format_item_feature(out_file):
    """Read the raw '|'-separated movie file and write encoded item features.

    Release years are bucketed (coarse pre-1990, yearly afterwards); genre
    indicator columns are shifted from 0/1 to 1/2.

    :param out_file: path of the tab-separated output file
    :return: the formatted DataFrame
    """
    print('format_item_feature', ITEMS_FILE, out_file)
    item_df = pd.read_csv(ITEMS_FILE, sep='|', header=None, encoding='ISO-8859-1')
    item_df = item_df.drop([1, 3, 4], axis=1)
    # NOTE(review): several genre names carry a trailing space — looks
    # accidental, but downstream code may rely on them; preserved as-is.
    item_df.columns = [IID, 'i_year', 'i_Other', 'i_Action', 'i_Adventure', 'i_Animation', "i_Children's", 'i_Comedy', 'i_Crime', 'i_Documentary ', 'i_Drama ', 'i_Fantasy ', 'i_Film-Noir ', 'i_Horror ', 'i_Musical ', 'i_Mystery ', 'i_Romance ', 'i_Sci-Fi ', 'i_Thriller ', 'i_War ', 'i_Western']
    # take the year from strings like '01-Jan-1995'; missing -> -1
    item_df['i_year'] = item_df['i_year'].apply(
        lambda raw: int(str(raw).split('-')[-1]) if pd.notnull(raw) else -1)
    cut_points = [0, 1940, 1950, 1960, 1970, 1980, 1985] + \
        list(range(1990, int(item_df['i_year'].max() + 2)))
    # year -> 1-based bucket index; anything outside (e.g. -1) falls to 0
    year_dict = {year: bucket + 1
                 for bucket in range(len(cut_points) - 1)
                 for year in range(cut_points[bucket], cut_points[bucket + 1])}
    year_lookup = defaultdict(int, year_dict)
    item_df['i_year'] = item_df['i_year'].apply(lambda y: year_lookup[y])
    for genre_col in item_df.columns[2:]:
        item_df[genre_col] = item_df[genre_col] + 1
    item_df.to_csv(out_file, index=False, sep='\t')
    return item_df
|
def format_all_inter(out_file, label01=True):
    """Read the raw ratings file and write the deduplicated interaction CSV.

    :param out_file: path of the tab-separated output file
    :param label01: if True, binarize ratings (rating > 3 -> 1, else 0)
    :return: the formatted DataFrame
    """
    print('format_all_inter', RATINGS_FILE, out_file)
    inter_df = pd.read_csv(RATINGS_FILE, sep='\t', header=None)
    inter_df.columns = [UID, IID, LABEL, TIME]
    # chronological order; keep the first interaction per (user, item)
    inter_df = inter_df.sort_values(by=TIME)
    inter_df = inter_df.drop_duplicates([UID, IID]).reset_index(drop=True)
    if label01:
        inter_df[LABEL] = inter_df[LABEL].apply(lambda r: 1 if r > 3 else 0)
    print('label:', inter_df[LABEL].min(), inter_df[LABEL].max())
    print(Counter(inter_df[LABEL]))
    inter_df.to_csv(out_file, sep='\t', index=False)
    return inter_df
|
def main():
    """Format ml100k user/item/interaction files and split them by time."""
    format_user_feature(USER_FEATURE_FILE)
    format_item_feature(ITEM_FEATURE_FILE)
    format_all_inter(ALL_DATA_FILE, label01=True)
    leave_out_by_time_csv(ALL_DATA_FILE, 'ml100k01-1-5', leave_n=1, warm_n=5,
                          u_f=USER_FEATURE_FILE, i_f=ITEM_FEATURE_FILE)
    return
|
def random_variables(num=100):
    """Draw `num` independent binary variables.

    :param num: number of variables
    :return: dict mapping variable id (1..num) to a random 0/1 value
    """
    flips = np.random.randint(2, size=num)
    return dict(zip(range(1, num + 1), flips))
|
def random_logic_sent(variables, min_or=1, max_or=5, min_and=1, max_and=5, v_ps=None):
    """Generate a random DNF sentence string over the given variables.

    Format: or-clauses joined by 'v', literals joined by '^', '~' negates,
    e.g. '1^~3v2'. Variable ids are drawn uniformly from 1..len(variables).

    :param variables: dict of variable id -> value (only its size is used)
    :param min_or/max_or: clause-count range (inclusive)
    :param min_and/max_and: literals-per-clause range (inclusive)
    :param v_ps: unused; kept for interface compatibility
    :return: the sentence string
    """
    clause_count = np.random.randint(min_or, max_or + 1)
    clauses = []
    for _ in range(clause_count):
        literal_count = np.random.randint(min_and, max_and + 1)
        literals = []
        for _ in range(literal_count):
            keep_positive = np.random.randint(2)
            var = np.random.randint(len(variables)) + 1
            literals.append(str(var) if keep_positive else '~' + str(var))
        clauses.append('^'.join(literals))
    return 'v'.join(clauses)
|
def calcu_logic_sent(sent, variables):
    """Evaluate a DNF logic sentence against a binary variable assignment.

    The sentence is or-clauses joined by 'v', literals joined by '^',
    '~' negating a variable (e.g. '1^~2v3'). Returns 1 if any clause has
    all its literals satisfied, else 0.

    :param sent: sentence string
    :param variables: dict of variable id -> 0/1 truth value
    :return: 1 or 0
    """
    def literal_value(token):
        # '~k' -> negation of variable k; assumes values are binary 0/1
        if token.startswith('~'):
            return 1 - variables[int(token[1:])]
        return variables[int(token)]

    # idiomatic rewrite of the manual sum-and-compare loop:
    # a clause holds iff every literal evaluates truthy (i.e. to 1)
    for clause in sent.split('v'):
        if all(literal_value(tok) for tok in clause.split('^')):
            return 1
    return 0
|
def random_logic_dataset(sent_num=3000, variables_num=1000, min_or=1, max_or=5, min_and=1, max_and=5):
    """Generate a random logic dataset: sentences plus their truth labels.

    :param sent_num: number of sentences
    :param variables_num: number of binary variables
    :param min_or/max_or, min_and/max_and: sentence-shape ranges
    :return: (variables dict, DataFrame with columns [C_SENT, LABEL])
    """
    variable_map = random_variables(num=variables_num)
    rows = []
    for _ in tqdm(range(sent_num), leave=False, ncols=100, mininterval=1):
        sent = random_logic_sent(variable_map, min_or=min_or, max_or=max_or,
                                 min_and=min_and, max_and=max_and)
        rows.append((sent, calcu_logic_sent(sent, variable_map)))
    frame = pd.DataFrame(data=rows, columns=[C_SENT, LABEL])
    return (variable_map, frame)
|
def main():
    """Generate a synthetic logic dataset, persist it, and split it randomly."""
    dataset_name = 'logic1k_3k-15-15'
    variables, dataset = random_logic_dataset(
        variables_num=1000, sent_num=3000, min_and=1, max_and=5, min_or=1, max_or=5)
    print(dataset)
    dataset_dir = os.path.join(DATASET_DIR, dataset_name)
    if not os.path.exists(dataset_dir):
        os.mkdir(dataset_dir)
    all_data_file = os.path.join(dataset_dir, dataset_name + '.all.csv')
    dataset.to_csv(all_data_file, index=False, sep='\t')
    # also persist the ground-truth variable assignment
    var_df = pd.DataFrame(data=list(variables.items()), columns=['variable', 'value'])
    var_df = var_df.sort_values('variable').reset_index(drop=True)
    var_df.to_csv(os.path.join(dataset_dir, dataset_name + '.variable.csv'),
                  index=False, sep='\t')
    random_split_data(all_data_file, dataset_name=dataset_name)
    return
|
def build_run_environment(para_dict, dl_name, dp_name, model_name, runner_name):
    """Assemble (data_loader, data_processor, model, runner) from a parameter
    dict and component classes (or their names as strings).

    :param para_dict: dict of all constructor parameters (or its repr string)
    :param dl_name: data loader class or class name
    :param dp_name: data processor class or class name
    :param model_name: model class or class name
    :param runner_name: runner class or class name
    :return: tuple (data_loader, data_processor, model, runner)
    """
    # Accept classes or their names. NOTE(review): eval() executes arbitrary
    # strings — only safe for trusted, hand-written configuration.
    if (type(para_dict) is str):
        para_dict = eval(para_dict)
    if (type(dl_name) is str):
        dl_name = eval(dl_name)
    if (type(dp_name) is str):
        dp_name = eval(dp_name)
    if (type(model_name) is str):
        model_name = eval(model_name)
    if (type(runner_name) is str):
        runner_name = eval(runner_name)
    # fix all random seeds for reproducibility, then select GPUs
    torch.manual_seed(para_dict['random_seed'])
    torch.cuda.manual_seed(para_dict['random_seed'])
    np.random.seed(para_dict['random_seed'])
    os.environ['CUDA_VISIBLE_DEVICES'] = para_dict['gpu']
    logging.info(('# cuda devices: %d' % torch.cuda.device_count()))
    # --- data loader ---
    para_dict['load_data'] = True
    dl_paras = utils.get_init_paras_dict(dl_name, para_dict)
    logging.info(((str(dl_name) + ': ') + str(dl_paras)))
    data_loader = dl_name(**dl_paras)
    # history-based processors need interaction histories appended up front
    if ('all_his' in para_dict):
        data_loader.append_his(all_his=para_dict['all_his'], max_his=para_dict['max_his'], neg_his=para_dict['neg_his'], neg_column=para_dict['neg_column'])
    # ranking task: binarize labels; optionally drop negative interactions
    if (para_dict['rank'] == 1):
        data_loader.label_01()
    if (para_dict['drop_neg'] == 1):
        data_loader.drop_neg()
    # --- data processor; materialize all three splits up front ---
    para_dict['data_loader'] = data_loader
    dp_paras = utils.get_init_paras_dict(dp_name, para_dict)
    logging.info(((str(dp_name) + ': ') + str(dp_paras)))
    data_processor = dp_name(**dp_paras)
    data_processor.get_train_data(epoch=(- 1), model=model_name)
    data_processor.get_validation_data(model=model_name)
    data_processor.get_test_data(model=model_name)
    # --- feature statistics required by the model constructor ---
    (features, feature_dims, feature_min, feature_max) = data_loader.feature_info(include_id=model_name.include_id, include_item_features=model_name.include_item_features, include_user_features=model_name.include_user_features)
    (para_dict['feature_num'], para_dict['feature_dims']) = (len(features), feature_dims)
    # feature names are prefixed u_/i_/c_ by source (user/item/context)
    para_dict['user_feature_num'] = len([f for f in features if f.startswith('u_')])
    para_dict['item_feature_num'] = len([f for f in features if f.startswith('i_')])
    para_dict['context_feature_num'] = len([f for f in features if f.startswith('c_')])
    # expose remaining data-loader attributes as candidate model parameters
    data_loader_vars = vars(data_loader)
    for key in data_loader_vars:
        if (key not in para_dict):
            para_dict[key] = data_loader_vars[key]
    # --- model (this helper always restores a saved checkpoint) ---
    model_paras = utils.get_init_paras_dict(model_name, para_dict)
    logging.info(((str(model_name) + ': ') + str(model_paras)))
    model = model_name(**model_paras)
    model.load_model()
    if (torch.cuda.device_count() > 0):
        model = model.cuda()
    # --- runner ---
    runner_paras = utils.get_init_paras_dict(runner_name, para_dict)
    logging.info(((str(runner_name) + ': ') + str(runner_paras)))
    runner = runner_name(**runner_paras)
    return (data_loader, data_processor, model, runner)
|
def main():
    """Script entry point: parse CLI args, assemble data loader / processor /
    model / runner, train, predict on all splits, and log final metrics.

    NOTE(review): this file contains several `main` definitions; only the
    last one defined is callable at module level.
    """
    # --- stage 1: choose the component classes from high-level args ---
    init_parser = argparse.ArgumentParser(description='Model', add_help=False)
    init_parser.add_argument('--rank', type=int, default=1, help='1=ranking, 0=rating/click')
    init_parser.add_argument('--data_loader', type=str, default='', help='Choose data_loader')
    init_parser.add_argument('--model_name', type=str, default='BaseModel', help='Choose model to run.')
    init_parser.add_argument('--runner_name', type=str, default='', help='Choose runner')
    init_parser.add_argument('--data_processor', type=str, default='', help='Choose runner')
    (init_args, init_extras) = init_parser.parse_known_args()
    # NOTE(review): eval() on CLI strings resolves class names by executing
    # arbitrary input — acceptable for research scripts only.
    model_name = eval(init_args.model_name)
    # empty component names fall back to the model's declared defaults
    if (init_args.data_loader == ''):
        init_args.data_loader = model_name.data_loader
    data_loader_name = eval(init_args.data_loader)
    if (init_args.data_processor == ''):
        init_args.data_processor = model_name.data_processor
    data_processor_name = eval(init_args.data_processor)
    if (init_args.runner_name == ''):
        init_args.runner_name = model_name.runner
    runner_name = eval(init_args.runner_name)
    # --- stage 2: full parser composed from every chosen component ---
    parser = argparse.ArgumentParser(description='')
    parser = utils.parse_global_args(parser)
    parser = data_loader_name.parse_data_args(parser)
    parser = model_name.parse_model_args(parser, model_name=init_args.model_name)
    parser = runner_name.parse_runner_args(parser)
    parser = data_processor_name.parse_dp_args(parser)
    (origin_args, extras) = parser.parse_known_args()
    # --- derive a unique log/model/result file name from the arg values ---
    paras = sorted(vars(origin_args).items(), key=(lambda kv: kv[0]))
    log_name_exclude = ['check_epoch', 'eval_batch_size', 'gpu', 'label', 'load', 'log_file', 'metrics', 'model_path', 'path', 'pre_gpu', 'result_file', 'sep', 'seq_sep', 'train', 'unlabel_test', 'verbose', 'dataset', 'random_seed']
    log_file_name = ([(str(init_args.rank) + str(origin_args.drop_neg)), init_args.model_name, origin_args.dataset, str(origin_args.random_seed)] + [(p[0].replace('_', '')[:3] + str(p[1])) for p in paras if (p[0] not in log_name_exclude)])
    log_file_name = [l.replace(' ', '-').replace('_', '-') for l in log_file_name]
    log_file_name = '_'.join(log_file_name)
    # only override the default paths; user-provided ones are kept untouched
    if (origin_args.log_file == os.path.join(LOG_DIR, 'log.txt')):
        origin_args.log_file = os.path.join(LOG_DIR, ('%s/%s.txt' % (init_args.model_name, log_file_name)))
    utils.check_dir_and_mkdir(origin_args.log_file)
    if (origin_args.result_file == os.path.join(RESULT_DIR, 'result.npy')):
        origin_args.result_file = os.path.join(RESULT_DIR, ('%s/%s.npy' % (init_args.model_name, log_file_name)))
    utils.check_dir_and_mkdir(origin_args.result_file)
    if (origin_args.model_path == os.path.join(MODEL_DIR, ('%s/%s.pt' % (init_args.model_name, init_args.model_name)))):
        origin_args.model_path = os.path.join(MODEL_DIR, ('%s/%s.pt' % (init_args.model_name, log_file_name)))
    utils.check_dir_and_mkdir(origin_args.model_path)
    args = copy.deepcopy(origin_args)
    # --- logging: reset handlers, log to file and stdout ---
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=args.log_file, level=args.verbose)
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(vars(init_args))
    logging.info(vars(origin_args))
    logging.info(extras)
    logging.info(('DataLoader: ' + init_args.data_loader))
    logging.info(('Model: ' + init_args.model_name))
    logging.info(('Runner: ' + init_args.runner_name))
    logging.info(('DataProcessor: ' + init_args.data_processor))
    # --- reproducibility / device selection ---
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    logging.info(('# cuda devices: %d' % torch.cuda.device_count()))
    # --- data loader ---
    args.load_data = True
    dl_para_dict = utils.get_init_paras_dict(data_loader_name, vars(args))
    logging.info(((init_args.data_loader + ': ') + str(dl_para_dict)))
    data_loader = data_loader_name(**dl_para_dict)
    # history-based processors need interaction histories appended up front
    if ('all_his' in origin_args):
        data_loader.append_his(all_his=origin_args.all_his, max_his=origin_args.max_his, neg_his=origin_args.neg_his, neg_column=origin_args.neg_column)
    # ranking task: binarize labels; optionally drop negative interactions
    if (init_args.rank == 1):
        data_loader.label_01()
    if (origin_args.drop_neg == 1):
        data_loader.drop_neg()
    (args.data_loader, args.rank) = (data_loader, init_args.rank)
    # --- data processor; materialize the three splits up front ---
    dp_para_dict = utils.get_init_paras_dict(data_processor_name, vars(args))
    logging.info(((init_args.data_processor + ': ') + str(dp_para_dict)))
    data_processor = data_processor_name(**dp_para_dict)
    data_processor.get_train_data(epoch=(- 1), model=model_name)
    data_processor.get_validation_data(model=model_name)
    data_processor.get_test_data(model=model_name)
    # --- feature statistics required by the model constructor ---
    (features, feature_dims, feature_min, feature_max) = data_loader.feature_info(include_id=model_name.include_id, include_item_features=model_name.include_item_features, include_user_features=model_name.include_user_features)
    (args.feature_num, args.feature_dims) = (len(features), feature_dims)
    # feature names are prefixed u_/i_/c_ by source (user/item/context)
    args.user_feature_num = len([f for f in features if f.startswith('u_')])
    args.item_feature_num = len([f for f in features if f.startswith('i_')])
    args.context_feature_num = len([f for f in features if f.startswith('c_')])
    # expose remaining data-loader attributes as candidate model parameters
    data_loader_vars = vars(data_loader)
    for key in data_loader_vars:
        if (key not in args.__dict__):
            args.__dict__[key] = data_loader_vars[key]
    # --- model ---
    model_para_dict = utils.get_init_paras_dict(model_name, vars(args))
    logging.info(((init_args.model_name + ': ') + str(model_para_dict)))
    model = model_name(**model_para_dict)
    model.apply(model.init_paras)
    if (torch.cuda.device_count() > 0):
        model = model.cuda()
    # --- runner ---
    runner_para_dict = utils.get_init_paras_dict(runner_name, vars(args))
    logging.info(((init_args.runner_name + ': ') + str(runner_para_dict)))
    runner = runner_name(**runner_para_dict)
    # baseline metrics before any training; test skipped when unlabeled
    logging.info(((('Test Before Training: train= %s validation= %s test= %s' % (utils.format_metric(runner.evaluate(model, data_processor.get_train_data(epoch=(- 1), model=model), data_processor)), utils.format_metric(runner.evaluate(model, data_processor.get_validation_data(model=model), data_processor)), (utils.format_metric(runner.evaluate(model, data_processor.get_test_data(model=model), data_processor)) if (args.unlabel_test == 0) else '-1'))) + ' ') + ','.join(runner.metrics)))
    # optional checkpoint restore, then training
    if (args.load > 0):
        model.load_model()
    if (args.train > 0):
        runner.train(model, data_processor)
    # --- final predictions on all splits, saved next to result_file ---
    train_result = runner.predict(model, data_processor.get_train_data(epoch=(- 1), model=model), data_processor)
    validation_result = runner.predict(model, data_processor.get_validation_data(model=model), data_processor)
    test_result = runner.predict(model, data_processor.get_test_data(model=model), data_processor)
    np.save(args.result_file.replace('.npy', '__train.npy'), train_result)
    np.save(args.result_file.replace('.npy', '__validation.npy'), validation_result)
    np.save(args.result_file.replace('.npy', '__test.npy'), test_result)
    logging.info(('Save Results to ' + args.result_file))
    # --- evaluate every split on the full metric battery ---
    all_metrics = ['rmse', 'mae', 'auc', 'f1', 'accuracy', 'precision', 'recall']
    if (init_args.rank == 1):
        all_metrics = (((['ndcg@1', 'ndcg@5', 'ndcg@10', 'ndcg@20', 'ndcg@50', 'ndcg@100'] + ['hit@1', 'hit@5', 'hit@10', 'hit@20', 'hit@50', 'hit@100']) + ['precision@1', 'precision@5', 'precision@10', 'precision@20', 'precision@50', 'precision@100']) + ['recall@1', 'recall@5', 'recall@10', 'recall@20', 'recall@50', 'recall@100'])
    results = [train_result, validation_result, test_result]
    name_map = ['Train', 'Valid', 'Test']
    datasets = [data_processor.get_train_data(epoch=(- 1), model=model), data_processor.get_validation_data(model=model)]
    if (args.unlabel_test != 1):
        datasets.append(data_processor.get_test_data(model=model))
    for (i, dataset) in enumerate(datasets):
        metrics = model.evaluate_method(results[i], datasets[i], metrics=all_metrics, error_skip=True)
        log_info = ('Test After Training on %s: ' % name_map[i])
        log_metrics = [('%s=%s' % (metric, utils.format_metric(metrics[j]))) for (j, metric) in enumerate(all_metrics)]
        log_info += ', '.join(log_metrics)
        logging.info(((os.linesep + log_info) + os.linesep))
    if (args.verbose <= logging.DEBUG):
        # NOTE(review): each evaluate() is run twice — presumably a
        # determinism check of the evaluation path; confirm, otherwise one
        # of the duplicated calls can go.
        if (args.unlabel_test == 0):
            logging.debug(runner.evaluate(model, data_processor.get_test_data(model=model), data_processor))
            logging.debug(runner.evaluate(model, data_processor.get_test_data(model=model), data_processor))
        else:
            logging.debug(runner.evaluate(model, data_processor.get_validation_data(model=model), data_processor))
            logging.debug(runner.evaluate(model, data_processor.get_validation_data(model=model), data_processor))
    logging.info(('# of params: %d' % model.total_parameters))
    logging.info(vars(origin_args))
    logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    return
|
class BiasedMF(RecModel):
    """
    Biased Matrix Factorization: dot product of user/item embeddings plus
    per-user, per-item, and global bias terms.
    """

    def _init_weights(self):
        """Build embedding/bias tables and register the ones tracked for l2."""
        self.uid_embeddings = torch.nn.Embedding(self.user_num, self.ui_vector_size)
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        self.user_bias = torch.nn.Embedding(self.user_num, 1)
        self.item_bias = torch.nn.Embedding(self.item_num, 1)
        self.global_bias = torch.nn.Parameter(torch.tensor(0.1))
        self.l2_embeddings = ['uid_embeddings', 'iid_embeddings', 'user_bias', 'item_bias']

    def predict(self, feed_dict):
        """
        Score each (user, item) pair in the batch.
        :param feed_dict: batch dict holding UID and IID id tensors
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        users = feed_dict[UID]
        items = feed_dict[IID]
        bias_u = self.user_bias(users).view([-1])
        bias_i = self.item_bias(items).view([-1])
        embedding_l2.extend([bias_u, bias_i])
        vec_u = self.uid_embeddings(users)
        vec_i = self.iid_embeddings(items)
        embedding_l2.extend([vec_u, vec_i])
        score = (vec_u * vec_i).sum(dim=1).view([-1])
        score = score + bias_u + bias_i + self.global_bias
        check_list.append(('prediction', score))
        return {PREDICTION: score, CHECK: check_list, EMBEDDING_L2: embedding_l2}
|
class CNNLogic(DeepModel):
    """
    TextCNN-style model over logic-expression token sequences: multi-size 1D
    convolutions over token embeddings, pooled and fed through an MLP to a
    sigmoid truth score in [0, 1].
    """
    include_id = False
    include_user_features = False
    include_item_features = False
    include_context_features = False
    data_loader = 'ProLogicDL'
    data_processor = 'RNNLogicDP'

    @staticmethod
    def parse_model_args(parser, model_name='CNNLogic'):
        parser.add_argument('--filter_size', type=str, default='[2,4,6,8]', help='list or int, means the size of filters')
        parser.add_argument('--filter_num', type=str, default='[16,16,16,16]', help='list or int, means the number of filters')
        parser.add_argument('--pooling', type=str, default='sum', help='Pooling type: sum, min, max, mean')
        return DeepModel.parse_model_args(parser, model_name)

    def __init__(self, filter_size, filter_num, pooling, variable_num, feature_num=(- 1), *args, **kwargs):
        self.pooling = pooling.lower()
        # NOTE(review): eval() parses CLI strings like '[2,4,6,8]'; convenient but
        # unsafe if these strings ever come from untrusted input.
        self.filter_size = (filter_size if (type(filter_size) == list) else eval(filter_size))
        self.filter_num = (filter_num if (type(filter_num) == list) else eval(filter_num))
        if type(self.filter_size) is int:
            self.filter_size = [self.filter_size]
        if type(self.filter_num) is int:
            self.filter_num = [self.filter_num]
        assert len(self.filter_size) == len(self.filter_num)
        assert len(self.filter_size) > 0
        # +3 extra feature ids — presumably padding/special logic tokens; confirm against RNNLogicDP
        DeepModel.__init__(self, *args, feature_num=(variable_num + 3), **kwargs)
        # sigmoid output requires binary labels
        assert self.label_min == 0
        assert self.label_max == 1

    def _init_weights(self):
        self.feature_embeddings = torch.nn.Embedding(self.feature_num, self.f_vector_size)
        self.l2_embeddings = ['feature_embeddings']
        # one Conv2d per filter size; kernel width spans the whole embedding dimension
        for i, size in enumerate(self.filter_size):
            setattr(self, 'conv_%d' % size,
                    torch.nn.Conv2d(in_channels=1, out_channels=self.filter_num[i],
                                    kernel_size=(size, self.f_vector_size)))
        pre_size = sum(self.filter_num)
        for i, layer_size in enumerate(self.layers):
            setattr(self, 'layer_%d' % i, torch.nn.Linear(pre_size, layer_size))
            pre_size = layer_size
        self.prediction = torch.nn.Linear(pre_size, 1)

    def predict(self, feed_dict):
        """
        Encode the token sequence with CNN + pooling and output a sigmoid score.
        :param feed_dict: batch dict with X (padded token ids, 0 = padding) and K_S_LENGTH
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        lengths = feed_dict[K_S_LENGTH]
        sents = feed_dict[X]
        valid_words = sents.gt(0).long()  # mask: 1 for real tokens, 0 for padding
        sent_lengths = valid_words.sum(dim=-1)
        sents_vectors = self.feature_embeddings(sents) * valid_words.unsqueeze(dim=-1).float()
        sents_vectors = sents_vectors.unsqueeze(dim=1)  # add channel dim for Conv2d
        conv_vectors = []
        for i, size in enumerate(self.filter_size):
            conv2d = getattr(self, 'conv_%d' % size)(sents_vectors).squeeze(dim=-1)
            if self.pooling == 'sum':
                conv_vector = conv2d.sum(dim=-1)
            elif self.pooling == 'max':
                # BUG FIX: Tensor.max(dim=...)/min(dim=...) return a (values, indices)
                # namedtuple; the original appended the namedtuple itself, which breaks
                # the torch.cat below. Take the values via [0].
                conv_vector = conv2d.max(dim=-1)[0]
            elif self.pooling == 'min':
                conv_vector = conv2d.min(dim=-1)[0]
            else:
                conv_vector = conv2d.mean(dim=-1)
            conv_vectors.append(conv_vector)
        pre_layer = torch.cat(conv_vectors, dim=-1)
        for i in range(0, len(self.layers)):
            pre_layer = getattr(self, 'layer_%d' % i)(pre_layer)
            pre_layer = F.relu(pre_layer)
            pre_layer = torch.nn.Dropout(p=feed_dict[DROPOUT])(pre_layer)
        prediction = self.prediction(pre_layer).sigmoid().view([-1])
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict

    def forward(self, feed_dict):
        """
        Besides making predictions, also compute the loss.
        :param feed_dict: model input, a dict
        :return: output dict; PREDICTION is the predicted value, CHECK holds
                 intermediate tensors to inspect, LOSS is the training loss
        """
        out_dict = self.predict(feed_dict)
        check_list = out_dict[CHECK]
        prediction, label = out_dict[PREDICTION], feed_dict[Y]
        check_list.append(('prediction', prediction))
        check_list.append(('label', label))
        # project convention: loss_sum==1 -> summed BCE, otherwise mean MSE
        if self.loss_sum == 1:
            loss = torch.nn.BCELoss(reduction='sum')(prediction, label)
        else:
            loss = torch.nn.MSELoss(reduction='mean')(prediction, label)
        out_dict[LOSS] = loss
        out_dict[LOSS_L2] = self.l2(out_dict)
        out_dict[CHECK] = check_list
        return out_dict
|
class DeepModel(BaseModel):
    """
    Plain MLP over the concatenation of all feature embeddings
    (the "deep" part of Wide&Deep-style models).
    """

    @staticmethod
    def parse_model_args(parser, model_name='DeepModel'):
        parser.add_argument('--f_vector_size', type=int, default=64, help='Size of feature vectors.')
        parser.add_argument('--layers', type=str, default='[64]', help='Size of each layer.')
        return BaseModel.parse_model_args(parser, model_name)

    def __init__(self, feature_dims, f_vector_size, layers, *args, **kwargs):
        self.feature_dims = feature_dims
        self.f_vector_size = f_vector_size
        # layers may arrive as a list or as its string repr from the CLI
        self.layers = layers if type(layers) == list else eval(layers)
        BaseModel.__init__(self, *args, **kwargs)

    def _init_weights(self):
        """Embedding table plus a Linear+BatchNorm pair per hidden layer."""
        self.feature_embeddings = torch.nn.Embedding(self.feature_dims, self.f_vector_size)
        self.l2_embeddings = ['feature_embeddings']
        in_size = self.f_vector_size * self.feature_num
        for idx, out_size in enumerate(self.layers):
            setattr(self, 'layer_%d' % idx, torch.nn.Linear(in_size, out_size))
            setattr(self, 'bn_%d' % idx, torch.nn.BatchNorm1d(out_size))
            in_size = out_size
        self.prediction = torch.nn.Linear(in_size, 1)

    def predict(self, feed_dict):
        """
        Run the MLP over flattened feature embeddings.
        :param feed_dict: batch dict with X (feature ids) and DROPOUT rate
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        feature_vectors = self.feature_embeddings(feed_dict[X])
        embedding_l2.append(feature_vectors)
        hidden = feature_vectors.view([-1, self.feature_num * self.f_vector_size])
        for idx in range(len(self.layers)):
            hidden = getattr(self, 'layer_%d' % idx)(hidden)
            hidden = getattr(self, 'bn_%d' % idx)(hidden)
            hidden = F.relu(hidden)
            hidden = torch.nn.Dropout(p=feed_dict[DROPOUT])(hidden)
        # final ReLU keeps predictions non-negative, as in the original design
        prediction = F.relu(self.prediction(hidden)).view([-1])
        return {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
|
class GRU4Rec(RecModel):
    """
    GRU4Rec-style sequential recommender: encodes the signed interaction history
    with a GRU and scores the target item by a dot product between the history
    representation and the target item embedding.
    """
    data_processor = 'HistoryDP'

    @staticmethod
    def parse_model_args(parser, model_name='GRU4Rec'):
        parser.add_argument('--hidden_size', type=int, default=64, help='Size of hidden vectors in GRU.')
        parser.add_argument('--num_layers', type=int, default=1, help='Number of GRU layers.')
        parser.add_argument('--p_layers', type=str, default='[64]', help='Size of each layer.')
        parser.add_argument('--neg_emb', type=int, default=1, help='Whether use negative interaction embeddings.')
        parser.add_argument('--neg_layer', type=str, default='[]', help='Whether use a neg_layer to transfer negative interaction embeddings. [] means using -v. It is ignored when neg_emb=1')
        return RecModel.parse_model_args(parser, model_name)

    def __init__(self, neg_emb, neg_layer, hidden_size, num_layers, p_layers, *args, **kwargs):
        self.neg_emb = neg_emb  # 1: use a separate embedding table for negative interactions
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.p_layers = (p_layers if (type(p_layers) == list) else eval(p_layers))
        self.neg_layer = (neg_layer if (type(neg_layer) == list) else eval(neg_layer))
        RecModel.__init__(self, *args, **kwargs)

    def _init_weights(self):
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        self.l2_embeddings = ['iid_embeddings']
        if self.neg_emb == 1:
            self.iid_embeddings_neg = torch.nn.Embedding(self.item_num, self.ui_vector_size)
            self.l2_embeddings.append('iid_embeddings_neg')
        elif len(self.neg_layer) > 0:
            # MLP that maps a positive item vector to its "negative interaction" counterpart
            pre_size = self.ui_vector_size
            for i, layer_size in enumerate(self.neg_layer):
                setattr(self, 'neg_layer_%d' % i, torch.nn.Linear(pre_size, layer_size))
                pre_size = layer_size
            self.neg_layer_out = torch.nn.Linear(pre_size, self.ui_vector_size)
        self.rnn = torch.nn.GRU(input_size=self.ui_vector_size, hidden_size=self.hidden_size,
                                batch_first=True, num_layers=self.num_layers)
        self.out = torch.nn.Linear(self.hidden_size, self.ui_vector_size, bias=False)

    def predict(self, feed_dict):
        """
        :param feed_dict: expects IID, C_HISTORY (signed item ids: <0 means a negative
                          interaction, 0 is padding) and C_HISTORY_LENGTH
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        i_ids = feed_dict[IID]
        history = feed_dict[C_HISTORY]
        lengths = feed_dict[C_HISTORY_LENGTH]
        valid_his = history.abs().gt(0).long()  # 1 for real items, 0 for padding
        his_pos_neg = history.ge(0).unsqueeze(-1).float()  # 1 = positive, 0 = negative interaction
        his_length = valid_his.sum(dim=-1)
        pos_his_vectors = self.iid_embeddings(history.abs() * valid_his) * valid_his.unsqueeze(dim=-1).float()
        if self.neg_emb == 1:
            neg_his_vectors = self.iid_embeddings_neg(history.abs() * valid_his) * valid_his.unsqueeze(dim=-1).float()
            his_vectors = (pos_his_vectors * his_pos_neg) + ((- his_pos_neg) + 1) * neg_his_vectors
        elif len(self.neg_layer) > 0:
            # BUG FIX: condition was `> 1`, which silently ignored a single-layer
            # neg_layer (e.g. '[64]') even though _init_weights (`> 0`) had built it.
            pre_layer = pos_his_vectors
            for i in range(0, len(self.neg_layer)):
                pre_layer = getattr(self, 'neg_layer_%d' % i)(pre_layer)
                pre_layer = F.relu(pre_layer)
            neg_his_vectors = self.neg_layer_out(pre_layer)
            his_vectors = (pos_his_vectors * his_pos_neg) + ((- his_pos_neg) + 1) * neg_his_vectors
            his_vectors = his_vectors * valid_his.unsqueeze(dim=-1).float()
        else:
            # no extra negative parameters: flip the sign of negative-interaction vectors
            his_vectors = ((his_pos_neg - 0.5) * 2) * pos_his_vectors
        embedding_l2.append(his_vectors)
        # sort by history length (descending), as required by pack_padded_sequence
        sort_his_lengths, sort_idx = torch.topk(his_length, k=len(lengths))
        sort_his_vectors = his_vectors.index_select(dim=0, index=sort_idx)
        history_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_his_vectors, sort_his_lengths, batch_first=True)
        output, hidden = self.rnn(history_packed, None)
        sort_rnn_vector = self.out(hidden[-1])
        # invert the sort permutation to restore the original batch order
        unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
        rnn_vector = sort_rnn_vector.index_select(dim=0, index=unsort_idx)
        cf_i_vectors = self.iid_embeddings(i_ids)
        embedding_l2.append(cf_i_vectors)
        prediction = (rnn_vector * cf_i_vectors).sum(dim=1).view([-1])
        check_list.append(('prediction', prediction))
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict
|
class NARM(GRU4Rec):
    """
    NARM (Neural Attentive Recommendation Machine): a global GRU encoder for the
    overall history state plus a local GRU encoder whose per-step outputs are
    attentively pooled; both are concatenated to form the prediction vector.
    """
    data_processor = 'HistoryDP'

    @staticmethod
    def parse_model_args(parser, model_name='NARM'):
        parser.add_argument('--attention_size', type=int, default=16, help='Size of attention hidden space.')
        return GRU4Rec.parse_model_args(parser, model_name)

    def __init__(self, attention_size, *args, **kwargs):
        self.attention_size = attention_size
        GRU4Rec.__init__(self, *args, **kwargs)

    def _init_weights(self):
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        self.l2_embeddings = ['iid_embeddings']
        if self.neg_emb == 1:
            self.iid_embeddings_neg = torch.nn.Embedding(self.item_num, self.ui_vector_size)
            self.l2_embeddings.append('iid_embeddings_neg')
        elif len(self.neg_layer) > 0:
            # MLP mapping positive item vectors to "negative interaction" vectors
            pre_size = self.ui_vector_size
            for i, layer_size in enumerate(self.neg_layer):
                setattr(self, 'neg_layer_%d' % i, torch.nn.Linear(pre_size, layer_size))
                pre_size = layer_size
            self.neg_layer_out = torch.nn.Linear(pre_size, self.ui_vector_size)
        self.encoder_g = torch.nn.GRU(input_size=self.ui_vector_size, hidden_size=self.hidden_size,
                                      batch_first=True, num_layers=self.num_layers)
        self.encoder_l = torch.nn.GRU(input_size=self.ui_vector_size, hidden_size=self.hidden_size,
                                      batch_first=True, num_layers=self.num_layers)
        # attention: score = attention_out(sigmoid(A1 * global + A2 * local_t))
        self.A1 = torch.nn.Linear(self.hidden_size, self.attention_size, bias=False)
        self.A2 = torch.nn.Linear(self.hidden_size, self.attention_size, bias=False)
        self.attention_out = torch.nn.Linear(self.attention_size, 1, bias=False)
        self.out = torch.nn.Linear(2 * self.hidden_size, self.ui_vector_size, bias=False)

    def predict(self, feed_dict):
        """
        :param feed_dict: expects IID, C_HISTORY (signed item ids: <0 means a negative
                          interaction, 0 is padding) and C_HISTORY_LENGTH
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        i_ids = feed_dict[IID]
        history = feed_dict[C_HISTORY]
        lengths = feed_dict[C_HISTORY_LENGTH]
        valid_his = history.abs().gt(0).long()  # 1 for real items, 0 for padding
        his_pos_neg = history.ge(0).unsqueeze(-1).float()  # 1 = positive, 0 = negative interaction
        his_length = valid_his.sum(dim=-1)
        pos_his_vectors = self.iid_embeddings(history.abs() * valid_his) * valid_his.unsqueeze(dim=-1).float()
        if self.neg_emb == 1:
            neg_his_vectors = self.iid_embeddings_neg(history.abs() * valid_his) * valid_his.unsqueeze(dim=-1).float()
            his_vectors = (pos_his_vectors * his_pos_neg) + ((- his_pos_neg) + 1) * neg_his_vectors
        elif len(self.neg_layer) > 0:
            # BUG FIX: condition was `> 1`, which silently ignored a single-layer
            # neg_layer (e.g. '[64]') even though _init_weights (`> 0`) had built it.
            pre_layer = pos_his_vectors
            for i in range(0, len(self.neg_layer)):
                pre_layer = getattr(self, 'neg_layer_%d' % i)(pre_layer)
                pre_layer = F.relu(pre_layer)
            neg_his_vectors = self.neg_layer_out(pre_layer)
            his_vectors = (pos_his_vectors * his_pos_neg) + ((- his_pos_neg) + 1) * neg_his_vectors
            his_vectors = his_vectors * valid_his.unsqueeze(dim=-1).float()
        else:
            # no extra negative parameters: flip the sign of negative-interaction vectors
            his_vectors = ((his_pos_neg - 0.5) * 2) * pos_his_vectors
        embedding_l2.append(his_vectors)
        # sort by history length (descending), as required by pack_padded_sequence
        sort_his_lengths, sort_idx = torch.topk(his_length, k=len(lengths))
        sort_his_vectors = his_vectors.index_select(dim=0, index=sort_idx)
        history_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_his_vectors, sort_his_lengths, batch_first=True)
        _, hidden_g = self.encoder_g(history_packed, None)
        output_l, hidden_l = self.encoder_l(history_packed, None)
        output_l, _ = torch.nn.utils.rnn.pad_packed_sequence(output_l, batch_first=True)
        # invert the sort permutation to restore the original batch order
        unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
        output_l = output_l.index_select(dim=0, index=unsort_idx)
        hidden_g = hidden_g[-1].index_select(dim=0, index=unsort_idx)
        # masked softmax over attention scores (padding positions zeroed out)
        attention_l = self.A2(output_l)
        attention_g = self.A1(hidden_g)
        attention_value = self.attention_out(torch.sigmoid(attention_g.unsqueeze(1) + attention_l))
        exp_att_v = attention_value.exp() * valid_his.unsqueeze(-1).float()
        exp_att_v_sum = exp_att_v.sum(dim=1, keepdim=True)
        attention_value = exp_att_v / exp_att_v_sum
        c_l = (attention_value * output_l).sum(1)
        pred_vector = self.out(torch.cat((hidden_g, c_l), dim=1))
        cf_i_vectors = self.iid_embeddings(i_ids)
        embedding_l2.append(cf_i_vectors)
        prediction = (pred_vector * cf_i_vectors).sum(dim=-1).view([-1])
        check_list.append(('prediction', prediction))
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict
|
class RNNLogic(DeepModel):
    """
    RNN/LSTM/GRU encoder over padded logic-expression token sequences; the final
    hidden state feeds an MLP that outputs a sigmoid truth score in [0, 1].
    """
    include_id = False
    include_user_features = False
    include_item_features = False
    include_context_features = False
    data_loader = 'ProLogicDL'
    data_processor = 'RNNLogicDP'

    @staticmethod
    def parse_model_args(parser, model_name='RNNLogic'):
        parser.add_argument('--rnn_type', type=str, default='LSTM', help='RNN/LSTM/GRU.')
        parser.add_argument('--rnn_bi', type=int, default=0, help='1=bi-rnn/lstm/gru')
        return DeepModel.parse_model_args(parser, model_name)

    def __init__(self, rnn_type, rnn_bi, variable_num, feature_num=(- 1), *args, **kwargs):
        self.rnn_type = rnn_type.lower()
        self.rnn_bi = rnn_bi
        # +3 extra feature ids — presumably padding/special logic tokens; confirm against RNNLogicDP
        DeepModel.__init__(self, *args, feature_num=(variable_num + 3), **kwargs)
        # sigmoid output requires binary labels
        assert (self.label_min == 0)
        assert (self.label_max == 1)

    def _init_weights(self):
        self.feature_embeddings = torch.nn.Embedding(self.feature_num, self.f_vector_size)
        self.l2_embeddings = ['feature_embeddings']
        # choose the encoder type; all variants are single-layer, batch-first
        if (self.rnn_type == 'gru'):
            self.encoder = torch.nn.GRU(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        elif (self.rnn_type == 'lstm'):
            self.encoder = torch.nn.LSTM(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        else:
            self.encoder = torch.nn.RNN(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        # bidirectional encoders concatenate both directions' hidden states
        if (self.rnn_bi == 1):
            pre_size = (self.f_vector_size * 2)
        else:
            pre_size = self.f_vector_size
        for (i, layer_size) in enumerate(self.layers):
            setattr(self, ('layer_%d' % i), torch.nn.Linear(pre_size, layer_size))
            pre_size = layer_size
        self.prediction = torch.nn.Linear(pre_size, 1)

    def predict(self, feed_dict):
        """
        Encode the token sequence with the RNN and output a sigmoid score.
        :param feed_dict: batch dict with X (padded token ids, 0 = padding), K_S_LENGTH and DROPOUT
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        (check_list, embedding_l2) = ([], [])
        lengths = feed_dict[K_S_LENGTH]
        sents = feed_dict[X]
        valid_words = sents.gt(0).long()  # mask: 1 for real tokens, 0 for padding
        sent_lengths = valid_words.sum(dim=(- 1))
        sents_vectors = (self.feature_embeddings(sents) * valid_words.unsqueeze(dim=(- 1)).float())
        # sort by sequence length (descending), as required by pack_padded_sequence
        (sort_sent_lengths, sort_idx) = torch.topk(sent_lengths, k=len(lengths))
        sort_sent_vectors = sents_vectors.index_select(dim=0, index=sort_idx)
        sents_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_sent_vectors, sort_sent_lengths, batch_first=True)
        if (self.rnn_type == 'lstm'):
            # LSTM returns (h_n, c_n); only h_n is used
            (output_rnn, (hidden_rnn, _)) = self.encoder(sents_packed, None)
        else:
            (output_rnn, hidden_rnn) = self.encoder(sents_packed, None)
        if (self.rnn_bi == 1):
            # concatenate forward (index 0) and backward (index 1) final hidden states
            sort_pre_layer = torch.cat((hidden_rnn[0], hidden_rnn[1]), dim=(- 1))
        else:
            # hidden_rnn[0]: final hidden state of the (single) layer/direction
            sort_pre_layer = hidden_rnn[0]
        # invert the sort permutation to restore the original batch order
        unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
        pre_layer = sort_pre_layer.index_select(dim=0, index=unsort_idx)
        for i in range(0, len(self.layers)):
            pre_layer = getattr(self, ('layer_%d' % i))(pre_layer)
            pre_layer = F.relu(pre_layer)
            pre_layer = torch.nn.Dropout(p=feed_dict[DROPOUT])(pre_layer)
        prediction = self.prediction(pre_layer).sigmoid().view([(- 1)])
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict

    def forward(self, feed_dict):
        """
        Besides making predictions, also compute the loss.
        :param feed_dict: model input, a dict
        :return: output dict; PREDICTION is the predicted value, CHECK holds
                 intermediate tensors to inspect, LOSS is the training loss
        """
        out_dict = self.predict(feed_dict)
        check_list = out_dict[CHECK]
        (prediction, label) = (out_dict[PREDICTION], feed_dict[Y])
        check_list.append(('prediction', prediction))
        check_list.append(('label', label))
        # project convention: loss_sum==1 -> summed BCE, otherwise mean MSE
        if (self.loss_sum == 1):
            loss = torch.nn.BCELoss(reduction='sum')(prediction, label)
        else:
            loss = torch.nn.MSELoss(reduction='mean')(prediction, label)
        out_dict[LOSS] = loss
        out_dict[LOSS_L2] = self.l2(out_dict)
        out_dict[CHECK] = check_list
        return out_dict
|
class RecModel(BaseModel):
    """
    Base class for recommendation models: wires user/item counts and embedding
    sizes into BaseModel and provides a plain dot-product MF predictor.
    """
    include_id = False
    include_user_features = False
    include_item_features = False
    include_context_features = False

    @staticmethod
    def parse_model_args(parser, model_name='RecModel'):
        parser.add_argument('--u_vector_size', type=int, default=64, help='Size of user vectors.')
        parser.add_argument('--i_vector_size', type=int, default=64, help='Size of item vectors.')
        return BaseModel.parse_model_args(parser, model_name)

    def __init__(self, user_num, item_num, u_vector_size, i_vector_size, *args, **kwargs):
        self.u_vector_size = u_vector_size
        self.i_vector_size = i_vector_size
        # user and item embeddings must share one size so dot products are defined
        assert self.u_vector_size == self.i_vector_size
        self.ui_vector_size = self.u_vector_size
        self.user_num = user_num
        self.item_num = item_num
        BaseModel.__init__(self, *args, **kwargs)

    def _init_weights(self):
        """Create user/item embedding tables and register them for l2 tracking."""
        self.uid_embeddings = torch.nn.Embedding(self.user_num, self.ui_vector_size)
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        self.l2_embeddings = ['uid_embeddings', 'iid_embeddings']

    def predict(self, feed_dict):
        """
        Score each (user, item) pair via an embedding dot product.
        :param feed_dict: batch dict holding UID and IID id tensors
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        check_list, embedding_l2 = [], []
        user_vectors = self.uid_embeddings(feed_dict[UID])
        item_vectors = self.iid_embeddings(feed_dict[IID])
        embedding_l2.extend([user_vectors, item_vectors])
        prediction = (user_vectors * item_vectors).sum(dim=1).view([-1])
        return {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
|
class SVDPP(RecModel):
    """
    SVD++-style model: biased MF where the user representation is augmented with
    the sqrt-normalized sum of implicit-feedback item embeddings from the
    user's interaction history.
    """
    data_processor = 'HistoryDP'

    def _init_weights(self):
        self.uid_embeddings = torch.nn.Embedding(self.user_num, self.ui_vector_size)
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        # separate embedding table for items appearing as implicit history
        self.iid_embeddings_implicit = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        self.user_bias = torch.nn.Embedding(self.user_num, 1)
        self.item_bias = torch.nn.Embedding(self.item_num, 1)
        self.global_bias = torch.nn.Parameter(torch.tensor(0.1))
        self.l2_embeddings = ['uid_embeddings', 'iid_embeddings', 'iid_embeddings_implicit', 'user_bias', 'item_bias']

    def predict(self, feed_dict):
        """
        Score (user, item) pairs using (u + history_vector) . i plus bias terms.
        :param feed_dict: batch dict with UID, IID and C_HISTORY (sparse multi-hot
                          matrix OR dense padded id matrix with 0 = padding)
        :return: dict with PREDICTION / CHECK / EMBEDDING_L2
        """
        (check_list, embedding_l2) = ([], [])
        u_ids = feed_dict[UID]
        i_ids = feed_dict[IID]
        history = feed_dict[C_HISTORY]
        # history can arrive as a sparse multi-hot matrix or as dense padded ids
        if ('sparse' in str(history.type())):
            his_vector = history.mm(self.iid_embeddings_implicit.weight)
            his_length = torch.sparse.sum(history, dim=(- 1)).to_dense()
        else:
            valid_his = history.gt(0).long()  # mask: 1 for real items, 0 for padding
            his_vector = self.iid_embeddings_implicit((history * valid_his))
            his_vector = (his_vector * valid_his.unsqueeze(dim=(- 1)).float()).sum(dim=1)
            his_length = valid_his.sum(dim=(- 1))
        # normalize by sqrt(|history|); substitute length 1 for empty histories
        # to avoid division by zero (valid_his is reused here as a per-user flag)
        valid_his = his_length.gt(0).float()
        tmp_length = ((his_length.float() * valid_his) + ((1 - valid_his) * 1))
        his_vector = (his_vector / tmp_length.sqrt().view([(- 1), 1]))
        u_bias = self.user_bias(u_ids).view([(- 1)])
        i_bias = self.item_bias(i_ids).view([(- 1)])
        cf_u_vectors = self.uid_embeddings(u_ids)
        cf_i_vectors = self.iid_embeddings(i_ids)
        check_list.append(('cf_u_vectors', cf_u_vectors))
        check_list.append(('his_vector', his_vector))
        embedding_l2.extend([u_bias, i_bias, cf_u_vectors, cf_i_vectors])
        prediction = ((cf_u_vectors + his_vector) * cf_i_vectors).sum(dim=1).view([(- 1)])
        prediction = (((prediction + u_bias) + i_bias) + self.global_bias)
        check_list.append(('prediction', prediction))
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict
|
class BaseRunner(object):
@staticmethod
def parse_runner_args(parser):
'\n 跑模型的命令行参数\n :param parser:\n :return:\n \n Command-line parameters to run the model\n :param parser:\n :return:\n '
parser.add_argument('--load', type=int, default=0, help='Whether load model and continue to train')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs.')
parser.add_argument('--check_epoch', type=int, default=1, help='Check every epochs.')
parser.add_argument('--early_stop', type=int, default=0, help='whether to early-stop.')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size during training.')
parser.add_argument('--eval_batch_size', type=int, default=(128 * 128), help='Batch size during testing.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout probability for each deep layer')
parser.add_argument('--l2_bias', type=int, default=0, help='Whether add l2 regularizer on bias.')
parser.add_argument('--l2', type=float, default=1e-05, help='Weight of l2_regularize in pytorch optimizer.')
parser.add_argument('--l2s', type=float, default=0.0, help='Weight of l2_regularize in loss.')
parser.add_argument('--grad_clip', type=float, default=10, help='clip_grad_value_ para, -1 means, no clip')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer: GD, Adam, Adagrad')
parser.add_argument('--metrics', type=str, default='RMSE', help='metrics: RMSE, MAE, AUC, F1, Accuracy, Precision, Recall')
parser.add_argument('--pre_gpu', type=int, default=0, help='Whether put all batches to gpu before run batches. If 0, dynamically put gpu for each batch.')
return parser
def __init__(self, optimizer='GD', lr=0.01, epoch=100, batch_size=128, eval_batch_size=(128 * 128), dropout=0.2, l2=1e-05, l2s=1e-05, l2_bias=0, grad_clip=10, metrics='RMSE', check_epoch=10, early_stop=1, pre_gpu=0):
'\n 初始化\n :param optimizer: 优化器名字\n :param lr: 学习率\n :param epoch: 总共跑几轮\n :param batch_size: 训练batch大小\n :param eval_batch_size: 测试batch大小\n :param dropout: dropout比例\n :param l2: l2权重\n :param metrics: 评价指标,逗号分隔\n :param check_epoch: 每几轮输出check一次模型中间的一些tensor\n :param early_stop: 是否自动提前终止训练\n \n Initialization\n :param optimizer: name of optimizer\n :param lr: learning rate\n :param epoch: how many epochs to run\n :param batch_size: training batch size\n :param eval_batch_size: testing batch size\n :param dropout: dropout ratio\n :param l2: wight of l2 regularizer\n :param metrics: evaluation metrics, seperated by comma\n :param check_epoch: every check_epoch rounds, output the intermediate result tensor of the model\n :param early_stop: if or not to do early stopping\n '
self.optimizer_name = optimizer
self.lr = lr
self.epoch = epoch
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.dropout = dropout
self.no_dropout = 0.0
self.l2_weight = l2
self.l2s_weight = l2s
self.l2_bias = l2_bias
self.grad_clip = grad_clip
self.pre_gpu = pre_gpu
self.metrics = metrics.lower().split(',')
self.check_epoch = check_epoch
self.early_stop = early_stop
self.time = None
(self.train_results, self.valid_results, self.test_results) = ([], [], [])
def _build_optimizer(self, model):
'\n 创建优化器\n :param model: 模型\n :return: 优化器\n \n Create the optimizer\n :param model: model\n :return: optimizer\n '
(weight_p, bias_p) = ([], [])
for (name, p) in model.named_parameters():
if (not p.requires_grad):
continue
if ('bias' in name):
bias_p.append(p)
else:
weight_p.append(p)
if (self.l2_bias == 1):
optimize_dict = [{'params': (weight_p + bias_p), 'weight_decay': self.l2_weight}]
else:
optimize_dict = [{'params': weight_p, 'weight_decay': self.l2_weight}, {'params': bias_p, 'weight_decay': 0.0}]
optimizer_name = self.optimizer_name.lower()
if (optimizer_name == 'gd'):
logging.info('Optimizer: GD')
optimizer = torch.optim.SGD(optimize_dict, lr=self.lr)
elif (optimizer_name == 'adagrad'):
logging.info('Optimizer: Adagrad')
optimizer = torch.optim.Adagrad(optimize_dict, lr=self.lr)
elif (optimizer_name == 'adam'):
logging.info('Optimizer: Adam')
optimizer = torch.optim.Adam(optimize_dict, lr=self.lr)
else:
logging.error(('Unknown Optimizer: ' + self.optimizer_name))
assert (self.optimizer_name in ['GD', 'Adagrad', 'Adam'])
optimizer = torch.optim.SGD(optimize_dict, lr=self.lr)
return optimizer
def _check_time(self, start=False):
'\n 记录时间用,self.time保存了[起始时间,上一步时间]\n :param start: 是否开始计时\n :return: 上一步到当前位置的时间\n \n Record the time, self.time records [starting time, time of last step]\n :param start: if or not to start time counting\n :return: the time to reach current position in the previous step\n '
if ((self.time is None) or start):
self.time = ([time()] * 2)
return self.time[0]
tmp_time = self.time[1]
self.time[1] = time()
return (self.time[1] - tmp_time)
def batches_add_control(self, batches, train):
'\n 向所有batch添加一些控制信息比如DROPOUT\n :param batches: 所有batch的list,由DataProcessor产生\n :param train: 是否是训练阶段\n :return: 所有batch的list\n \n Add some control information into all batches, such as DROPOUT\n :param batches: list of all batches, produced by DataProcessor\n :param train: if or not this is training stage\n :return: list of all batches\n '
for batch in batches:
batch[TRAIN] = train
batch[DROPOUT] = (self.dropout if train else self.no_dropout)
return batches
    def predict(self, model, data, data_processor):
        """
        Predict without training.
        :param model: model
        :param data: data dict, produced by DataProcessor's self.get_*_data() and self.format_data_dict() functions
        :param data_processor: DataProcessor instance
        :return: predictions as one concatenated np.array, aligned with data[SAMPLE_ID]
        """
        gc.collect()
        batches = data_processor.prepare_batches(data, self.eval_batch_size, train=False, model=model)
        batches = self.batches_add_control(batches, train=False)
        if (self.pre_gpu == 1):
            # move every batch to GPU up front (trades memory for speed)
            batches = [data_processor.batch_to_gpu(b) for b in batches]
        model.eval()
        predictions = []
        for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
            if (self.pre_gpu == 0):
                # otherwise move batches to GPU one at a time
                batch = data_processor.batch_to_gpu(batch)
            prediction = model.predict(batch)[PREDICTION]
            predictions.append(prediction.detach().cpu().data.numpy())
        predictions = np.concatenate(predictions)
        # batches may not follow the original sample order; map predictions back
        # to the order of data[SAMPLE_ID] via the per-sample ids
        sample_ids = np.concatenate([b[SAMPLE_ID] for b in batches])
        reorder_dict = dict(zip(sample_ids, predictions))
        predictions = np.array([reorder_dict[i] for i in data[SAMPLE_ID]])
        gc.collect()
        return predictions
    def fit(self, model, data, data_processor, epoch=(- 1)):
        """
        Train the model on one epoch of data.
        :param model: model
        :param data: data dict, produced by DataProcessor's self.get_*_data() and self.format_data_dict() functions
        :param data_processor: DataProcessor instance
        :param epoch: epoch number (used only for the progress-bar label)
        :return: (predictions aligned with data[SAMPLE_ID], last batch's output dict
                  for self.check, mean loss, mean l2 loss)
        """
        gc.collect()
        if (model.optimizer is None):
            model.optimizer = self._build_optimizer(model)
        batches = data_processor.prepare_batches(data, self.batch_size, train=True, model=model)
        batches = self.batches_add_control(batches, train=True)
        if (self.pre_gpu == 1):
            batches = [data_processor.batch_to_gpu(b) for b in batches]
        # when rank != 0 the accumulation threshold is doubled — presumably because
        # ranking batches carry paired negative samples; confirm in DataProcessor
        batch_size = (self.batch_size if (data_processor.rank == 0) else (self.batch_size * 2))
        model.train()
        (accumulate_size, prediction_list, output_dict) = (0, [], None)
        (loss_list, loss_l2_list) = ([], [])
        for (i, batch) in tqdm(list(enumerate(batches)), leave=False, desc=('Epoch %5d' % (epoch + 1)), ncols=100, mininterval=1):
            if (self.pre_gpu == 0):
                batch = data_processor.batch_to_gpu(batch)
            accumulate_size += len(batch[Y])
            # NOTE(review): zero_grad() runs every iteration, so gradients do NOT
            # accumulate across iterations even though optimizer.step() is deferred
            # below — confirm whether true gradient accumulation was intended.
            model.optimizer.zero_grad()
            output_dict = model(batch)
            l2 = output_dict[LOSS_L2]
            loss = (output_dict[LOSS] + (l2 * self.l2s_weight))
            loss.backward()
            loss_list.append(loss.detach().cpu().data.numpy())
            loss_l2_list.append(l2.detach().cpu().data.numpy())
            # keep only the real (non-padded) part of each batch's predictions
            prediction_list.append(output_dict[PREDICTION].detach().cpu().data.numpy()[:batch[REAL_BATCH_SIZE]])
            if (self.grad_clip > 0):
                torch.nn.utils.clip_grad_value_(model.parameters(), self.grad_clip)
            # step once enough samples have been seen, or on the final batch
            if ((accumulate_size >= batch_size) or (i == (len(batches) - 1))):
                model.optimizer.step()
                accumulate_size = 0
        model.eval()
        gc.collect()
        predictions = np.concatenate(prediction_list)
        # map predictions back to the original sample order of `data`
        sample_ids = np.concatenate([b[SAMPLE_ID][:b[REAL_BATCH_SIZE]] for b in batches])
        reorder_dict = dict(zip(sample_ids, predictions))
        predictions = np.array([reorder_dict[i] for i in data[SAMPLE_ID]])
        return (predictions, output_dict, np.mean(loss_list), np.mean(loss_l2_list))
def eva_termination(self, model):
'\n 检查是否终止训练,基于验证集\n :param model: 模型\n :return: 是否终止训练\n \n Check if or not to stop training, based on validation set\n :param model: model\n :return: if or not to stop training\n '
metric = self.metrics[0]
valid = self.valid_results
if ((len(valid) > 20) and (metric in utils.LOWER_METRIC_LIST) and utils.strictly_increasing(valid[(- 5):])):
return True
elif ((len(valid) > 20) and (metric not in utils.LOWER_METRIC_LIST) and utils.strictly_decreasing(valid[(- 5):])):
return True
elif ((len(valid) - valid.index(utils.best_result(metric, valid))) > 20):
return True
return False
def train(self, model, data_processor):
    """
    Train the model, evaluating on train/validation/test each epoch, saving the
    best-validation checkpoint and reloading it at the end.
    :param model: model
    :param data_processor: DataProcessor instance
    :return: None
    """
    # epoch=-1 requests the evaluation-order (un-resampled) training data.
    train_data = data_processor.get_train_data(epoch=(- 1), model=model)
    validation_data = data_processor.get_validation_data(model=model)
    # Test data is only evaluated when its labels are known (unlabel_test == 0).
    test_data = (data_processor.get_test_data(model=model) if (data_processor.unlabel_test == 0) else None)
    self._check_time(start=True)
    # Evaluate once before any training so per-epoch results have a baseline.
    init_train = (self.evaluate(model, train_data, data_processor) if (train_data is not None) else ([(- 1.0)] * len(self.metrics)))
    init_valid = (self.evaluate(model, validation_data, data_processor) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
    init_test = (self.evaluate(model, test_data, data_processor) if ((test_data is not None) and (data_processor.unlabel_test == 0)) else ([(- 1.0)] * len(self.metrics)))
    logging.info((('Init: \t train= %s validation= %s test= %s [%.1f s] ' % (utils.format_metric(init_train), utils.format_metric(init_valid), utils.format_metric(init_test), self._check_time())) + ','.join(self.metrics)))
    try:
        for epoch in range(self.epoch):
            self._check_time()
            # Fresh (possibly re-sampled/shuffled) training data for this epoch.
            epoch_train_data = data_processor.get_train_data(epoch=epoch, model=model)
            (train_predictions, last_batch, mean_loss, mean_loss_l2) = self.fit(model, epoch_train_data, data_processor, epoch=epoch)
            # Periodically log intermediate tensors of the last batch for debugging.
            if ((self.check_epoch > 0) and ((epoch == 1) or ((epoch % self.check_epoch) == 0))):
                last_batch['mean_loss'] = mean_loss
                last_batch['mean_loss_l2'] = mean_loss_l2
                self.check(model, last_batch)
            training_time = self._check_time()
            # Train result = [mean loss, rmse of this epoch's predictions].
            train_result = ([mean_loss] + model.evaluate_method(train_predictions, train_data, metrics=['rmse']))
            valid_result = (self.evaluate(model, validation_data, data_processor) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
            test_result = (self.evaluate(model, test_data, data_processor) if ((test_data is not None) and (data_processor.unlabel_test == 0)) else ([(- 1.0)] * len(self.metrics)))
            testing_time = self._check_time()
            self.train_results.append(train_result)
            self.valid_results.append(valid_result)
            self.test_results.append(test_result)
            logging.info((('Epoch %5d [%.1f s]\t train= %s validation= %s test= %s [%.1f s] ' % ((epoch + 1), training_time, utils.format_metric(train_result), utils.format_metric(valid_result), utils.format_metric(test_result), testing_time)) + ','.join(self.metrics)))
            # Save whenever this epoch achieves the best validation result so far.
            if (utils.best_result(self.metrics[0], self.valid_results) == self.valid_results[(- 1)]):
                model.save_model()
            if (self.eva_termination(model) and (self.early_stop == 1)):
                logging.info(('Early stop at %d based on validation result.' % (epoch + 1)))
                break
    except KeyboardInterrupt:
        # Manual interrupt: optionally save the current (not necessarily best) model.
        logging.info('Early stop manually')
        save_here = input('Save here? (1/0) (default 0):')
        if str(save_here).lower().startswith('1'):
            model.save_model()
    # Report the epochs with the best validation and the best test result.
    best_valid_score = utils.best_result(self.metrics[0], self.valid_results)
    best_epoch = self.valid_results.index(best_valid_score)
    logging.info((('Best Iter(validation)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
    best_test_score = utils.best_result(self.metrics[0], self.test_results)
    best_epoch = self.test_results.index(best_test_score)
    logging.info((('Best Iter(test)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
    # Reload the best saved checkpoint before returning.
    model.load_model()
def evaluate(self, model, data, data_processor, metrics=None):
    """
    Evaluate model performance on a dataset.
    :param model: model
    :param data: data dict produced by DataProcessor's get_*_data() / format_data_dict()
    :param data_processor: DataProcessor instance
    :param metrics: list of metric names; defaults to self.metrics
    :return: list of float, one value per metric
    """
    chosen_metrics = self.metrics if metrics is None else metrics
    predictions = self.predict(model, data, data_processor)
    return model.evaluate_method(predictions, data, metrics=chosen_metrics)
def check(self, model, out_dict):
    """
    Log intermediate tensors and mean losses of one batch for debugging.
    :param model: model (unused here; kept for interface compatibility)
    :param out_dict: model output dict of a certain batch
    """
    logging.info(os.linesep)
    for entry in out_dict[CHECK]:
        arr = np.array(entry[1].detach().cpu())
        header = entry[0] + '\t' + str(arr.shape)
        body = np.array2string(arr, threshold=20)
        logging.info(os.linesep.join([header, body]) + os.linesep)
    loss = out_dict['mean_loss']
    l2 = out_dict['mean_loss_l2']
    logging.info('mean loss = %.4f, l2 = %.4f, %.4f' % (loss, l2 * self.l2_weight, l2 * self.l2s_weight))
def run_some_tensors(self, model, data, data_processor, dict_keys):
    """
    Run the model in eval mode and collect selected output tensors, no training.
    :param model: model
    :param data: data dict produced by DataProcessor's get_*_data() / format_data_dict()
    :param data_processor: DataProcessor instance
    :param dict_keys: key or list of keys to collect from each batch's output dict
    :return: dict mapping each key to a np.array aligned with data's sample order
        (a list of per-batch arrays is kept when rows don't align one-per-sample)
    """
    gc.collect()
    if (type(dict_keys) == str):
        dict_keys = [dict_keys]
    batches = data_processor.prepare_batches(data, self.eval_batch_size, train=False, model=model)
    batches = self.batches_add_control(batches, train=False)
    # pre_gpu == 1: move all batches to GPU up front; otherwise one at a time.
    if (self.pre_gpu == 1):
        batches = [data_processor.batch_to_gpu(b) for b in batches]
    result_dict = {}
    for key in dict_keys:
        result_dict[key] = []
    model.eval()
    for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
        if (self.pre_gpu == 0):
            batch = data_processor.batch_to_gpu(batch)
        out_dict = model.predict(batch)
        for key in dict_keys:
            if (key in out_dict):
                result_dict[key].append(out_dict[key].detach().cpu().data.numpy())
    sample_ids = np.concatenate([b[SAMPLE_ID] for b in batches])
    for key in dict_keys:
        try:
            result_array = np.concatenate(result_dict[key])
        except ValueError as e:
            # Per-batch arrays can have inconsistent shapes; fall back to
            # flattening the individual rows into one array.
            logging.warning(('run_some_tensors: %s %s' % (key, str(e))))
            result_array = np.array([d for b in result_dict[key] for d in b])
        # Restore the original sample order when one row was collected per sample;
        # otherwise result_dict[key] stays a list of per-batch arrays.
        if (len(sample_ids) == len(result_array)):
            reorder_dict = dict(zip(sample_ids, result_array))
            result_dict[key] = np.array([reorder_dict[i] for i in data[SAMPLE_ID]])
    gc.collect()
    return result_dict
|
def qk_attention(query, key, value, valid=None, beta=1):
    """
    Dot-product attention pooling over the l (second-to-last) axis.
    :param query: ? * l * a
    :param key: ? * l * a
    :param value: ? * l * v
    :param valid: ? * l, 1/0 mask of valid positions, or None for all-valid
    :param beta: smooth softmax exponent (1 = plain softmax)
    :return: ? * v attention-weighted sum of value
    """
    att_v = (query * key).sum(dim=(- 1), keepdim=True)  # ? * l * 1 logits
    # Subtract the per-row max for numerical stability before exponentiating.
    att_exp = (att_v - att_v.max(dim=(- 2), keepdim=True)[0]).exp()
    if valid is not None:
        ele_valid = valid.unsqueeze(dim=(- 1)).float()
        att_exp = att_exp * ele_valid
        # sum_valid is 0 for rows with no valid positions; the denominator then
        # becomes 1 so those rows yield 0 instead of NaN.
        sum_valid = ele_valid.sum(dim=(- 2), keepdim=True).gt(0).float()
    else:
        # BUG FIX: the original called .float() on the int placeholder when
        # valid was None, raising AttributeError; treat None as an all-ones mask.
        sum_valid = 1
    att_sum = att_exp.sum(dim=(- 2), keepdim=True)
    att_w = att_exp / (((att_sum * sum_valid) + 1) - sum_valid).pow(beta)
    return (att_w * value).sum(dim=(- 2))
|
def seq_rnn(seq_vectors, valid, rnn, lstm=False, init=None):
    """
    Run a packed RNN over padded sequences and restore the original batch order.
    :param seq_vectors: b * l * v padded input vectors
    :param valid: b * l mask, 1 for real positions, 0 for padding
    :param rnn: pytorch RNN object
    :param lstm: True if rnn is an LSTM (its hidden state is an (h, c) tuple)
    :param init: initial hidden state (tensor, or list of tensors for LSTM), or None
    :return: (output b * l * h masked by valid, final hidden state zeroed for
        sequences that had no valid step)
    """
    seq_lengths = valid.sum(dim=(- 1))
    n_samples = seq_lengths.size()[0]
    # 1 for sequences with at least one valid step, 0 for empty ones (broadcastable
    # against the (num_layers, batch, h) hidden state).
    seq_lengths_valid = seq_lengths.gt(0).float().unsqueeze(dim=0).unsqueeze(dim=(- 1))
    # Clamp so empty sequences still pack with length 1; their results get masked below.
    seq_lengths_clamped = seq_lengths.clamp(min=1)
    # pack_padded_sequence requires sequences sorted by decreasing length; topk
    # over the full batch gives both the sorted lengths and the permutation.
    (sort_seq_lengths, sort_idx) = torch.topk(seq_lengths_clamped, k=n_samples)
    sort_seq_vectors = seq_vectors.index_select(dim=0, index=sort_idx)
    # NOTE(review): newer PyTorch requires the lengths passed to
    # pack_padded_sequence to live on CPU — confirm when upgrading.
    seq_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_seq_vectors, sort_seq_lengths, batch_first=True)
    if lstm:
        if (init is not None):
            # LSTM init states are (num_layers, batch, h): permute on dim 1.
            init = [(i.index_select(dim=1, index=sort_idx) if (i is not None) else i) for i in init]
        (sort_output, (sort_hidden, _)) = rnn(seq_packed, init)
    else:
        if (init is not None):
            init = init.index_select(dim=1, index=sort_idx)
        (sort_output, sort_hidden) = rnn(seq_packed, init)
    (sort_output, _) = torch.nn.utils.rnn.pad_packed_sequence(sort_output, batch_first=True, total_length=valid.size()[1])
    # topk(sort_idx, largest=False) yields the inverse permutation of sort_idx.
    unsort_idx = torch.topk(sort_idx, k=n_samples, largest=False)[1]
    output = (sort_output.index_select(dim=0, index=unsort_idx) * valid.unsqueeze(dim=(- 1)).float())
    hidden = (sort_hidden.index_select(dim=1, index=unsort_idx) * seq_lengths_valid)
    return (output, hidden)
|
def rank_loss(prediction, label, real_batch_size, loss_sum):
    """
    BPR-max style rank loss. Reference:
    Hidasi & Karatzoglou, "Recurrent neural networks with top-k gains for
    session-based recommendations", CIKM 2018.
    :param prediction: predicted values [None]; the first real_batch_size entries
        are observed interactions, the rest are k groups of sampled ones
    :param label: labels [None] for the observed part (1 = positive, 0 = negative)
    :param real_batch_size: batch size of observed data, excluding samples
    :param loss_sum: 1 = sum over the batch, otherwise mean
    :return: scalar loss tensor
    """
    # Map {0, 1} labels to {-1, +1} signs.
    sign = (label - 0.5) * 2
    observed = prediction[:real_batch_size]
    sampled = prediction[real_batch_size:].view([(- 1), real_batch_size]) * sign.view([1, real_batch_size])
    # Softmax-weighted aggregation of the sampled scores (max subtracted for stability).
    weights = (sampled - sampled.max(dim=0)[0]).softmax(dim=0)
    aggregated = (sampled * weights).sum(dim=0)
    loss = F.softplus((- sign) * (observed - aggregated))
    return loss.sum() if loss_sum == 1 else loss.mean()
|
def cold_sampling(vectors, cs_ratio):
    """
    Randomly replace a cs_ratio fraction of the vectors with small Gaussian
    noise, simulating cold-start entities during training.
    :param vectors: ? * v
    :param cs_ratio: replacement probability, 0 < cs_ratio < 1
    :return: ? * v tensor with some vectors replaced by N(0, 0.01) noise
    """
    # Bernoulli mask over all but the last dim: 1 where a vector is replaced.
    drop_shape = vectors.size()[:(- 1)]
    drop_probs = torch.empty(drop_shape).fill_(cs_ratio).unsqueeze(dim=(- 1))
    drop_pos = utils.tensor_to_gpu(torch.bernoulli(drop_probs))
    noise = utils.tensor_to_gpu(torch.empty(vectors.size()).normal_(0, 0.01))
    return noise * drop_pos + vectors * (1 - drop_pos)
|
def group_user_interactions_csv(in_csv, out_csv, label=LABEL, sep=SEP, pos_neg=1):
    """
    Group a user-interaction csv into one row per user with the item-id sequence.
    :param in_csv: input csv path
    :param out_csv: output csv path
    :param label: label column name
    :param sep: csv separator
    :param pos_neg: 1 = keep only positive interactions, 0 = only non-positive,
        anything else = keep all (forwarded to group_user_interactions_df)
    :return: grouped pandas DataFrame (also written to out_csv)
    """
    print('group_user_interactions_csv', out_csv)
    all_data = pd.read_csv(in_csv, sep=sep)
    # BUG FIX: group_user_interactions_df requires a pos_neg argument, which the
    # original call omitted (TypeError); forward it with a positive-only default.
    group_inters = group_user_interactions_df(in_df=all_data, pos_neg=pos_neg, label=label)
    group_inters.to_csv(out_csv, sep=sep, index=False)
    return group_inters
|
def group_user_interactions_df(in_df, pos_neg, label=LABEL, seq_sep=SEQ_SEP):
    """
    Group interactions by user into one row per user: uid + joined item-id string.
    :param in_df: interaction DataFrame
    :param pos_neg: 1 = keep only positive rows, 0 = keep only non-positive rows,
        anything else = keep all (only applied when the label column exists)
    :param label: label column name
    :param seq_sep: separator used to join item ids
    :return: DataFrame with columns UID and IIDS
    """
    df = in_df
    if label in df.columns:
        if pos_neg == 1:
            df = df[df[label] > 0]
        elif pos_neg == 0:
            df = df[df[label] <= 0]
    rows = [(uid, seq_sep.join(group[IID].astype(str).tolist()))
            for uid, group in df.groupby(UID)]
    result = pd.DataFrame()
    result[UID] = [r[0] for r in rows]
    result[IIDS] = [r[1] for r in rows]
    return result
|
def random_split_data(all_data_file, dataset_name, vt_ratio=0.1, copy_files=None, copy_suffixes=None):
    """
    Randomly split an already generated dataset file:
    *.all.csv -> *.train.csv, *.validation.csv, *.test.csv
    :param all_data_file: pre-processed data file *.all.csv
    :param dataset_name: name for the dataset (also the output directory name)
    :param vt_ratio: fraction of rows for validation and for test (each)
    :param copy_files: extra file path(s) to copy alongside the splits
    :param copy_suffixes: suffix(es) for the copied files' new names
    :return: (train, validation, test) pandas DataFrames
    """
    dir_name = os.path.join(DATASET_DIR, dataset_name)
    print('random_split_data', dir_name)
    if (not os.path.exists(dir_name)):
        os.mkdir(dir_name)
    all_data = pd.read_csv(all_data_file, sep=SEP)
    vt_size = int((len(all_data) * vt_ratio))
    # Sample validation first, remove it, then sample test from the remainder;
    # sort_index keeps each split in original row order.
    validation_set = all_data.sample(n=vt_size).sort_index()
    all_data = all_data.drop(validation_set.index)
    test_set = all_data.sample(n=vt_size).sort_index()
    train_set = all_data.drop(test_set.index)
    train_set.to_csv(os.path.join(dir_name, (dataset_name + TRAIN_SUFFIX)), index=False, sep=SEP)
    validation_set.to_csv(os.path.join(dir_name, (dataset_name + VALIDATION_SUFFIX)), index=False, sep=SEP)
    test_set.to_csv(os.path.join(dir_name, (dataset_name + TEST_SUFFIX)), index=False, sep=SEP)
    if (copy_files is not None):
        if (type(copy_files) is str):
            copy_files = [copy_files]
        if (type(copy_suffixes) is str):
            copy_suffixes = [copy_suffixes]
        assert ((copy_suffixes is None) or (len(copy_files) == len(copy_suffixes)))
        for (i, copy_file) in enumerate(copy_files):
            copyfile(copy_file, os.path.join(dir_name, (dataset_name + copy_suffixes[i])))
    return (train_set, validation_set, test_set)
|
def leave_out_by_time_df(all_df, leave_n=1, warm_n=5, split_n=1, max_user=(- 1)):
    """
    Leave-out split by interaction time (rows are assumed time-ordered per user).
    :param all_df: full interaction DataFrame
    :param leave_n: number of (positive) interactions left out per user per split
    :param warm_n: number of leading (positive) interactions kept for training
    :param split_n: how many leave-out splits to produce (e.g. 2 = test + validation)
    :param max_user: if > 0, cap the number of users sampled per split
    :return: (train DataFrame, list of split DataFrames in production order)
    """
    min_label = all_df[LABEL].min()
    if (min_label > 0):
        # All interactions are positive: keep each user's first warm_n rows for
        # training, then peel off the last leave_n rows per user for each split.
        leave_df = all_df.groupby(UID).head(warm_n)
        all_df = all_df.drop(leave_df.index)
        split_dfs = []
        for i in range(split_n):
            total_uids = all_df[UID].unique()
            if (0 < max_user < len(total_uids)):
                # Subsample users, then take each sampled user's last leave_n rows.
                total_uids = np.random.choice(total_uids, size=max_user, replace=False).tolist()
                gb_uid = all_df.groupby(UID)
                split_df = []
                for uid in total_uids:
                    split_df.append(gb_uid.get_group(uid).tail(leave_n))
                split_df = pd.concat(split_df).sort_index()
            else:
                split_df = all_df.groupby(UID).tail(leave_n).sort_index()
            all_df = all_df.drop(split_df.index)
            split_dfs.append(split_df)
    else:
        # Mixed positive/negative labels: warm_n / leave_n count positive
        # interactions only; the negatives around them travel with them.
        leave_df = []
        for (uid, group) in all_df.groupby(UID):
            (found, found_idx) = (0, (- 1))
            # Find the index of the warm_n-th positive interaction.
            for idx in group.index:
                if (group.loc[(idx, LABEL)] > 0):
                    found_idx = idx
                    found += 1
                    if (found >= warm_n):
                        break
            if (found > 0):
                # NOTE(review): .loc slicing is label-based and inclusive; this
                # relies on the default integer index — confirm found_idx + 1
                # is a valid slice bound for this DataFrame.
                leave_df.append(group.loc[:(found_idx + 1)])
        leave_df = pd.concat(leave_df)
        all_df = all_df.drop(leave_df.index)
        split_dfs = []
        for i in range(split_n):
            # Only users who still have a positive interaction can be split users.
            total_uids = all_df[(all_df[LABEL] > 0)][UID].unique()
            if (0 < max_user < len(total_uids)):
                total_uids = np.random.choice(total_uids, size=max_user, replace=False).tolist()
            gb_uid = all_df.groupby(UID)
            split_df = []
            for uid in total_uids:
                group = gb_uid.get_group(uid)
                (found, found_idx) = (0, (- 1))
                # Scan backwards for the last leave_n positive interactions.
                for idx in reversed(group.index):
                    if (group.loc[(idx, LABEL)] > 0):
                        found_idx = idx
                        found += 1
                        if (found >= leave_n):
                            break
                if (found > 0):
                    split_df.append(group.loc[found_idx:])
            split_df = pd.concat(split_df).sort_index()
            all_df = all_df.drop(split_df.index)
            split_dfs.append(split_df)
    # Whatever was not selected for any split goes back into the training part.
    leave_df = pd.concat([leave_df, all_df]).sort_index()
    return (leave_df, split_dfs)
|
def leave_out_by_time_csv(all_data_file, dataset_name, leave_n=1, warm_n=5, u_f=None, i_f=None):
    """
    Assuming interactions in all_data are time-ordered, move each user's last
    interactions into validation and test sets.
    :param all_data_file: pre-processed data file *.all.csv, time-ordered
    :param dataset_name: name for the dataset (also the output directory name)
    :param leave_n: interactions left out per user for validation and for test
    :param warm_n: keep at least warm_n training interactions per test user,
        otherwise all of that user's interactions stay in training
    :param u_f: user feature file *.user.csv (copied if given)
    :param i_f: item feature file *.item.csv (copied if given)
    :return: (train, validation, test) pandas DataFrames
    """
    dir_name = os.path.join(DATASET_DIR, dataset_name)
    print('leave_out_by_time_csv', dir_name, leave_n, warm_n)
    if (not os.path.exists(dir_name)):
        os.mkdir(dir_name)
    all_data = pd.read_csv(all_data_file, sep=SEP)
    # split_n=2: the first split produced becomes test, the second validation.
    (train_set, (test_set, validation_set)) = leave_out_by_time_df(all_data, warm_n=warm_n, leave_n=leave_n, split_n=2, max_user=MAX_VT_USER)
    print(('train=%d validation=%d test=%d' % (len(train_set), len(validation_set), len(test_set))))
    if (UID in train_set.columns):
        print(('train_user=%d validation_user=%d test_user=%d' % (len(train_set[UID].unique()), len(validation_set[UID].unique()), len(test_set[UID].unique()))))
    train_set.to_csv(os.path.join(dir_name, (dataset_name + TRAIN_SUFFIX)), index=False, sep=SEP)
    validation_set.to_csv(os.path.join(dir_name, (dataset_name + VALIDATION_SUFFIX)), index=False, sep=SEP)
    test_set.to_csv(os.path.join(dir_name, (dataset_name + TEST_SUFFIX)), index=False, sep=SEP)
    if (u_f is not None):
        copyfile(u_f, os.path.join(dir_name, (dataset_name + USER_SUFFIX)))
    if (i_f is not None):
        copyfile(i_f, os.path.join(dir_name, (dataset_name + ITEM_SUFFIX)))
    return (train_set, validation_set, test_set)
|
def mean_reciprocal_rank(rs):
    """Score is reciprocal of the rank of the first relevant item.

    First element is 'rank 1'. Relevance is binary (nonzero is relevant).
    Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
    >>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.61111111111111105
    Args:
        rs: Iterator of relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        Mean reciprocal rank
    """
    scores = []
    for r in rs:
        hit_positions = np.asarray(r).nonzero()[0]
        # 0.0 for rankings with no relevant item at all.
        scores.append(1.0 / (hit_positions[0] + 1) if hit_positions.size else 0.0)
    return np.mean(scores)
|
def r_precision(r):
    """Score is precision after all relevant documents have been retrieved.

    Relevance is binary (nonzero is relevant).
    >>> r_precision([0, 0, 1])
    0.33333333333333331
    >>> r_precision([0, 1, 0])
    0.5
    >>> r_precision([1, 0, 0])
    1.0
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        R Precision
    """
    rel = np.asarray(r) != 0
    hit_positions = rel.nonzero()[0]
    if hit_positions.size == 0:
        return 0.0
    last_hit = hit_positions[(- 1)]
    # Precision over the prefix ending at the last relevant item.
    return np.mean(rel[:last_hit + 1])
|
def precision_at_k(r, k):
    """Score is precision @ k.

    Relevance is binary (nonzero is relevant).
    >>> r = [0, 0, 1]
    >>> precision_at_k(r, 1)
    0.0
    >>> precision_at_k(r, 3)
    0.33333333333333331
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
    Returns:
        Precision @ k
    Raises:
        ValueError: len(r) must be >= k
    """
    assert (k >= 1)
    r = (np.asarray(r)[:k] != 0)
    # BUG FIX: the docstring promised a ValueError when len(r) < k, but the
    # check was missing, silently averaging over a shorter prefix instead.
    if r.size != k:
        raise ValueError('Relevance score length < k')
    return np.mean(r)
|
def average_precision(r):
    """Score is average precision (area under PR curve).

    Relevance is binary (nonzero is relevant).
    >>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
    >>> average_precision(r)
    0.78333333333333333
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        Average precision
    """
    rel = np.asarray(r) != 0
    precisions = []
    # Precision@k is only accumulated at the positions of relevant items.
    for idx in range(rel.size):
        if rel[idx]:
            precisions.append(precision_at_k(rel, idx + 1))
    return np.mean(precisions) if precisions else 0.0
|
def mean_average_precision(rs):
    """Score is mean average precision.

    Relevance is binary (nonzero is relevant).
    >>> mean_average_precision([[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]])
    0.78333333333333333
    Args:
        rs: Iterator of relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        Mean average precision
    """
    scores = [average_precision(r) for r in rs]
    return np.mean(scores)
|
def dcg_at_k(r, k, method=0):
    """Score is discounted cumulative gain (dcg).

    Relevance is positive real values. Can use binary as the previous methods.
    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    >>> dcg_at_k(r, 1)
    3.0
    >>> dcg_at_k(r, 2)
    5.0
    >>> dcg_at_k(r, 2, method=1)
    4.2618595071429155
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
        method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
                If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
    Returns:
        Discounted cumulative gain
    """
    # BUG FIX: np.asfarray was removed in NumPy 2.0; convert explicitly.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if (method == 0):
            # Ranks 1 and 2 both get weight 1.0, then 1/log2(rank).
            return (r[0] + np.sum((r[1:] / np.log2(np.arange(2, (r.size + 1))))))
        elif (method == 1):
            return np.sum((r / np.log2(np.arange(2, (r.size + 2)))))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.0
|
def ndcg_at_k(r, k, method=0):
    """Score is normalized discounted cumulative gain (ndcg).

    Relevance is positive real values. Can use binary as the previous methods.
    >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    >>> ndcg_at_k(r, 1)
    1.0
    >>> ndcg_at_k([0], 1)
    0.0
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
        method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
                If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
    Returns:
        Normalized discounted cumulative gain
    """
    # Ideal DCG: scores sorted descending give the best possible ordering.
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    return dcg_at_k(r, k, method) / ideal if ideal else 0.0
|
def parse_global_args(parser):
    """
    Register global command-line parameters.
    :param parser: argparse.ArgumentParser
    :return: the same parser, for chaining
    """
    options = [
        ('--gpu', dict(type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')),
        ('--verbose', dict(type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')),
        ('--log_file', dict(type=str, default=os.path.join(LOG_DIR, 'log.txt'), help='Logging file path')),
        ('--result_file', dict(type=str, default=os.path.join(RESULT_DIR, 'result.npy'), help='Result file path')),
        ('--random_seed', dict(type=int, default=DEFAULT_SEED, help='Random seed of numpy and tensorflow.')),
        ('--train', dict(type=int, default=1, help='To train the model or not.')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser
|
def balance_data(data):
    """
    Oversample positive examples so positive/negative counts get close; use when
    the class imbalance is large.
    :param data: dict of parallel numpy arrays, must contain key 'Y' with 0/1 labels
    :return: the same dict, every array extended by the duplicated positives
    """
    pos_indexes = np.where((data['Y'] == 1))[0]
    # BUG FIX: guard against division by zero when there are no positives.
    if len(pos_indexes) == 0:
        return data
    copy_num = int(((len(data['Y']) - len(pos_indexes)) / len(pos_indexes)))
    if (copy_num > 1):
        # Append copy_num full copies of the positive rows to every array.
        copy_indexes = np.tile(pos_indexes, copy_num)
        sample_index = np.concatenate([np.arange(0, len(data['Y'])), copy_indexes])
        for k in data:
            data[k] = data[k][sample_index]
    return data
|
def input_data_is_list(data):
    """
    If data is a list/tuple of dataset dicts, merge them into one dict by
    concatenating each key's arrays (e.g. evaluating validation + test together).
    :param data: dict, or list/tuple of dicts with identical keys
    :return: a single merged dict, or data unchanged when it is already a dict
    """
    if type(data) not in (list, tuple):
        return data
    print('input_data_is_list')
    # The first dict's keys define which arrays get concatenated.
    merged = {k: np.concatenate([d[k] for d in data]) for k in data[0]}
    return merged
|
def format_metric(metric):
    """
    Convert evaluation measures into a comma-joined string; floats keep four
    decimal places, ints are rendered as-is.
    :param metric: a number, or a tuple/list of numbers
    :return: str
    """
    if ((type(metric) is not tuple) and (type(metric) is not list)):
        metric = [metric]
    format_str = []
    for m in metric:
        # BUG FIX: np.float / np.int were removed in NumPy 1.24+ (AttributeError
        # at call time); use the abstract np.floating / np.integer scalar types.
        if isinstance(m, (float, np.floating)):
            format_str.append(('%.4f' % m))
        elif isinstance(m, (int, np.integer)) and not isinstance(m, bool):
            format_str.append(('%d' % m))
    return ','.join(format_str)
|
def shuffle_in_unison_scary(data):
    """
    Shuffle every array in the dataset dict with the same permutation, so
    parallel arrays stay aligned.
    :param data: dict of equal-length numpy arrays
    :return: the same dict, shuffled in place
    """
    saved_state = np.random.get_state()
    for key in data:
        # Restoring the RNG state before each shuffle yields the identical
        # permutation for every array.
        np.random.set_state(saved_state)
        np.random.shuffle(data[key])
    return data
|
def best_result(metric, results_list):
    """
    Pick the best value from a list of results for the given metric.
    :param metric: metric name (or list/tuple of names; only the first is used)
    :param results_list: list of result values
    :return: min for lower-is-better metrics, max otherwise
    """
    if type(metric) in (list, tuple):
        metric = metric[0]
    chooser = min if metric in LOWER_METRIC_LIST else max
    return chooser(results_list)
|
def strictly_increasing(l):
    """Return True iff every element is strictly smaller than the next one."""
    return all(l[i] < l[i + 1] for i in range(len(l) - 1))
|
def strictly_decreasing(l):
    """Return True iff every element is strictly greater than the next one."""
    return all(l[i] > l[i + 1] for i in range(len(l) - 1))
|
def non_increasing(l):
    """Return True iff no element is smaller than the one following it."""
    return all(l[i] >= l[i + 1] for i in range(len(l) - 1))
|
def non_decreasing(l):
    """Return True iff no element is greater than the one following it."""
    return all(l[i] <= l[i + 1] for i in range(len(l) - 1))
|
def monotonic(l):
    """Return True iff the sequence is monotonic (non-increasing or non-decreasing)."""
    pairs = list(zip(l, l[1:]))
    return all(a >= b for (a, b) in pairs) or all(a <= b for (a, b) in pairs)
|
def numpy_to_torch(d, gpu=True, requires_grad=True):
    """
    Convert a numpy array to a pytorch tensor, optionally moving it to GPU.
    Floating-point tensors get requires_grad set as requested.
    :param d: numpy array
    :param gpu: whether to move the tensor to GPU (when one is available)
    :param requires_grad: whether a floating-point tensor requires grad
    :return: torch tensor
    """
    t = torch.from_numpy(d)
    # BUG FIX: the original tested `d.dtype is np.float`, which is always False
    # (a dtype object is never identical to the alias, and np.float itself is
    # removed in NumPy 1.24+), so requires_grad was never applied; test for
    # floating dtypes properly.
    if np.issubdtype(d.dtype, np.floating):
        t.requires_grad = requires_grad
    if gpu:
        t = tensor_to_gpu(t)
    return t
|
def tensor_to_gpu(t):
    """Move tensor t to GPU when at least one CUDA device is available."""
    if torch.cuda.device_count() == 0:
        return t
    return t.cuda()
|
def get_init_paras_dict(class_name, paras_dict):
    """
    Collect the __init__ parameter names of a class and all its bases, and pull
    the corresponding values out of paras_dict.
    :param class_name: the class object
    :param paras_dict: dict holding a value for every collected parameter name
    :return: dict mapping each parameter name (sorted, minus 'self') to its value
    """
    names = set()
    # Walk the full MRO so inherited __init__ parameters are included too.
    for base in inspect.getmro(class_name):
        names.update(inspect.getfullargspec(base.__init__).args)
    return {name: paras_dict[name] for name in sorted(names) if name != 'self'}
|
def check_dir_and_mkdir(path):
if ((os.path.basename(path).find('.') == (- 1)) or path.endswith('/')):
dirname = path
else:
dirname = os.path.dirname(path)
if (not os.path.exists(dirname)):
print('make dirs:', dirname)
os.makedirs(dirname)
return
|
def main():
    """
    Main entry: dump process/environment/cluster info, then import TensorFlow
    and Horovod, initialize Horovod, and print versions and ranks.
    """
    print(('pid %i: Hello' % os.getpid()))
    print('Python version:', sys.version)
    print('Env:')
    for (key, value) in sorted(os.environ.items()):
        print(('%s=%s' % (key, value)))
    print()
    # Grid Engine: dump the parallel-environment host file if present.
    if os.environ.get('PE_HOSTFILE', ''):
        try:
            print(('PE_HOSTFILE, %s:' % os.environ['PE_HOSTFILE']))
            with open(os.environ['PE_HOSTFILE'], 'r') as f:
                print(f.read())
        except Exception as exc:
            print(exc)
    # Grid Engine: list the job spool directory contents if present.
    if os.environ.get('SGE_JOB_SPOOL_DIR', ''):
        print(('SGE_JOB_SPOOL_DIR, %s:' % os.environ['SGE_JOB_SPOOL_DIR']))
        try:
            for name in os.listdir(os.environ['SGE_JOB_SPOOL_DIR']):
                print(name)
            print()
        except Exception as exc:
            print(exc)
    # Open MPI: dump the session directory listing and contact file if present.
    if os.environ.get('OMPI_FILE_LOCATION', ''):
        print(('OMPI_FILE_LOCATION, %s:' % os.environ['OMPI_FILE_LOCATION']))
        d = os.path.dirname(os.path.dirname(os.environ['OMPI_FILE_LOCATION']))
        try:
            print('dir:', d)
            for name in os.listdir(d):
                print(name)
            print()
            print('contact.txt:')
            with open(('%s/contact.txt' % d), 'r') as f:
                print(f.read())
            print()
        except Exception as exc:
            print(exc)
    try:
        # Preload hwloc with global symbol visibility; best-effort only.
        import ctypes
        ctypes.CDLL('libhwloc.so', mode=ctypes.RTLD_GLOBAL)
    except Exception as exc:
        print('Exception while loading libhwloc.so, ignoring...', exc)
    print('sys.path:')
    for p in list(sys.path):
        print(p)
    print()
    try:
        from mpi4py import MPI
        name = MPI.Get_processor_name()
        comm = MPI.COMM_WORLD
        print('mpi4py:', ('name: %s,' % name), ('rank: %i,' % comm.Get_rank()), ('size: %i' % comm.Get_size()))
        # Gather (rank, hostname) pairs from every MPI process.
        hosts = comm.allgather((comm.Get_rank(), name))
        print(' all hosts:', {key: item for (key, item) in hosts})
    except ImportError:
        print('mpi4py not available')
    print('Import TF now...')
    import tensorflow as tf
    print('TF version:', tf.__version__)
    import horovod
    print('Horovod version:', horovod.__version__)
    import horovod.tensorflow as hvd
    hvd.init()
    print(('pid %i: hvd: rank: %i, size: %i, local_rank %i, local_size %i' % (os.getpid(), hvd.rank(), hvd.size(), hvd.local_rank(), hvd.local_size())))
|
def iterate_dataset(dataset, recurrent_net, batch_size, max_seqs):
    """
    Iterate once through a dataset's batches, loading each batch's sequences.
    :type dataset: Dataset.Dataset
    :type recurrent_net: bool
    :type batch_size: int
    :type max_seqs: int
    """
    batch_gen = dataset.generate_batches(recurrent_net=recurrent_net, batch_size=batch_size, max_seqs=max_seqs)
    while batch_gen.has_more():
        # NOTE(review): dev_num_batches is not defined anywhere in this chunk —
        # presumably a module-level constant; verify it exists at runtime.
        batches = batch_gen.peek_next_n(dev_num_batches)
        for batch in batches:
            dataset.load_seqs(batch.start_seq, batch.end_seq)
        batch_gen.advance(len(batches))
|
def iterate_epochs():
    """
    Iterate through all epochs, re-initializing the training data's sequence
    order each epoch and walking through all of its batches.
    """
    start_epoch = 1
    final_epoch = EngineBase.config_get_final_epoch(config)
    print(('Starting with epoch %i.' % (start_epoch,)), file=log.v3)
    print(('Final epoch is: %i' % final_epoch), file=log.v3)
    # Batching parameters come from the global config.
    recurrent_net = ('lstm' in config.value('hidden_type', ''))
    batch_size = config.int('batch_size', 1)
    max_seqs = config.int('max_seqs', (- 1))
    for epoch in range(start_epoch, (final_epoch + 1)):
        print(('Epoch %i.' % epoch), file=log.v3)
        rnn.train_data.init_seq_order(epoch)
        iterate_dataset(rnn.train_data, recurrent_net=recurrent_net, batch_size=batch_size, max_seqs=max_seqs)
    print('Finished all epochs.', file=log.v3)
|
def init(config_filename, command_line_options):
    """
    Initialize RETURNN: exception hooks, config, logging, fault handler, data.
    :param str config_filename: path to the config file
    :param list[str] command_line_options: extra command-line options
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    rnn.init_config(config_filename, command_line_options)
    # Publish the parsed config as this module's global.
    global config
    config = rnn.config
    rnn.init_log()
    print('RETURNN demo-dataset starting up', file=log.v3)
    rnn.init_faulthandler()
    rnn.init_data()
    rnn.print_task_properties()
|
def main(argv):
    """
    Main entry: parse the config path from argv, initialize, run all epochs.
    :param argv: sys.argv-style list; argv[1] must be the config filename
    """
    usage_msg = 'usage: %s <config>' % argv[0]
    assert len(argv) >= 2, usage_msg
    init(config_filename=argv[1], command_line_options=argv[2:])
    iterate_epochs()
    rnn.finalize()
|
def dump_devs(tf_session_opts, use_device_lib=False, filter_gpu=True):
    """
    Print the number and attributes of the locally visible TF devices.

    :param dict[str] tf_session_opts: opts for tf.compat.v1.ConfigProto
    :param bool use_device_lib: query via TF device_lib instead of our helper
    :param bool filter_gpu: only report devices of type "GPU"
    """
    s = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    cuda_num_visible = None
    if s is not None:
        cuda_num_visible = len(s.split(','))
    # Look up gpu_options.visible_device_list once (was two nested lookups before).
    tf_num_visible = None
    visible_device_list = tf_session_opts.get('gpu_options', {}).get('visible_device_list')
    if visible_device_list:
        tf_num_visible = len(visible_device_list.split(','))
    if use_device_lib:
        devs = list(device_lib.list_local_devices())
    else:
        devs = get_tf_list_local_devices()
    if filter_gpu:
        devs = [dev for dev in devs if dev.device_type == 'GPU']
    print(('num devs %i, CUDA num visible %r, TF num visible %r' % (len(devs), cuda_num_visible, tf_num_visible)))
    print('devs:')
    pprint(devs)
    # Run get_device_attr on each device within one session to dump its attributes.
    with tf_compat.v1.Session(config=tf_compat.v1.ConfigProto(**tf_session_opts)) as session:
        for dev in devs:
            print('dev name:', dev.name)
            print('dev attribs:', session.run(get_device_attr(dev.name)))
|
def main():
    """
    Main entry: dump the visible TF devices, optionally repeating for shrinking
    subsets of CUDA_VISIBLE_DEVICES (to see whether list_local_devices is cached).
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--try_subsets', action='store_true')
    arg_parser.add_argument('--visible_device_list')
    arg_parser.add_argument('--use_device_lib', action='store_true')
    args = arg_parser.parse_args()
    orig_cuda_visible_devs_str = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    print('original CUDA_VISIBLE_DEVICES:', orig_cuda_visible_devs_str)
    tf_session_opts = {}
    if args.visible_device_list:
        tf_session_opts.setdefault('gpu_options', {})['visible_device_list'] = args.visible_device_list
        print(('Using TF gpu_options.visible_device_list %r' % args.visible_device_list))
    setup_tf_thread_pools(tf_session_opts=tf_session_opts)
    print_available_devices(tf_session_opts=tf_session_opts)
    dump_devs(tf_session_opts=tf_session_opts, use_device_lib=args.use_device_lib, filter_gpu=False)
    if args.try_subsets:
        print('Trying subsets of CUDA_VISIBLE_DEVICES to see whether list_local_devices is cached.')
        cuda_visible_devs_str = orig_cuda_visible_devs_str
        while cuda_visible_devs_str:
            # Drop the last entry and retry.
            cuda_visible_devs_str = ','.join(cuda_visible_devs_str.split(',')[:-1])
            print('set CUDA_VISIBLE_DEVICES:', cuda_visible_devs_str)
            os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devs_str
            dump_devs(tf_session_opts=tf_session_opts, use_device_lib=args.use_device_lib)
        # Bug fix: os.environ values must be str. If the var was originally unset,
        # restore that state by removing it instead of assigning None (TypeError).
        if orig_cuda_visible_devs_str is None:
            os.environ.pop('CUDA_VISIBLE_DEVICES', None)
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = orig_cuda_visible_devs_str
        print('Recovered original CUDA_VISIBLE_DEVICES')
        dump_devs(tf_session_opts=tf_session_opts, use_device_lib=args.use_device_lib)
    print('Quit.')
|
def get_curl_cmd():
    """
    Build the curl command line which uploads the file via HTTP POST.
    NOTE(review): `fn` and `args` come from the enclosing/module scope — confirm
    they are defined where this is called.

    :rtype: list[str]
    """
    return ['curl', '-F', ('file=@%s' % fn), args.http_host]
|
def _get_dataset_opts(name: str):
    """Build the dataset opts dict for the dataset with the given name."""
    opts = {
        'class': 'TaskNumberBaseConvertDataset',
        'input_base': data_feature_dim.dimension,
        'output_base': targets_dim.dimension,
    }
    if name == 'train':
        opts['num_seqs'] = 10000
    else:
        # Non-train datasets are smaller and get a name-derived fixed seed.
        opts['num_seqs'] = 1000
        opts['fixed_random_seed'] = sum(map(ord, name))
    return opts
|
def get_model(**_kwargs):
    """get model, RETURNN config callback: constructs the Conformer-AED model using the globally defined dims"""
    return Model(in_dim=data_feature_dim, encoder_in_dim=encoder_in_dim, num_enc_layers=12, enc_model_dim=Dim(name='enc', dimension=512, kind=Dim.Types.Feature), enc_ff_dim=Dim(name='enc-ff', dimension=2048, kind=Dim.Types.Feature), enc_att_num_heads=8, enc_conformer_layer_opts=dict(conv_norm_opts=dict(use_mask=True), self_att_opts=dict(with_bias=False, with_linear_pos=False, with_pos_bias=False, learnable_pos_emb=True, separate_pos_emb_per_head=False), ff_activation=(lambda x: (rf.relu(x) ** 2.0))), target_dim=targets_ext_dim, bos_idx=targets_eos_idx, eos_idx=targets_eos_idx)
|
class Model(rf.Module):
    """
    Model definition: Conformer encoder + attention-based LSTM decoder (AED),
    implemented with the RETURNN frontend (rf).
    """

    def __init__(self, in_dim: Dim, encoder_in_dim: Dim, *, num_enc_layers: int=12, target_dim: Dim, eos_idx: int, bos_idx: int, enc_model_dim: Dim=Dim(name='enc', dimension=512), enc_ff_dim: Dim=Dim(name='enc-ff', dimension=2048), enc_att_num_heads: int=4, enc_conformer_layer_opts: Optional[Dict[(str, Any)]]=None, enc_key_total_dim: Dim=Dim(name='enc_key_total_dim', dimension=1024), att_num_heads: Dim=Dim(name='att_num_heads', dimension=1), att_dropout: float=0.1, enc_dropout: float=0.1, enc_att_dropout: float=0.1, l2: float=0.0001):
        """
        :param in_dim: input label dim (input to the embedding)
        :param encoder_in_dim: embedding output dim / encoder input dim
        :param num_enc_layers: number of Conformer encoder layers
        :param target_dim: output label dim
        :param eos_idx: end-of-sequence label index
        :param bos_idx: begin-of-sequence label index
        :param l2: weight decay factor, applied to all parameters
        """
        super(Model, self).__init__()
        self.encoder_in_dim = encoder_in_dim
        # Input labels are embedded before going into the Conformer.
        self.embedding = rf.Embedding(in_dim, encoder_in_dim)
        # Conformer encoder with a conv frontend which downsamples along the time axis.
        self.encoder = ConformerEncoder(encoder_in_dim, enc_model_dim, ff_dim=enc_ff_dim, input_layer=ConformerConvSubsample(encoder_in_dim, out_dims=[Dim(32, name='conv1'), Dim(64, name='conv2'), Dim(64, name='conv3')], filter_sizes=[(3, 3), (3, 3), (3, 3)], pool_sizes=[(1, 2)], strides=[(1, 1), (3, 1), (2, 1)]), encoder_layer_opts=enc_conformer_layer_opts, num_layers=num_enc_layers, num_heads=enc_att_num_heads, dropout=enc_dropout, att_dropout=enc_att_dropout)
        self.target_dim = target_dim
        self.eos_idx = eos_idx
        self.bos_idx = bos_idx
        self.enc_key_total_dim = enc_key_total_dim
        self.enc_key_per_head_dim = enc_key_total_dim.div_left(att_num_heads)
        self.att_num_heads = att_num_heads
        self.att_dropout = att_dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        # Projection of the encoder output into the attention-key space (additive attention).
        self.enc_ctx = rf.Linear(self.encoder.out_dim, enc_key_total_dim)
        self.enc_ctx_dropout = 0.2
        self.enc_win_dim = Dim(name='enc_win_dim', dimension=5)
        # Fertility model used for the accumulated attention weights (coverage-style feedback).
        self.inv_fertility = rf.Linear(self.encoder.out_dim, att_num_heads, with_bias=False)
        self.target_embed = rf.Embedding(target_dim, Dim(name='target_embed', dimension=640))
        # Decoder LSTM; input per step: prev target embedding + prev attention context.
        self.s = rf.ZoneoutLSTM((self.target_embed.out_dim + (att_num_heads * self.encoder.out_dim)), Dim(name='lstm', dimension=1024), zoneout_factor_cell=0.15, zoneout_factor_output=0.05, use_zoneout_output=False, parts_order='jifo', forget_bias=0.0)
        self.weight_feedback = rf.Linear(att_num_heads, enc_key_total_dim, with_bias=False)
        self.s_transformed = rf.Linear(self.s.out_dim, enc_key_total_dim, with_bias=False)
        self.energy = rf.Linear(enc_key_total_dim, att_num_heads, with_bias=False)
        self.readout_in = rf.Linear(((self.s.out_dim + self.target_embed.out_dim) + (att_num_heads * self.encoder.out_dim)), Dim(name='readout', dimension=1024))
        # Readout is halved by reduce_out (max over pieces of 2) before the final projection.
        self.output_prob = rf.Linear((self.readout_in.out_dim // 2), target_dim)
        for p in self.parameters():
            # L2 regularization on all parameters.
            p.weight_decay = l2

    def encode(self, source: Tensor, *, in_spatial_dim: Dim, collected_outputs: Optional[Dict[(str, Tensor)]]=None) -> Tuple[(Dict[(str, Tensor)], Dim)]:
        """encode, and extend the encoder output for things we need in the decoder"""
        source = self.embedding(source)
        # SpecAugment on the embedded input for regularization.
        source = rf.audio.specaugment(source, spatial_dim=in_spatial_dim, feature_dim=self.encoder_in_dim)
        (enc, enc_spatial_dim) = self.encoder(source, in_spatial_dim=in_spatial_dim, collected_outputs=collected_outputs)
        # Precompute attention keys and fertility once per sequence.
        enc_ctx = self.enc_ctx(enc)
        inv_fertility = rf.sigmoid(self.inv_fertility(enc))
        return (dict(enc=enc, enc_ctx=enc_ctx, inv_fertility=inv_fertility), enc_spatial_dim)

    def decoder_default_initial_state(self, *, batch_dims: Sequence[Dim], enc_spatial_dim: Dim) -> rf.State:
        """Default initial state: zero att context and zero accumulated att weights."""
        state = rf.State(s=self.s.default_initial_state(batch_dims=batch_dims), att=rf.zeros((list(batch_dims) + [(self.att_num_heads * self.encoder.out_dim)])), accum_att_weights=rf.zeros((list(batch_dims) + [enc_spatial_dim, self.att_num_heads]), feature_dim=self.att_num_heads))
        # Mark the last axis of the zero att context as the feature axis.
        state.att.feature_dim_axis = (len(state.att.dims) - 1)
        return state

    def loop_step_output_templates(self, batch_dims: List[Dim]) -> Dict[(str, Tensor)]:
        """loop step out: templates describing the shapes/dtypes of the per-step outputs (for rf.scan)"""
        return {'s': Tensor('s', dims=(batch_dims + [self.s.out_dim]), dtype=rf.get_default_float_dtype(), feature_dim_axis=(- 1)), 'att': Tensor('att', dims=(batch_dims + [(self.att_num_heads * self.encoder.out_dim)]), dtype=rf.get_default_float_dtype(), feature_dim_axis=(- 1))}

    def loop_step(self, *, enc: rf.Tensor, enc_ctx: rf.Tensor, inv_fertility: rf.Tensor, enc_spatial_dim: Dim, input_embed: rf.Tensor, state: Optional[rf.State]=None) -> Tuple[(Dict[(str, rf.Tensor)], rf.State)]:
        """step of the inner loop: one decoder LSTM step + additive attention"""
        if (state is None):
            batch_dims = enc.remaining_dims(remove=((enc.feature_dim, enc_spatial_dim) if (enc_spatial_dim != single_step_dim) else (enc.feature_dim,)))
            state = self.decoder_default_initial_state(batch_dims=batch_dims, enc_spatial_dim=enc_spatial_dim)
        state_ = rf.State()
        prev_att = state.att
        (s, state_.s) = self.s(rf.concat_features(input_embed, prev_att), state=state.s, spatial_dim=single_step_dim)
        # Additive (MLP) attention: energy from enc keys + accumulated-weight feedback + state.
        weight_feedback = self.weight_feedback(state.accum_att_weights)
        s_transformed = self.s_transformed(s)
        energy_in = ((enc_ctx + weight_feedback) + s_transformed)
        energy = self.energy(rf.tanh(energy_in))
        att_weights = rf.softmax(energy, axis=enc_spatial_dim)
        # Accumulate attention weights, scaled by the inverse fertility.
        state_.accum_att_weights = (state.accum_att_weights + ((att_weights * inv_fertility) * 0.5))
        att0 = rf.dot(att_weights, enc, reduce=enc_spatial_dim, use_mask=False)
        att0.feature_dim = self.encoder.out_dim
        # Flatten (heads, enc_out) into a single feature dim.
        (att, _) = rf.merge_dims(att0, dims=(self.att_num_heads, self.encoder.out_dim))
        state_.att = att
        return ({'s': s, 'att': att}, state_)

    def decode_logits(self, *, s: Tensor, input_embed: Tensor, att: Tensor) -> Tensor:
        """logits for the decoder, from LSTM state, prev label embedding and att context"""
        readout_in = self.readout_in(rf.concat_features(s, input_embed, att))
        # Maxout: max over pieces of 2, halving the readout dim.
        readout = rf.reduce_out(readout_in, mode='max', num_pieces=2, out_dim=self.output_prob.in_dim)
        readout = rf.dropout(readout, drop_prob=0.3, axis=(self.dropout_broadcast and readout.feature_dim))
        logits = self.output_prob(readout)
        return logits
|
def train_step(*, model: Model, extern_data: TensorDict, **_kwargs):
    """
    Function is run within RETURNN.
    Computes the CE loss (with label smoothing) and the frame error rate.
    """
    data = extern_data[extern_data_inputs_name]
    targets = extern_data[extern_data_targets_name]
    (enc_args, enc_spatial_dim) = model.encode(data, in_spatial_dim=data_spatial_dim)
    batch_dims = data.remaining_dims(data_spatial_dim)
    targets = targets.copy()
    # Use the extended target dim (incl. EOS) for the decoder.
    targets.sparse_dim = targets_ext_dim
    # Teacher forcing: shift right, padding with EOS as the BOS symbol.
    # NOTE(review): the shifted tensor is used both as decoder input and as the
    # CE target below -- confirm this is intended (may be fine for a benchmark).
    targets = rf.shift_right(targets, axis=targets_spatial_dim, pad_value=targets_eos_idx)
    input_embeddings = model.target_embed(targets)

    def _body(input_embed: Tensor, state: rf.State):
        # One decoder step within rf.scan.
        new_state = rf.State()
        (loop_out_, new_state.decoder) = model.loop_step(**enc_args, enc_spatial_dim=enc_spatial_dim, input_embed=input_embed, state=state.decoder)
        return (loop_out_, new_state)

    (loop_out, _, _) = rf.scan(spatial_dim=targets_spatial_dim, xs=input_embeddings, ys=model.loop_step_output_templates(batch_dims=batch_dims), initial=rf.State(decoder=model.decoder_default_initial_state(batch_dims=batch_dims, enc_spatial_dim=enc_spatial_dim)), body=_body)
    logits = model.decode_logits(input_embed=input_embeddings, **loop_out)
    # Flatten over batch+time (dropping padding frames) before the loss.
    (logits_packed, pack_dim) = rf.pack_padded(logits, dims=(batch_dims + [targets_spatial_dim]), enforce_sorted=False)
    (targets_packed, _) = rf.pack_padded(targets, dims=(batch_dims + [targets_spatial_dim]), enforce_sorted=False, out_dim=pack_dim)
    log_prob = rf.log_softmax(logits_packed, axis=model.target_dim)
    log_prob = rf.label_smoothed_log_prob_gradient(log_prob, 0.1, axis=model.target_dim)
    loss = rf.cross_entropy(target=targets_packed, estimated=log_prob, estimated_type='log-probs', axis=model.target_dim)
    loss.mark_as_loss('ce')
    # Frame error rate as an additional (non-trained) metric.
    best = rf.reduce_argmax(logits_packed, axis=model.target_dim)
    frame_error = (best != targets_packed)
    frame_error.mark_as_loss(name='fer', as_error=True)
|
def forward_step(*, model: Model, extern_data: TensorDict, **_kwargs) -> Tuple[(Tensor, Tensor, Dim, Dim)]:
    """
    Function is run within RETURNN.

    Earlier we used the generic beam_search function,
    but now we just directly perform the search here,
    as this is overall simpler and shorter.

    :return:
        recog results including beam {batch, beam, out_spatial},
        log probs {batch, beam},
        out_spatial_dim,
        final beam_dim
    """
    data = extern_data[extern_data_inputs_name]
    batch_dims = data.remaining_dims((data_spatial_dim, data.feature_dim))
    (enc_args, enc_spatial_dim) = model.encode(data, in_spatial_dim=data_spatial_dim)
    beam_size = 12
    length_normalization_exponent = 1.0
    # Decode at most as many steps as the encoder output length.
    max_seq_len = enc_spatial_dim.get_size_tensor()
    print('** max seq len:', max_seq_len.raw_tensor)
    # Eager-mode beam search, starting with beam size 1 (just BOS).
    beam_dim = Dim(1, name='initial-beam')
    batch_dims_ = ([beam_dim] + batch_dims)
    decoder_state = model.decoder_default_initial_state(batch_dims=batch_dims_, enc_spatial_dim=enc_spatial_dim)
    target = rf.constant(model.bos_idx, dims=batch_dims_, sparse_dim=model.target_dim)
    ended = rf.constant(False, dims=batch_dims_)
    out_seq_len = rf.constant(0, dims=batch_dims_)
    seq_log_prob = rf.constant(0.0, dims=batch_dims_)
    i = 0
    seq_targets = []
    seq_backrefs = []
    while True:
        input_embed = model.target_embed(target)
        (step_out, decoder_state) = model.loop_step(**enc_args, enc_spatial_dim=enc_spatial_dim, input_embed=input_embed, state=decoder_state)
        logits = model.decode_logits(input_embed=input_embed, **step_out)
        label_log_prob = rf.log_softmax(logits, axis=model.target_dim)
        # For finished hyps, only EOS (with score 0) is allowed to follow.
        label_log_prob = rf.where(ended, rf.sparse_to_dense(model.eos_idx, axis=model.target_dim, label_value=0.0, other_value=(- 1e+30)), label_log_prob)
        seq_log_prob = (seq_log_prob + label_log_prob)
        # Top-k over the joint (beam, label) axis -> new beam of size beam_size.
        (seq_log_prob, (backrefs, target), beam_dim) = rf.top_k(seq_log_prob, k_dim=Dim(beam_size, name=f'dec-step{i}-beam'), axis=[beam_dim, model.target_dim])
        seq_targets.append(target)
        seq_backrefs.append(backrefs)
        # Reorder all per-beam state to match the selected backrefs.
        decoder_state = tree.map_structure((lambda s: rf.gather(s, indices=backrefs)), decoder_state)
        ended = rf.gather(ended, indices=backrefs)
        out_seq_len = rf.gather(out_seq_len, indices=backrefs)
        i += 1
        ended = rf.logical_or(ended, (target == model.eos_idx))
        ended = rf.logical_or(ended, rf.copy_to_device((i >= max_seq_len)))
        if bool(rf.reduce_all(ended, axis=ended.dims).raw_tensor):
            break
        out_seq_len = (out_seq_len + rf.where(ended, 0, 1))
        if ((i > 1) and (length_normalization_exponent != 0)):
            # Length-normalized scores: renormalize hyps which ended at this step
            # from 1/(i-1) to 1/i normalization.
            seq_log_prob *= rf.where(ended, ((i / (i - 1)) ** length_normalization_exponent), 1.0)
    if ((i > 0) and (length_normalization_exponent != 0)):
        seq_log_prob *= ((1 / i) ** length_normalization_exponent)
    # Backtrack through the stored backrefs to reconstruct the label sequences.
    seq_targets_ = []
    indices = rf.range_over_dim(beam_dim)
    for (backrefs, target) in zip(seq_backrefs[::(- 1)], seq_targets[::(- 1)]):
        seq_targets_.insert(0, rf.gather(target, indices=indices))
        indices = rf.gather(backrefs, indices=indices)
    seq_targets__ = TensorArray(seq_targets_[0])
    for target in seq_targets_:
        seq_targets__ = seq_targets__.push_back(target)
    out_spatial_dim = Dim(out_seq_len, name='out-spatial')
    seq_targets = seq_targets__.stack(axis=out_spatial_dim)
    return (seq_targets, seq_log_prob, out_spatial_dim, beam_dim)
|
def main():
    """main: dispatch on --bench-action (run / profile / multi-run)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--bench-action', choices=('run', 'multi-run', 'profile'), default='run')
    (args, remaining_args) = parser.parse_known_args()
    try:
        multiprocessing.set_start_method('spawn')
    except Exception as exc:
        print("multiprocessing.set_start_method 'spawn' exception:", exc)
        print('Ignoring this...')
    action = args.bench_action
    if action == 'profile':
        _custom_loop(remaining_args)
    elif action == 'run':
        __main__.main(sys.argv[:1] + [_my_file] + remaining_args)
    elif action == 'multi-run':
        # Benchmark each interesting commit in a throwaway shared clone.
        with tempfile.TemporaryDirectory(prefix='returnn-tmp-checkout-') as returnn_tmp_dir:
            _subproc_check_call('git', 'clone', '--shared', _returnn_root_dir, returnn_tmp_dir)
            os.chdir(returnn_tmp_dir)
            _subproc_check_call('git', 'config', '--local', 'advice.detachedHead', 'false')
            os.environ['PYTHONUNBUFFERED'] = '1'
            for commit in _interesting_commits:
                _subproc_check_call('git', 'checkout', commit)
                _subproc_check_call_filter_returnn_out(sys.executable, 'rnn.py', _my_file, *remaining_args)
    else:
        raise ValueError(f'invalid --bench-action {args.bench_action!r}')
|
def _custom_loop(argv):
    """
    Custom training loop which runs with the torch profiler enabled and writes
    TensorBoard trace files for the profiled steps.

    :param list[str] argv: remaining command line args, see the arg parser below
    """
    from returnn.log import log
    from returnn.util.basic import hms
    from returnn.datasets import init_dataset
    from returnn.torch.data import pipeline as data_pipeline
    from returnn.torch.data import returnn_dataset_wrapper
    from returnn.torch.data import extern_data as extern_data_util
    from returnn.torch.engine import get_device_from_config_opt
    from returnn.torch.frontend.bridge import rf_module_to_pt_module
    import torch
    from torch.utils.data import DataLoader
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--device', default=None)
    arg_parser.add_argument('--tb-dir', default='tb-log')
    args = arg_parser.parse_args(argv)
    rf.select_backend_torch()
    extern_data_template = extern_data_util.extern_data_template_from_config_opts(extern_data)
    device_with_reason = get_device_from_config_opt(args.device)
    device = device_with_reason.result
    print('Using device:', device, f'({device_with_reason.reason})', file=log.v2)
    # Build the rf model and wrap it as a PyTorch module for the optimizer.
    model = get_model()
    pt_model = rf_module_to_pt_module(model)
    pt_model.to(device)
    pt_model.train()
    optimizer = torch.optim.Adam(pt_model.parameters(), lr=config['learning_rate'])
    # Standard RETURNN torch data pipeline: dataset -> batching -> DataLoader.
    dataset = init_dataset(train)
    wrapped_dataset = returnn_dataset_wrapper.ReturnnDatasetIterDataPipe(dataset)
    batch_size = config['batch_size']
    max_seqs = config['max_seqs']
    batches_dataset = data_pipeline.BatchingIterDataPipe(wrapped_dataset, batch_size=batch_size, max_seqs=max_seqs)
    data_loader = DataLoader(batches_dataset, batch_size=None, collate_fn=data_pipeline.collate_batch)
    data_iter = iter(data_loader)
    with torch.profiler.profile(schedule=torch.profiler.schedule(wait=1, warmup=4, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(args.tb_dir), record_shapes=True, profile_memory=True, with_stack=True, experimental_config=torch._C._profiler._ExperimentalConfig(verbose=True)) as prof:
        step_idx = 0
        epoch_start_time = time.time()
        elapsed_computation_time = 0
        while True:
            with torch.no_grad():
                # Data loading should not contribute to the autograd graph.
                extern_data_raw = next(data_iter, None)
            if (extern_data_raw is None):
                break
            step_begin_time = time.time()
            optimizer.zero_grad()
            extern_data_ = extern_data_util.raw_dict_to_extern_data(extern_data_raw, extern_data_template=extern_data_template, device=device)
            rf.init_train_step_run_ctx(train_flag=True, step=step_idx, epoch=1)
            with rf.set_default_device_ctx(device):
                train_step(model=model, extern_data=extern_data_)
            train_ctx = rf.get_run_ctx()
            total_loss = train_ctx.total_loss()
            total_loss.raw_tensor.backward()
            optimizer.step()
            print(('step %i, loss %f' % (step_idx, total_loss.raw_tensor.detach().cpu())), file=log.v3)
            elapsed_computation_time += (time.time() - step_begin_time)
            step_idx += 1
            # Advance the profiler schedule (wait/warmup/active phases).
            prof.step()
    elapsed = (time.time() - epoch_start_time)
    elapsed_computation_percentage = (elapsed_computation_time / elapsed)
    print(('Trained %i steps, %s elapsed (%.1f%% computing time)' % (step_idx, hms(elapsed), (elapsed_computation_percentage * 100.0))), file=log.v3)
|
def _subproc_check_call(*args):
    """Echo the command, then run it; raises subprocess.CalledProcessError on non-zero exit."""
    cmd = list(args)
    print('$', *cmd)
    subprocess.check_call(cmd)
|
def _subproc_check_call_filter_returnn_out(*args):
    """
    Run the command, echoing the first few output lines and any line containing
    b'elapsed' in full, while compressing all other output to progress dots.

    :param args: command and its arguments
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    print('$', *args)
    line_count = 0
    need_newline = False
    with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
        while True:
            line = p.stdout.readline()
            if not line:
                break
            if line_count < 5:
                # Show the first few lines in full.
                sys.stdout.buffer.write(line)
            elif b'elapsed' in line:
                if need_newline:
                    sys.stdout.buffer.write(b'\n')
                    # Bug fix: after emitting the newline we are at line start again.
                    need_newline = False
                sys.stdout.buffer.write(line)
            else:
                sys.stdout.buffer.write(b'.')
                need_newline = True
            sys.stdout.buffer.flush()
            line_count += 1
        if need_newline:
            sys.stdout.buffer.write(b'\n')
            sys.stdout.buffer.flush()
        # Bug fix: p.returncode is only set once the process has been reaped;
        # EOF on stdout does not guarantee that, so wait() explicitly.
        returncode = p.wait()
    if returncode != 0:
        raise subprocess.CalledProcessError(returncode, args)
|
def main():
    """
    Main entry: write a minimal RETURNN config into a temp dir and run the
    Sprint interface init in forward-only mode against it.
    """
    tmp_dir = tempfile.mkdtemp()
    os.symlink('%s/returnn' % _base_dir, '%s/returnn' % tmp_dir)
    config_fn = '%s/returnn.config' % tmp_dir
    config_lines = [
        '#!rnn.py\n',
        'use_tensorflow = True\n',
        'num_inputs, num_outputs = 3, 5\n',
        "network = {'output': {'class': 'softmax', 'target': 'classes'}}\n",
        "model = %r + '/model'\n" % tmp_dir,
    ]
    with open(config_fn, 'w') as f:
        f.writelines(config_lines)
    # Touch a fake checkpoint meta file so epoch 1 appears to exist.
    open('%s/model.001.meta' % tmp_dir, 'w').close()
    sys.path.insert(0, tmp_dir)
    print('Import SprintInterface (relative import).')
    import returnn.sprint.interface
    print('SprintInterface.init')
    returnn.sprint.interface.init(inputDim=3, outputDim=5, cudaEnabled=0, targetMode='forward-only', config=('epoch:1,action:nop,configfile:%s' % config_fn))
    print('Ok.')
|
def make_config_dict(lstm_unit, use_gpu):
    """
    :param str lstm_unit: "NativeLSTM", "LSTMBlock", "LSTMBlockFused", "CudnnLSTM", etc, one of LstmCellTypes
    :param bool use_gpu:
    :return: config dict
    :rtype: dict[str]
    """
    num_layers = base_settings['num_layers']
    network = {}
    # Build a bidirectional LSTM stack: layers lstm1..lstmN, each with bwd+fwd parts.
    for layer_idx in range(1, num_layers + 1):
        for dir_str, direction in (('bwd', -1), ('fwd', 1)):
            layer = {'class': 'rec', 'unit': lstm_unit, 'n_out': base_settings['n_hidden'], 'direction': direction}
            if layer_idx > 1:
                # Each layer above the first reads both directions of the previous layer.
                layer['from'] = ['lstm%i_fwd' % (layer_idx - 1), 'lstm%i_bwd' % (layer_idx - 1)]
            network['lstm%i_%s' % (layer_idx, dir_str)] = layer
    network['output'] = {'class': 'softmax', 'loss': 'ce', 'target': 'classes', 'from': ['lstm%i_fwd' % num_layers, 'lstm%i_bwd' % num_layers]}
    return {'device': 'gpu' if use_gpu else 'cpu', 'train': {'class': 'Task12AXDataset', 'num_seqs': base_settings['num_seqs']}, 'num_inputs': _input_dim, 'num_outputs': _output_dim, 'num_epochs': 1, 'model': None, 'tf_log_dir': None, 'tf_log_memory_usage': True, 'network': network, 'batch_size': base_settings['batch_size'], 'max_seqs': base_settings['max_seqs'], 'chunking': base_settings['chunking'], 'optimizer': {'class': 'adam'}, 'learning_rate': 0.01}
|
def benchmark(lstm_unit, use_gpu):
    """
    :param str lstm_unit: e.g. "LSTMBlock", one of LstmCellTypes
    :param bool use_gpu:
    :return: runtime in seconds of the training itself, excluding initialization
    :rtype: float
    """
    device = {True: 'GPU', False: 'CPU'}[use_gpu]
    key = ('%s:%s' % (device, lstm_unit))
    print(('>>> Start benchmark for %s.' % key))
    config = Config()
    config.update(make_config_dict(lstm_unit=lstm_unit, use_gpu=use_gpu))
    dataset_kwargs = config.typed_value('train')
    Dataset.kwargs_update_from_config(config, dataset_kwargs)
    dataset = init_dataset(dataset_kwargs)
    engine = Engine(config=config)
    engine.init_train_from_config(config=config, train_data=dataset)
    print(('>>> Start training now for %s.' % key))
    # Only engine.train() itself is timed; all setup above is excluded.
    start_time = time.time()
    engine.train()
    runtime = (time.time() - start_time)
    print(('>>> Runtime of %s: %s' % (key, hms_fraction(runtime))))
    engine.finalize()
    return runtime
|
def main():
    """
    Main entry: parse settings overrides, then benchmark all (selected) LSTM
    cell types on GPU and/or CPU, and print a runtime-sorted summary.
    """
    global LstmCellTypes
    print('Benchmarking LSTMs.')
    better_exchook.install()
    print('Args:', ' '.join(sys.argv))
    arg_parser = ArgumentParser()
    arg_parser.add_argument('cfg', nargs='*', help=('opt=value, opt in %r' % sorted(base_settings.keys())))
    arg_parser.add_argument('--no-cpu', action='store_true')
    arg_parser.add_argument('--no-gpu', action='store_true')
    arg_parser.add_argument('--selected', help=('comma-separated list from %r' % LstmCellTypes))
    arg_parser.add_argument('--no-setup-tf-thread-pools', action='store_true')
    args = arg_parser.parse_args()
    # Apply "opt=value" overrides onto base_settings, keeping each option's original type.
    for opt in args.cfg:
        (key, value) = opt.split('=', 1)
        assert (key in base_settings)
        value_type = type(base_settings[key])
        base_settings[key] = value_type(value)
    print('Settings:')
    pprint(base_settings)
    log.initialize(verbosity=[4])
    print('Returnn:', describe_returnn_version(), file=log.v3)
    print('TensorFlow:', describe_tensorflow_version(), file=log.v3)
    print('Python:', sys.version.replace('\n', ''), sys.platform)
    if (not args.no_setup_tf_thread_pools):
        setup_tf_thread_pools(log_file=log.v2)
    else:
        print('Not setting up the TF thread pools. Will be done automatically by TF to number of CPU cores.')
    if args.no_gpu:
        print('GPU will not be used.')
    else:
        print(('GPU available: %r' % is_gpu_available()))
    print_available_devices()
    if args.selected:
        # Restrict the benchmarked cell types to the explicit selection.
        LstmCellTypes = args.selected.split(',')
    benchmarks = {}
    if ((not args.no_gpu) and is_gpu_available()):
        for lstm_unit in LstmCellTypes:
            benchmarks[('GPU:' + lstm_unit)] = benchmark(lstm_unit=lstm_unit, use_gpu=True)
    if (not args.no_cpu):
        for lstm_unit in LstmCellTypes:
            if (lstm_unit in GpuOnlyCellTypes):
                # Skip cell types which require a GPU.
                continue
            benchmarks[('CPU:' + lstm_unit)] = benchmark(lstm_unit=lstm_unit, use_gpu=False)
    print(('-' * 20))
    print('Settings:')
    pprint(base_settings)
    print('Final results:')
    # Sort by runtime, fastest first.
    for (t, lstm_unit) in sorted([(t, lstm_unit) for (lstm_unit, t) in sorted(benchmarks.items())]):
        print((' %s: %s' % (lstm_unit, hms_fraction(t))))
    print('Done.')
|
class Hyp:
    """
    Represents a hypothesis in a given decoder step, including the label sequence so far.
    """

    def __init__(self, idx):
        """
        :param int idx: hyp idx (to identify it in a beam)
        """
        self.idx = idx
        self.source_idx = None  # idx of the hyp in the previous beam this one was expanded from
        self.score = 0.0  # accumulated score of the label sequence
        self.seq = []  # label sequence so far

    def expand(self, idx, label, score):
        """
        Create a new hypothesis which extends this one by one label (this hyp is unchanged).

        :param int idx: hyp idx of the new hyp in the new beam
        :param int label: label to append to the sequence
        :param float score: new total score of the extended hyp
        :rtype: Hyp
        """
        new_hyp = Hyp(idx=idx)
        new_hyp.source_idx = self.idx
        new_hyp.seq = list(self.seq) + [label]
        new_hyp.score = score
        return new_hyp

    def __repr__(self):
        """Debug representation (added for easier inspection of beams)."""
        return 'Hyp(idx=%r, source_idx=%r, score=%r, seq=%r)' % (self.idx, self.source_idx, self.score, self.seq)
|
def main():
    """
    Main entry: demo of "recog step by step" -- load a compiled TF graph and a
    checkpoint, then run a manual beam search over the declared stochastic vars.

    NOTE(review): the op/placeholder names near the end still contain '...?'
    placeholders -- this script appears incomplete / illustrative only.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--graph', help='compiled TF graph', required=True)
    arg_parser.add_argument('--chkpt', help='TF checkpoint (model params)', required=True)
    arg_parser.add_argument('--beam_size', type=int, default=12)
    arg_parser.add_argument('--rec_step_by_step_json', required=True)
    args = arg_parser.parse_args()

    def make_initial_feed_dict():
        """
        :return: whatever placeholders we have for input features...
        :rtype: dict
        """
        return {}

    info = json.load(open(args.rec_step_by_step_json))
    assert isinstance(info, dict)
    if (os.path.splitext(args.graph)[1] in ['.meta', '.metatxt']):
        # A MetaGraph also brings its own saver.
        saver = tf.compat.v1.train.import_meta_graph(args.graph)
    else:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(open(args.graph, 'rb').read())
        tf.import_graph_def(graph_def)
        saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as session:
        saver.restore(session, args.chkpt)
        initial_feed_dict = make_initial_feed_dict()
        session.run(info['init_op'], feed_dict=initial_feed_dict)
        # Start with a single empty hypothesis.
        hyps = [Hyp(idx=0)]
        max_dec_len = 100
        for i in range(max_dec_len):
            for stochastic_var in info['stochastic_var_order']:
                assert isinstance(stochastic_var, str)
                session.run(info['stochastic_vars'][stochastic_var]['calc_scores_op'])
                scores = session.run(info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)])
                assert (isinstance(scores, numpy.ndarray) and (scores.ndim == 2) and (scores.shape[0] == len(hyps)))
                # Expand every hyp with every label, then keep the beam_size best.
                all_possibilities = [((hyp.score + scores[(i, j)]), j, hyp) for (i, hyp) in enumerate(hyps) for j in range(scores.shape[1])]
                best_possibilities = sorted(all_possibilities)[:args.beam_size]
                assert (len(best_possibilities) == args.beam_size)
                hyps = [hyp.expand(idx=i, label=label, score=score) for (i, (score, label, hyp)) in enumerate(best_possibilities)]
                # Feed the chosen labels back into the graph (op names are placeholders, see NOTE above).
                session.run((info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)] + '/Assign...?'), feed_dict={(info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)] + '/Initial...?'): [[hyp.seq[(- 1)] for hyp in hyps]]})
            # Reorder the recurrent state according to the selected source beams.
            session.run(info['select_src_beams']['op'], feed_dict={info['select_src_beams']['src_beams_placeholder']: [[hyp.source_idx] for hyp in hyps]})
            session.run(info['next_step_op'])
    print('Best hypotheses:')
    for hyp in hyps:
        print(('score %.2f: %r' % (hyp.score, hyp.seq)))
|
def mkdir_p(path):
    """
    Create the directory `path` including missing parents; no error if it
    already exists as a directory (like `mkdir -p`).

    :param str path:
    """
    # os.makedirs(exist_ok=True) replaces the former manual errno.EEXIST check:
    # it is still an error if `path` exists but is not a directory.
    os.makedirs(path, exist_ok=True)
|
def hdf5_strings(handle, name, data):
    """
    Store a list of strings as HDF5 dataset `name` in `handle`.
    First tries a fixed-size bytes dtype; on failure falls back to a
    variable-length str dtype.

    :param handle: open h5py.File (or group)
    :param str name: dataset name
    :param list[str] data:
    """
    try:
        max_len = max([len(d) for d in data])
        dset = handle.create_dataset(name, (len(data),), dtype=('S' + str(max_len)))
        dset[...] = data
    except Exception:
        dt = h5py.special_dtype(vlen=str)
        # Bug fix: only delete a partially-created dataset if it actually exists;
        # the first attempt may have failed before creating anything
        # (e.g. empty `data` makes max() raise), and `del` would then KeyError.
        if name in handle:
            del handle[name]
        dset = handle.create_dataset(name, (len(data),), dtype=dt)
        dset[...] = data
|
def load_char_list(char_list_path):
    """Read the character-list file: one label per line, surrounding whitespace stripped."""
    with open(char_list_path) as f:
        return [line.strip() for line in f]
|
def load_file_list_and_transcriptions_and_sizes_and_n_labels(file_list_path, char_list_path, pad_whitespace, base_path):
    """
    Parse the file-list file and collect image paths, integer transcriptions and image sizes.

    :param str file_list_path: non-comment lines: name, status, ..., width (col 6),
        height (col 7), transcription text (cols 8+)
    :param str char_list_path: path to the character list (one label per line)
    :param bool pad_whitespace: surround each transcription with the whitespace label "|"
    :param str base_path: prefix for the image file names
    :return: (file_list, transcription_list, size_list, num_labels)
    :rtype: (list[str], list[list[int]], list[(int,int)], int)
    """
    charlist = load_char_list(char_list_path)
    file_list = []
    transcription_list = []
    size_list = []
    with open(file_list_path) as f:
        for line in f:
            if line.startswith('#'):
                continue
            sp = line.split()
            status = sp[1]
            assert status in ('ok', 'err'), status
            assert len(sp) >= 9, line
            name = sp[0]
            text = ''.join(sp[8:])
            # Insert the whitespace label "|" before any apostrophe not already preceded by one.
            text = re.sub("([^|])'", "\\g<1>|'", text)
            width = int(sp[6])
            height = int(sp[7])
            if (height < 1) or (width < 1):
                # Skip degenerate images.
                continue
            size_list.append((height, width))
            # (Removed an unused local which split the name on '-' and discarded the result.)
            name = (base_path + name) + '.png'
            text = [charlist.index(c) for c in text]
            if pad_whitespace:
                text = ([charlist.index('|')] + text) + [charlist.index('|')]
            file_list.append(name)
            transcription_list.append(text)
    return (file_list, transcription_list, size_list, len(charlist))
|
def write_to_hdf(file_list, transcription_list, charlist, n_labels, out_file_name, dataset_prefix, pad_y=15, pad_x=15, compress=True):
    """
    Write the images (inverted, padded, flattened to a 1D pixel sequence, scaled
    to [0,1]) and their transcriptions into an HDF file.

    :param list[str] file_list: image file paths
    :param list[list[int]] transcription_list: per image, transcription as label indices
    :param list[str] charlist: label strings (stored as "labels" / target classes)
    :param int n_labels: NOTE(review): unused in this function -- confirm whether needed
    :param str out_file_name: output HDF file path
    :param str dataset_prefix: prefix for the stored seq tags
    :param int pad_y: vertical padding (pixels) on each side
    :param int pad_x: horizontal padding (pixels) on each side
    :param bool compress: gzip-compress the "inputs" dataset
    """
    with h5py.File(out_file_name, 'w') as f:
        f.attrs['inputPattSize'] = 1
        f.attrs['numDims'] = 1
        f.attrs['numSeqs'] = len(file_list)
        classes = charlist
        inputs = []
        sizes = []
        seq_lengths = []
        targets = []
        for (i, (img_name, transcription)) in enumerate(zip(file_list, transcription_list)):
            # Targets of all seqs are stored flat, indexed via seqLengths.
            targets += transcription
            img = imread(img_name)
            # Invert pixel values.
            img = (255 - img)
            img = numpy.pad(img, ((pad_y, pad_y), (pad_x, pad_x)), 'constant')
            sizes.append(img.shape)
            # Flatten the image to a (num_pixels, 1) sequence.
            img = img.reshape(img.size, 1)
            inputs.append(img)
            seq_lengths.append([[img.size, len(transcription), 2]])
            if ((i % 100) == 0):
                # Progress output.
                print(i, '/', len(file_list))
        inputs = numpy.concatenate(inputs, axis=0)
        # Concatenating along axis 0 of the (n, 2) int array flattens it to 1D.
        sizes = numpy.concatenate(numpy.array(sizes, dtype='int32'), axis=0)
        seq_lengths = numpy.concatenate(numpy.array(seq_lengths, dtype='int32'), axis=0)
        targets = numpy.array(targets, dtype='int32')
        f.attrs['numTimesteps'] = inputs.shape[0]
        if compress:
            f.create_dataset('inputs', compression='gzip', data=(inputs.astype('float32') / 255.0))
        else:
            f['inputs'] = (inputs.astype('float32') / 255.0)
        hdf5_strings(f, 'labels', classes)
        f['seqLengths'] = seq_lengths
        # Seq tags: dataset_prefix + "/" + image basename without the ".png" extension.
        seq_tags = [((dataset_prefix + '/') + tag.split('/')[(- 1)].split('.png')[0]) for tag in file_list]
        hdf5_strings(f, 'seqTags', seq_tags)
        f['targets/data/classes'] = targets
        f['targets/data/sizes'] = sizes
        hdf5_strings(f, 'targets/labels/classes', classes)
        hdf5_strings(f, 'targets/labels/sizes', ['foo'])
        g = f.create_group('targets/size')
        g.attrs['classes'] = len(classes)
        g.attrs['sizes'] = 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.