from base.recommender import Recommender
from data.ui_graph import Interaction
from util.algorithm import find_k_largest
from time import strftime, localtime, time
from data.loader import FileIO
from os.path import abspath
from util.evaluation import ranking_evaluation
import sys
class GraphRecommender(Recommender):
def __init__(self, conf, training_set, test_set, **kwargs):
super(GraphRecommender, self).__init__(conf, training_set, test_set, **kwargs)
self.data = Interaction(conf, training_set, test_set)
self.bestPerformance = []
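        # cutoffs for top-N ranking evaluation, parsed from e.g. '-topN 10,20,50'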
top = self.ranking['-topN'].split(',')
self.topN = [int(num) for num in top]
self.max_N = max(self.topN)
def print_model_info(self):
super(GraphRecommender, self).print_model_info()
        # print dataset statistics
        print('Training Set Size: (user number: %d, item number: %d, interaction number: %d)' % self.data.training_size())
        print('Test Set Size: (user number: %d, item number: %d, interaction number: %d)' % self.data.test_size())
print('=' * 80)
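    # Template methods overridden by concrete models. predict(u) must return a
    # score for every item, indexable by the internal item ids used in self.data.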
def build(self):
pass
def train(self):
pass
def predict(self, u):
pass
def test(self):
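        # render a console progress bar: 50 characters wide, so each '+' is 2%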
def process_bar(num, total):
rate = float(num) / total
ratenum = int(50 * rate)
r = '\rProgress: [{}{}]{}%'.format('+' * ratenum, ' ' * (50 - ratenum), ratenum*2)
sys.stdout.write(r)
sys.stdout.flush()
# predict
rec_list = {}
user_count = len(self.data.test_set)
for i, user in enumerate(self.data.test_set):
candidates = self.predict(user)
# predictedItems = denormalize(predictedItems, self.data.rScale[-1], self.data.rScale[0])
            # mask items the user has already rated in the training set so they
            # cannot be recommended again
            rated_list, _ = self.data.user_rated(user)
            for item in rated_list:
                candidates[self.data.item[item]] = -10e8
ids, scores = find_k_largest(self.max_N, candidates)
item_names = [self.data.id2item[iid] for iid in ids]
rec_list[user] = list(zip(item_names, scores))
if i % 1000 == 0:
process_bar(i, user_count)
process_bar(user_count, user_count)
print('')
return rec_list
def evaluate(self, rec_list):
self.recOutput.append('userId: recommendations in (itemId, ranking score) pairs, * means the item is hit.\n')
for user in self.data.test_set:
line = user + ':'
for item in rec_list[user]:
line += ' (' + item[0] + ',' + str(item[1]) + ')'
if item[0] in self.data.test_set[user]:
line += '*'
line += '\n'
self.recOutput.append(line)
current_time = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
# output prediction result
out_dir = self.output['-dir']
file_name = self.config['model.name'] + '@' + current_time + '-top-' + str(self.max_N) + 'items' + '.txt'
FileIO.write_file(out_dir, file_name, self.recOutput)
        print('The result has been output to %s.' % abspath(out_dir))
file_name = self.config['model.name'] + '@' + current_time + '-performance' + '.txt'
self.result += ranking_evaluation(self.data.test_set, rec_list, self.topN)
self.model_log.add('###Evaluation Results###')
self.model_log.add(self.result)
FileIO.write_file(out_dir, file_name, self.result)
print('The result of %s:\n%s' % (self.model_name, ''.join(self.result)))
def fast_evaluation(self, epoch):
print('evaluating the model...')
rec_list = self.test()
measure = ranking_evaluation(self.data.test_set, rec_list, [self.max_N])
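        # bestPerformance = [best_epoch, {metric: value}]. Each metric casts a
        # vote; the current epoch becomes the new best when it wins on a
        # majority of the tracked metrics.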
if len(self.bestPerformance) > 0:
count = 0
performance = {}
for m in measure[1:]:
k, v = m.strip().split(':')
performance[k] = float(v)
for k in self.bestPerformance[1]:
if self.bestPerformance[1][k] > performance[k]:
count += 1
else:
count -= 1
if count < 0:
self.bestPerformance[1] = performance
self.bestPerformance[0] = epoch + 1
self.save()
else:
self.bestPerformance.append(epoch + 1)
performance = {}
for m in measure[1:]:
k, v = m.strip().split(':')
performance[k] = float(v)
self.bestPerformance.append(performance)
self.save()
print('-' * 120)
        print('Real-Time Ranking Performance (Top-%d Item Recommendation)' % self.max_N)
measure = [m.strip() for m in measure[1:]]
print('*Current Performance*')
print('Epoch:', str(epoch + 1) + ',', ' | '.join(measure))
bp = ''
# for k in self.bestPerformance[1]:
# bp+=k+':'+str(self.bestPerformance[1][k])+' | '
bp += 'Hit Ratio' + ':' + str(self.bestPerformance[1]['Hit Ratio']) + ' | '
bp += 'Precision' + ':' + str(self.bestPerformance[1]['Precision']) + ' | '
bp += 'Recall' + ':' + str(self.bestPerformance[1]['Recall']) + ' | '
# bp += 'F1' + ':' + str(self.bestPerformance[1]['F1']) + ' | '
        bp += 'NDCG' + ':' + str(self.bestPerformance[1]['NDCG'])
print('*Best Performance* ')
print('Epoch:', str(self.bestPerformance[0]) + ',', bp)
print('-' * 120)
return measure
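
# Usage sketch (hypothetical; not part of this module). A concrete model
# subclasses GraphRecommender and overrides predict() to return one score per
# item. RandomScorer below, the `item_num` attribute, and the driver calls are
# illustrative assumptions about the surrounding framework, kept commented out
# so the module stays importable.
#
# import numpy as np
#
# class RandomScorer(GraphRecommender):
#     """Smoke-test baseline: scores every item uniformly at random."""
#     def predict(self, u):
#         return np.random.rand(self.data.item_num)
#
# model = RandomScorer(conf, training_set, test_set)
# rec_list = model.test()       # ranked top-N lists per test user
# model.evaluate(rec_list)      # writes recommendation and performance files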