code
stringlengths
17
6.64M
class MeterLogger(object):
    """A class to package and print a collection of meters.

    Keeps one dict of meters per mode (e.g. 'train'/'val') so the same
    statistic can be tracked independently per phase.
    """

    def __init__(self, modes=('train', 'val')):
        # One meter dict and one logger dict per mode, so phases don't share state.
        self.modes = list(modes)
        self.meter = {}
        self.logger = {}
        for mode in modes:
            self.meter[mode] = {}
            self.logger[mode] = {}
        # Wall-clock timer used by print_meter to report seconds/iteration.
        self.timer = Meter.TimeMeter(None)
        self.metername_to_ptype = {}

    def _ver2tensor(self, target):
        # One-hot encode a 1-D vector of class indices into an (N, nclass) matrix.
        # NOTE(review): self.nclass is never assigned in this class; it must be
        # provided by a subclass or set externally -- confirm before relying on
        # AP/mAP/Confusion meters here.
        target_mat = torch.zeros(target.shape[0], self.nclass)
        for (i, j) in enumerate(target):
            target_mat[i][j] = 1
        return target_mat

    def _to_tensor(self, var):
        # Coerce Variables, numpy arrays and plain scalars into torch tensors.
        if isinstance(var, torch.autograd.Variable):
            var = var.data
        if (not torch.is_tensor(var)):
            if isinstance(var, np.ndarray):
                var = torch.from_numpy(var)
            else:
                var = torch.Tensor([var])
        return var

    def add_meter(self, meter_name, meter):
        # Register an independent deep copy of the meter under every mode.
        for mode in self.modes:
            self.meter[mode][meter_name] = copy.deepcopy(meter)

    def update_meter(self, output, target=None, meters={'METER_NAME_HERE'}, phase='train'):
        # Feed (output, target) into each named meter for the given phase.
        # NOTE(review): the default for `meters` is a placeholder set (and a
        # mutable default argument); callers are expected to always pass an
        # explicit collection of registered meter names.
        for meter_name in meters:
            assert (meter_name in self.meter[phase].keys()), 'Unrecognized meter name {}'.format(meter_name)
            meter = self.meter[phase][meter_name]
            if (not isinstance(meter, Meter.SingletonMeter)):
                # SingletonMeter stores raw values; every other meter wants tensors.
                output = self._to_tensor(output)
                if (target is not None):
                    target = self._to_tensor(target)
            if (isinstance(meter, Meter.APMeter) or isinstance(meter, Meter.mAPMeter) or isinstance(meter, Meter.ConfusionMeter)):
                assert (target is not None), "Meter '{}' of type {} requires 'target' is not None".format(meter_name, type(meter))
                # These meters consume a one-hot target matrix, not raw indices.
                target_th = self._ver2tensor(target)
                meter.add(output, target_th)
            elif (target is not None):
                meter.add(output, target)
            else:
                meter.add(output)

    def peek_meter(self, phase='train'):
        """Returns a dict of all meters and their values.

        For meters whose value() is a list/tuple, only the first element is kept.
        """
        result = {}
        for key in self.meter[phase].keys():
            val = self.meter[phase][key].value()
            val = (val[0] if isinstance(val, (list, tuple)) else val)
            result[key] = val
        return result

    def reset_meter(self, meterlist=None, phase='train'):
        # Reset the timer plus either the named meters or all meters of the phase.
        self.timer.reset()
        if (meterlist is None):
            meterlist = self.meter[phase].keys()
        for meter_name in meterlist:
            assert (meter_name in self.meter[phase].keys()), 'Unrecognized meter name {}'.format(meter_name)
            self.meter[phase][meter_name].reset()

    def print_meter(self, mode, iepoch, ibatch=1, totalbatch=1, meterlist=None):
        # Build one printf-style format string plus a value list, meter by meter.
        assert (mode in self.modes), f'{mode} is not any phase'
        pstr = '%s:\t[%d][%d/%d] \t'
        tval = []
        tval.extend([mode, iepoch, ibatch, totalbatch])
        if (meterlist is None):
            meterlist = self.meter[mode].keys()
        for meter_name in meterlist:
            assert (meter_name in self.meter[mode].keys()), 'Unrecognized meter name {}'.format(meter_name)
            meter = self.meter[mode][meter_name]
            if isinstance(meter, Meter.ConfusionMeter):
                # Confusion matrices are not printable on a single line.
                continue
            if isinstance(meter, Meter.ClassErrorMeter):
                # NOTE(review): self.topk is never assigned in this class;
                # confirm it is set elsewhere before using ClassErrorMeter here.
                pstr += (('Acc@1 %.2f%% \t Acc@' + str(self.topk)) + ' %.2f%% \t')
                tval.extend([self.meter[mode][meter_name].value()[0], self.meter[mode][meter_name].value()[1]])
            elif isinstance(meter, Meter.mAPMeter):
                pstr += 'mAP %.3f \t'
                tval.extend([self.meter[mode][meter_name].value()])
            elif isinstance(meter, Meter.AUCMeter):
                pstr += 'AUC %.3f \t'
                tval.extend([self.meter[mode][meter_name].value()])
            elif (isinstance(meter, Meter.ValueSummaryMeter) or isinstance(meter, Meter.MSEMeter)):
                # These meters render themselves via __str__.
                pstr += '{}: {}'.format(meter_name, self.meter[mode][meter_name])
            elif isinstance(meter, Meter.MultiValueSummaryMeter):
                pstr += '{}: {}'.format(meter_name, self.meter[mode][meter_name])
            else:
                warnings.warn("Can't print meter '{}' of type {}".format(meter_name, type(meter)), RuntimeWarning)
        pstr += ' %.2fs/its\t'
        tval.extend([self.timer.value()])
        print((pstr % tuple(tval)), flush=True)

    def flush(self):
        # Subclasses that buffer output (e.g. tensorboard writers) override this.
        warnings.warn('Is flush implemented/necessary for your meterlogger?')
class TensorboardMeterLogger(MeterLogger):
    """A class to package and visualize meters via TensorboardX.

    Args:
        log_dir: Directory to write events to (log_dir/env)
        env: Tensorboard environment to log to.
        plotstylecombined: Whether to plot curves in the same window.
        loggers: All modes: defaults to ['train', 'val']. If plotstylecombined,
            these will be superimposed in one plot.
    """

    def __init__(self, env, log_dir=None, plotstylecombined=True, loggers=('train', 'val')):
        super().__init__(modes=loggers)
        self.env = env
        self.log_dir = os.path.join(log_dir, env)
        self.logger = {}
        self.writer = {}
        # One SummaryWriter per mode, e.g. <log_dir>/<env>-train.
        for logger in loggers:
            self.logger[logger] = {}
            self.writer[logger] = tensorboardX.SummaryWriter(logdir=(self.log_dir + '-{}'.format(logger)))
        self.metername_to_ptype = {}
        self.plotstylecombined = plotstylecombined

    def __addlogger(self, meter, ptype, kwargs=None):
        """Bind the matching SummaryWriter method for `meter` under every mode.

        Args:
            meter: Meter name, used as the tensorboard tag.
            ptype: Plot type ('line', 'image', 'histogram', 'text', 'video').
            kwargs: Extra keyword args bound into the writer call (video only).

        Raises:
            NotImplementedError: for plot types TensorboardX cannot draw.
        """
        # Fixed: `kwargs` was a mutable default argument ({}).
        kwargs = {} if kwargs is None else kwargs
        # Fixed: this assignment was previously repeated once per writer key.
        self.metername_to_ptype[meter] = ptype
        # Note: plotstylecombined previously selected between two *identical*
        # branches; tensorboard superimposes per-writer curves itself, so a
        # single branch per ptype is enough.
        if ptype == 'stacked_line':
            raise NotImplementedError('stacked_line not yet implemented for TensorboardX meter')
        elif ptype == 'heatmap':
            raise NotImplementedError('heatmap not yet implemented for TensorboardX meter')
        elif ptype == 'line':
            for key in self.writer.keys():
                self.logger[key][meter] = functools.partial(self.writer[key].add_scalar, tag=meter)
        elif ptype == 'image':
            for key in self.writer.keys():
                self.logger[key][meter] = functools.partial(self.writer[key].add_image, tag=meter)
        elif ptype == 'histogram':
            for key in self.writer.keys():
                self.logger[key][meter] = functools.partial(self.writer[key].add_histogram, tag=meter)
        elif ptype == 'text':
            for key in self.writer.keys():
                self.logger[key][meter] = functools.partial(self.writer[key].add_text, tag=meter)
        elif ptype == 'video':
            for key in self.writer.keys():
                self.logger[key][meter] = functools.partial(self.writer[key].add_video, tag=meter, **kwargs)

    def add_meter(self, meter_name, meter, ptype=None, kwargs=None):
        """Register `meter` under every mode and attach a tensorboard logger.

        The plot type is taken from `ptype` when given, otherwise inferred
        from the meter's class.
        """
        super().add_meter(meter_name, meter)
        if ptype:
            self.__addlogger(meter_name, ptype, kwargs)
        elif isinstance(meter, Meter.ClassErrorMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.mAPMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.AUCMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.ConfusionMeter):
            self.__addlogger(meter_name, 'heatmap')
        elif isinstance(meter, Meter.MSEMeter):
            self.__addlogger(meter_name, 'line')
        elif (type(meter) == Meter.ValueSummaryMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.MultiValueSummaryMeter):
            self.__addlogger(meter_name, 'stacked_line')
        else:
            # Fixed typo in message: "pytpe" -> "ptype".
            raise NotImplementedError('Unknown meter type (and ptype): {} ({})'.format(type(meter), ptype))

    def reset_meter(self, iepoch, mode='train', meterlist=None):
        """Log every (selected) meter's current value at step `iepoch`, then reset it."""
        self.timer.reset()
        for (meter_name, meter) in self.meter[mode].items():
            if ((meterlist is not None) and (meter_name not in meterlist)):
                continue
            val = self.meter[mode][meter_name].value()
            val = (val[0] if isinstance(val, (list, tuple)) else val)
            # Skip (but still reset) meters with no data or NaN values.
            should_reset_and_continue = False
            if (isinstance(val, str) or (val is None)):
                should_reset_and_continue = (val is None)
            elif isinstance(val, np.ndarray):
                should_reset_and_continue = np.isnan(val).any()
            elif isinstance(val, torch.Tensor):
                should_reset_and_continue = torch.isnan(val).any()
            else:
                should_reset_and_continue = np.isnan(val)
            if should_reset_and_continue:
                self.meter[mode][meter_name].reset()
                continue
            if isinstance(meter, Meter.ConfusionMeter):
                # NOTE(review): heatmap loggers raise NotImplementedError in
                # __addlogger, so this branch is currently unreachable.
                self.logger[mode][meter_name].log(val, global_step=iepoch)
            elif ('image' == self.metername_to_ptype[meter_name]):
                try:
                    self.logger[mode][meter_name](img_tensor=val, global_step=iepoch)
                except ValueError as e:
                    print(f'trouble logging {meter_name} {e}')
                    print('probably due to fake 0 data the data is all at 0')
            elif ('histogram' == self.metername_to_ptype[meter_name]):
                try:
                    self.logger[mode][meter_name](values=val, global_step=iepoch)
                except ValueError as e:
                    print(f'trouble logging {meter_name} {e}')
                    print('probably due to fake 0 data the data is all at 0')
            elif ('text' == self.metername_to_ptype[meter_name]):
                if (val is not None):
                    self.logger[mode][meter_name](text_string=val, global_step=iepoch)
            elif ('video' == self.metername_to_ptype[meter_name]):
                if (val is not None):
                    self.logger[mode][meter_name](vid_tensor=val, global_step=iepoch)
            elif isinstance(self.meter[mode][meter_name], Meter.MultiValueSummaryMeter):
                # Fixed: `global_step` was previously passed to np.array()
                # (a TypeError) via the nonexistent kwarg `scalar_val`; it
                # belongs on the writer call, keyed `scalar_value`.
                self.logger[mode][meter_name](scalar_value=np.array(np.cumsum(val)), global_step=iepoch)
            else:
                self.logger[mode][meter_name](scalar_value=val, global_step=iepoch)
            self.meter[mode][meter_name].reset()

    def flush(self):
        # Push buffered events to disk for every mode's writer.
        for k in self.writer:
            self.writer[k].flush()
class VisdomConnections(object):
    """Global registry of connections to visdom.

    Caches one client per (server, port) so repeated logging calls reuse an
    existing connection instead of opening a new one each time.
    """

    def __init__(self):
        self.connections = {}
        self.log_connections = {}

    def add(self, server, port, log_to_filename):
        """Return the cached client for (server, port), creating it if needed.

        Raises:
            AssertionError: if a cached connection for this endpoint was
                registered with a different log file.
        """
        key = (server, port)
        if key in self.connections:
            assert (self.log_connections[key] == log_to_filename), 'Cannot set log for {} to {}. Already set to {}!'.format(key, log_to_filename, self.log_connections[key])
        else:
            self.connections[key] = visdom.Visdom(server=('http://' + server), port=port, log_to_filename=log_to_filename)
            self.log_connections[key] = log_to_filename
        return self.connections[key]
class BaseVisdomLogger(Logger):
    """The base class for logging output to Visdom.

    ***THIS CLASS IS ABSTRACT AND MUST BE SUBCLASSED***

    Note that Visdom uses a server architecture, so the Visdom server must be
    running at all times. It can be started with
        $ python -m visdom.server
    and you probably want to run it from screen or tmux.
    """

    @property
    def viz(self):
        # The shared visdom client obtained from the global connection registry.
        return self._viz

    def __init__(self, fields=None, win=None, env=None, opts={}, port=8097, server='localhost', log_to_filename=None):
        super(BaseVisdomLogger, self).__init__(fields)
        self.win = win
        self.env = env
        self.opts = opts
        self._viz = VISDOM_CONNECTIONS.add(server, port, log_to_filename)

    def log(self, *args, **kwargs):
        raise NotImplementedError('log not implemented for BaseVisdomLogger, which is an abstract class.')

    def _viz_prototype(self, vis_fn):
        """Wrap a visdom drawing function (e.g. self.viz.image) so that every
        call reuses this logger's window/env/opts and remembers the window
        handle it drew into.
        """
        def _viz_logger(*args, **kwargs):
            self.win = vis_fn(*args, win=self.win, env=self.env, opts=self.opts, **kwargs)
        return _viz_logger

    def log_state(self, state):
        """Walk each configured field path through the nested `state` dict and
        pass the collected leaf values to self.log as positional args.
        """
        collected = []
        for field in self.fields:
            node = state
            for part in field:
                node = node[part]
            collected.append(node)
        self.log(*collected)
class VisdomSaver(object):
    """Serialize the state of the Visdom server to disk.

    Unless you have a fancy schedule, where different envs are saved with
    different frequencies, you probably only need one of these.
    """

    def __init__(self, envs=None, port=8097, server='localhost', log_to_filename=None):
        super(VisdomSaver, self).__init__()
        self.envs = envs
        # Reuse the globally cached connection for this server/port.
        self.viz = VISDOM_CONNECTIONS.add(server, port, log_to_filename)

    def save(self, *args, **kwargs):
        # Extra args are accepted (and ignored) so this can be used as a hook.
        self.viz.save(self.envs)
class VisdomLogger(BaseVisdomLogger):
    """A generic Visdom logger that works with the majority of Visdom plot types.

    Args:
        plot_type: The name of the plot type, in Visdom (must be a method of
            the visdom client, e.g. 'image', 'histogram').
        fields: Currently unused.

    Examples:
        >>> # Image example
        >>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
        >>> image_logger = VisdomLogger('image')
        >>> image_logger.log(img_to_use)

        >>> # Histogram example
        >>> hist_data = np.random.rand(10000)
        >>> hist_logger = VisdomLogger('histogram', opts=dict(title='Random!', numbins=20))
        >>> hist_logger.log(hist_data)
    """

    def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server='localhost', log_to_filename=None):
        super(VisdomLogger, self).__init__(fields, win, env, opts, port, server, log_to_filename)
        self.plot_type = plot_type
        # Resolve the visdom drawing method once, then wrap it so every call
        # reuses this logger's window/env/opts.
        self.chart = getattr(self.viz, plot_type)
        self.viz_logger = self._viz_prototype(self.chart)

    def log(self, *args, **kwargs):
        self.viz_logger(*args, **kwargs)
class VisdomPlotLogger(BaseVisdomLogger):
    """Plots (x, y) points to a visdom scatter/line window, appending to the
    window once it exists on screen."""

    def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server='localhost', name=None, log_to_filename=None):
        '''
        Multiple lines can be added to the same plot with the "name" attribute (see example)
        Args:
            fields: Currently unused
            plot_type: {scatter, line}

        Examples:
            >>> scatter_logger = VisdomPlotLogger('line')
            >>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="train")
            >>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="test")
        '''
        super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server, log_to_filename)
        # Both 'line' and 'stacked_line' draw with viz.line.
        valid_plot_types = {'scatter': self.viz.scatter, 'line': self.viz.line, 'stacked_line': self.viz.line}
        self.plot_type = plot_type
        if (plot_type not in valid_plot_types.keys()):
            raise ValueError("plot_type '{}' not found. Must be one of {}".format(plot_type, valid_plot_types.keys()))
        self.chart = valid_plot_types[plot_type]

    def log(self, *args, **kwargs):
        """Plot (x, y) point(s): append when the window exists, else create it."""
        if ((self.win is not None) and self.viz.win_exists(win=self.win, env=self.env)):
            if (len(args) != 2):
                raise ValueError('When logging to {}, must pass in x and y values (and optionally z).'.format(type(self)))
            (x, y) = args
            (x, y) = ([x], [y])
            if (self.plot_type == 'stacked_line'):
                # One append per legend series; args are parallel x/y sequences.
                # NOTE(review): `name` is popped from kwargs but never used in
                # the loop, and self.update_opts is only assigned on the
                # window-creation path below -- confirm log() is always called
                # once (creating the window) before stacked-line appends.
                name = kwargs.pop('name')
                for (i, (x, y)) in enumerate(zip(*args)):
                    self.chart(X=np.array([x]), Y=np.array([y]), update='append', name=self.opts['legend'][i], win=self.win, env=self.env, opts=self.update_opts, **kwargs)
            else:
                self.chart(X=np.array(x), Y=np.array(y), update='append', win=self.win, env=self.env, opts=self.opts, **kwargs)
        else:
            # First call (or the window vanished): create the window, then
            # recurse once to append the point(s) to it.
            if (self.plot_type == 'scatter'):
                chart_args = {'X': np.array([args])}
            elif (self.plot_type == 'line'):
                chart_args = {'X': np.array([args[0]]), 'Y': np.array([args[1]])}
            elif (self.plot_type == 'stacked_line'):
                chart_args = {'X': np.array([args[0]]), 'Y': np.array([args[1]])}
                # Appends must not resend the legend, so cache opts minus it.
                self.update_opts = {k: v for (k, v) in self.opts.items()}
                self.update_opts.pop('legend')
            else:
                raise NotImplementedError('Plot type: {}'.format(self.plot_type))
            self.win = self.chart(win=self.win, env=self.env, opts=self.opts, **chart_args)
            self.log(*args, **kwargs)
class VisdomTextLogger(BaseVisdomLogger):
    """Creates a text window in visdom and logs output to it.

    The output can be formatted with fancy HTML, and new output can
    be set to 'append' or 'replace' mode.

    Args:
        fields: Currently not used
        update_type: One of {'REPLACE', 'APPEND'}. Default 'REPLACE'.

    For examples, make sure that your visdom server is running.

    Example:
        >>> notes_logger = VisdomTextLogger(update_type='APPEND')
        >>> for i in range(10):
        >>>     notes_logger.log("Printing: {} of {}".format(i+1, 10))
        # results will be in Visdom environment (default: http://localhost:8097)
    """

    valid_update_types = ['REPLACE', 'APPEND']

    def __init__(self, fields=None, win=None, env=None, opts={}, update_type=valid_update_types[0], port=8097, server='localhost', log_to_filename=None):
        super(VisdomTextLogger, self).__init__(fields, win, env, opts, port, server, log_to_filename)
        self.text = ''
        if (update_type not in self.valid_update_types):
            raise ValueError("update type '{}' not found. Must be one of {}".format(update_type, self.valid_update_types))
        self.update_type = update_type
        self.viz_logger = self._viz_prototype(self.viz.text)

    def log(self, msg, *args, **kwargs):
        # In APPEND mode, accumulate lines joined with HTML <br> tags;
        # in REPLACE mode, show only the latest message.
        text = msg
        if ((self.update_type == 'APPEND') and self.text):
            self.text = '<br>'.join([self.text, text])
        else:
            self.text = text
        self.viz_logger([self.text])

    def _log_all(self, stats, log_fields, prefix=None, suffix=None, require_dict=False):
        # Gather each field's formatted output, align columns, then emit a
        # single tab-joined line (with optional prefix/suffix lines).
        results = []
        for (field_idx, field) in enumerate(self.fields):
            (parent, stat) = (None, stats)
            for f in field:
                (parent, stat) = (stat, stat[f])
            (name, output) = self._gather_outputs(field, log_fields, parent, stat, require_dict)
            if (not output):
                continue
            self._align_output(field_idx, output)
            results.append((name, output))
        if (not results):
            return
        output = self._join_results(results)
        if (prefix is not None):
            self.log(prefix)
        self.log(output)
        if (suffix is not None):
            self.log(suffix)

    def _align_output(self, field_idx, output):
        # Pad each column to the widest value seen so far so columns line up
        # across successive log lines.
        # NOTE(review): self.field_widths is never initialized in this class;
        # confirm the Logger base class sets it up.
        for (output_idx, o) in enumerate(output):
            if (len(o) < self.field_widths[field_idx][output_idx]):
                num_spaces = (self.field_widths[field_idx][output_idx] - len(o))
                output[output_idx] += (' ' * num_spaces)
            else:
                self.field_widths[field_idx][output_idx] = len(o)

    def _join_results(self, results):
        # results: [(name, [strings...])] -> "name: a b\tname2: c d"
        joined_out = map((lambda i: (i[0], ' '.join(i[1]))), results)
        joined_fields = map((lambda i: '{}: {}'.format(i[0], i[1])), joined_out)
        return '\t'.join(joined_fields)

    def _gather_outputs(self, field, log_fields, stat_parent, stat, require_dict=False):
        # Format a single stat into (display_name, [formatted strings]).
        # Dict stats carry their own format strings under `log_fields`;
        # scalar stats use the parent's 'log_format'/'log_unit' entries.
        output = []
        name = ''
        if isinstance(stat, dict):
            log_fields = stat.get(log_fields, [])
            name = stat.get('log_name', '.'.join(field))
            for f in log_fields:
                output.append(f.format(**stat))
        elif (not require_dict):
            name = '.'.join(field)
            number_format = stat_parent.get('log_format', '')
            unit = stat_parent.get('log_unit', '')
            fmt = ((('{' + number_format) + '}') + unit)
            output.append(fmt.format(stat))
        return (name, output)
class VisdomMeterLogger(MeterLogger):
    """A class to package and visualize meters in Visdom.

    Args:
        server: The uri of the Visdom server
        env: Visdom environment to log to.
        port: Port of the visdom server.
        title: The title of the MeterLogger. This will be used as a prefix for all plots.
        nclass: Number of classes, used to label confusion-matrix heatmaps.
        plotstylecombined: Whether to plot train/test curves in the same window.
        log_to_filename: If given, visdom events are also logged to this file.
        loggers: Modes to track (one meter/logger set per mode).
    """

    def __init__(self, server='localhost', env='main', port=8097, title='DNN', nclass=21, plotstylecombined=True, log_to_filename=None, loggers=('train', 'val')):
        # Fixed: modes are now forwarded to the base class so self.meter is
        # keyed consistently with self.logger (previously the base always used
        # its default ('train', 'val') modes regardless of `loggers`).
        super(VisdomMeterLogger, self).__init__(modes=loggers)
        self.server = server
        self.env = env
        self.port = port
        self.title = title
        # Fixed: nclass was accepted but never stored, which made the
        # confusion-matrix ('heatmap') logger crash on self.nclass.
        self.nclass = nclass
        self.logger = {}
        for logger in loggers:
            self.logger[logger] = {}
        self.plotstylecombined = plotstylecombined
        self.log_to_filename = log_to_filename
        self.metername_to_ptype = {}

    def __addlogger(self, meter, ptype):
        """Create one visdom logger per mode for `meter`, of plot type `ptype`."""
        first_logger = None
        for (logger_name, logger) in self.logger.items():
            if (ptype == 'stacked_line'):
                opts = {'title': '{} {} ({})'.format(self.title, meter, logger_name), 'fillarea': True, 'legend': self.meter[logger_name][meter].keys}
                logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
            elif (ptype == 'line'):
                if self.plotstylecombined:
                    if (first_logger is None):
                        opts = {'title': ((self.title + ' ') + meter)}
                        logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
                        # Fixed: remember which mode created the shared plot.
                        # Previously first_logger was never assigned, so every
                        # mode silently got its own window even in combined mode.
                        first_logger = logger_name
                    else:
                        logger[meter] = self.logger[first_logger][meter]
                else:
                    opts = {'title': ((self.title + '{} '.format(logger_name)) + meter)}
                    logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
            elif (ptype == 'heatmap'):
                names = list(range(self.nclass))
                # Fixed: the meter name was duplicated in the window title.
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter), 'columnnames': names, 'rownames': names}
                logger[meter] = VisdomLogger('heatmap', env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
            elif (ptype == 'image'):
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
                logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
            elif (ptype == 'histogram'):
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter), 'numbins': 20}
                logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)
            elif (ptype == 'text'):
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
                logger[meter] = VisdomTextLogger(env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, update_type='APPEND', opts=opts)
            elif (ptype == 'video'):
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
                logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server, port=self.port, log_to_filename=self.log_to_filename, opts=opts)

    def add_meter(self, meter_name, meter, ptype=None):
        """Register `meter` in every mode and attach a visdom logger for it.

        The plot type is taken from `ptype` when given, otherwise inferred
        from the meter's class.
        """
        super(VisdomMeterLogger, self).add_meter(meter_name, meter)
        self.metername_to_ptype[meter_name] = ptype
        if ptype:
            self.__addlogger(meter_name, ptype)
        elif isinstance(meter, Meter.ClassErrorMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.mAPMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.AUCMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.ConfusionMeter):
            self.__addlogger(meter_name, 'heatmap')
        elif isinstance(meter, Meter.MSEMeter):
            self.__addlogger(meter_name, 'line')
        elif (type(meter) == Meter.ValueSummaryMeter):
            self.__addlogger(meter_name, 'line')
        elif isinstance(meter, Meter.MultiValueSummaryMeter):
            self.__addlogger(meter_name, 'stacked_line')
        else:
            # Fixed typo in message: "pytpe" -> "ptype".
            raise NotImplementedError('Unknown meter type (and ptype): {} ({})'.format(type(meter), ptype))

    def reset_meter(self, iepoch, mode='train', meterlist=None):
        """Log every (selected) meter's value at epoch `iepoch`, then reset it."""
        self.timer.reset()
        for (meter_name, meter) in self.meter[mode].items():
            if ((meterlist is not None) and (meter_name not in meterlist)):
                continue
            val = self.meter[mode][meter_name].value()
            val = (val[0] if isinstance(val, (list, tuple)) else val)
            # Skip (but still reset) meters whose value is missing or NaN.
            should_reset_and_continue = False
            if (isinstance(val, str) or (val is None)):
                should_reset_and_continue = (val is None)
            elif isinstance(val, np.ndarray):
                should_reset_and_continue = np.isnan(val).any()
            elif isinstance(val, torch.Tensor):
                should_reset_and_continue = torch.isnan(val).any()
            else:
                should_reset_and_continue = np.isnan(val)
            if should_reset_and_continue:
                self.meter[mode][meter_name].reset()
                continue
            if (isinstance(meter, Meter.ConfusionMeter) or (self.metername_to_ptype[meter_name] in ['histogram', 'image', 'text'])):
                self.logger[mode][meter_name].log(val)
            elif isinstance(self.meter[mode][meter_name], Meter.MultiValueSummaryMeter):
                # Stacked line: x is the epoch repeated per series, y the cumsum.
                self.logger[mode][meter_name].log(np.array(([iepoch] * len(val))), np.array(np.cumsum(val)), name=mode)
            elif ((meter_name in self.metername_to_ptype) and (self.metername_to_ptype[meter_name] == 'video')):
                self.logger[mode][meter_name].log(videofile=val)
            else:
                self.logger[mode][meter_name].log(iepoch, val, name=mode)
            self.meter[mode][meter_name].reset()
class AUCMeter(meter.Meter):
    """
    The AUCMeter measures the area under the receiver-operating characteristic
    (ROC) curve for binary classification problems. The area under the curve (AUC)
    can be interpreted as the probability that, given a randomly selected positive
    example and a randomly selected negative example, the positive example is
    assigned a higher score by the classification model than the negative example.

    The AUCMeter is designed to operate on one-dimensional Tensors `output`
    and `target`, where (1) the `output` contains model output scores that ought to
    be higher when the model is more convinced that the example should be positively
    labeled, and smaller when the model believes the example should be negatively
    labeled (for instance, the output of a sigmoid function); and (2) the `target`
    contains only values 0 (for negative examples) and 1 (for positive examples).
    """

    def __init__(self):
        super(AUCMeter, self).__init__()
        self.reset()

    def reset(self):
        """Empty the accumulated scores and targets."""
        # Empty torch storages converted to numpy give zero-length arrays.
        self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
        self.targets = torch.LongTensor(torch.LongStorage()).numpy()

    def add(self, output, target):
        """Append a batch of scores and their binary targets.

        Accepts tensors, 1-D numpy arrays, or a plain number for `target`.
        """
        if torch.is_tensor(output):
            output = output.cpu().squeeze().numpy()
        if torch.is_tensor(target):
            target = target.cpu().squeeze().numpy()
        elif isinstance(target, numbers.Number):
            target = np.asarray([target])
        assert (np.ndim(output) == 1), 'wrong output size (1D expected)'
        assert (np.ndim(target) == 1), 'wrong target size (1D expected)'
        assert (output.shape[0] == target.shape[0]), 'number of outputs and targets does not match'
        assert np.all(np.add(np.equal(target, 1), np.equal(target, 0))), 'targets should be binary (0, 1)'
        self.scores = np.append(self.scores, output)
        self.targets = np.append(self.targets, target)

    def value(self):
        """Compute (area, tpr, fpr) from the accumulated scores.

        NOTE(review): returns the bare scalar 0.5 (not a tuple) when no data
        has been added -- callers unpacking three values must guard for this.
        """
        if (self.scores.shape[0] == 0):
            return 0.5
        # Sort scores descending; walk the sorted targets to trace the ROC curve.
        (scores, sortind) = torch.sort(torch.from_numpy(self.scores), dim=0, descending=True)
        scores = scores.numpy()
        sortind = sortind.numpy()
        # tpr/fpr[i] = cumulative true/false positive counts after the i
        # highest-scoring examples (index 0 is the empty prefix).
        tpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
        fpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
        for i in range(1, (scores.size + 1)):
            if (self.targets[sortind[(i - 1)]] == 1):
                tpr[i] = (tpr[(i - 1)] + 1)
                fpr[i] = fpr[(i - 1)]
            else:
                tpr[i] = tpr[(i - 1)]
                fpr[i] = (fpr[(i - 1)] + 1)
        # Normalize counts into rates: positives = targets.sum(),
        # negatives = -(targets - 1).sum().
        tpr /= (self.targets.sum() * 1.0)
        fpr /= ((self.targets - 1.0).sum() * (- 1.0))
        # Trapezoidal integration of tpr over fpr.
        n = tpr.shape[0]
        h = (fpr[1:n] - fpr[0:(n - 1)])
        sum_h = np.zeros(fpr.shape)
        sum_h[0:(n - 1)] = h
        sum_h[1:n] += h
        area = ((sum_h * tpr).sum() / 2.0)
        return (area, tpr, fpr)
class AverageValueMeter(ValueSummaryMeter):
    """Deprecated alias of ValueSummaryMeter; emits a FutureWarning on creation."""

    def __init__(self):
        warnings.warn('AverageValueMeter is deprecated in favor of ValueSummaryMeter and will be removed in a future version', FutureWarning)
        super(AverageValueMeter, self).__init__()
class mAPMeter(meter.Meter):
    """Measures the mean average precision (mAP) over all classes.

    Operates on `NxK` tensors `output` (scores, higher = more positive) and
    `target` (0/1 labels), plus an optional positive `Nx1` per-sample
    `weight`. Delegates accumulation to an internal APMeter and averages its
    per-class average precisions.
    """

    def __init__(self):
        super(mAPMeter, self).__init__()
        # All bookkeeping lives in the wrapped per-class AP meter.
        self.apmeter = APMeter()

    def reset(self):
        self.apmeter.reset()

    def add(self, output, target, weight=None):
        self.apmeter.add(output, target, weight)

    def value(self):
        # Mean of the per-class average precisions.
        return self.apmeter.value().mean()
class Meter(object):
    """Meters provide a way to keep track of important statistics in an online manner.

    This class is abstract but provides a standard interface for all meters
    to follow; the base implementations are no-ops.
    """

    def reset(self):
        """Resets the meter to default settings."""

    def add(self, value):
        """Log a new value to the meter.

        Args:
            value: Next result to include.
        """

    def value(self):
        """Get the value of the meter in the current state."""
class MovingAverageValueMeter(meter.Meter):
    """Tracks mean and std of the last `windowsize` values in O(1) per add.

    A circular buffer holds the window; running sums of the values and their
    squares are updated incrementally as old entries are evicted.
    """

    def __init__(self, windowsize):
        super(MovingAverageValueMeter, self).__init__()
        self.windowsize = windowsize
        self.valuequeue = torch.Tensor(windowsize)
        self.reset()

    def reset(self):
        """Clear the window and all running statistics."""
        self.sum = 0.0
        self.n = 0
        self.var = 0.0
        self.valuequeue.fill_(0)

    def add(self, value):
        """Insert `value`, evicting the oldest entry once the window is full."""
        slot = self.n % self.windowsize
        evicted = self.valuequeue[slot]
        self.sum += value - evicted
        self.var += value * value - evicted * evicted
        self.valuequeue[slot] = value
        self.n += 1

    def value(self):
        """Return (mean, std) over the current window contents."""
        count = min(self.n, self.windowsize)
        mean = self.sum / max(1, count)
        # Sample std from the running sum of squares, clamped at 0 to absorb
        # floating-point round-off.
        std = math.sqrt(max((self.var - count * mean * mean) / max(1, count - 1), 0))
        return (mean, std)
class MSEMeter(meter.Meter):
    """Accumulates the mean squared error (optionally its root) between
    paired outputs and targets."""

    def __init__(self, root=False):
        super(MSEMeter, self).__init__()
        self.reset()
        # When True, value() reports RMSE instead of MSE.
        self.root = root

    def reset(self):
        self.n = 0
        self.sesum = 0.0

    def add(self, output, target):
        """Accumulate the squared error of one batch (tensors or numpy arrays)."""
        if (not torch.is_tensor(output)) and (not torch.is_tensor(target)):
            output = torch.from_numpy(output)
            target = torch.from_numpy(target)
        self.n += output.numel()
        self.sesum += torch.sum((output - target) ** 2)

    def value(self):
        """Return the (root) mean squared error over everything added so far."""
        mse = self.sesum / max(1, self.n)
        return math.sqrt(mse) if self.root else mse

    def __str__(self):
        label = 'RMSE' if self.root else 'MSE'
        return ('%s %.3f\t' % (label, self.value()))
class MultiValueSummaryMeter(ValueSummaryMeter):
    """A ValueSummaryMeter over vector-valued samples, with one label per component."""

    def __init__(self, keys):
        """
        Args:
            keys: An iterable of keys (one label per tracked component).
        """
        super(MultiValueSummaryMeter, self).__init__()
        # Stored as a list so loggers can use it directly as a plot legend.
        self.keys = list(keys)
class SingletonMeter(meter.Meter):
    """Stores exactly one value which can be regurgitated."""

    def __init__(self, maxlen=1):
        # `maxlen` is accepted for interface compatibility but unused.
        super(SingletonMeter, self).__init__()
        self._stored = None

    def reset(self):
        """Clear the stored value; returns whatever was held before."""
        previous = self._stored
        self._stored = None
        return previous

    def add(self, value):
        """Replace the stored value with `value`.

        Args:
            value: Next result to include.
        """
        self._stored = value

    def value(self):
        """Return the most recently added value (or None after a reset)."""
        return self._stored
class TimeMeter(meter.Meter):
    """Measures wall-clock time elapsed since the last reset().

    Useful e.g. for average per-batch processing time.

    * `reset()` restarts the timer and zeroes the unit counter.
    * `value()` returns the seconds elapsed since the last `reset()`.

    NOTE: `unit` is stored but not currently used by value(); the elapsed
    time is always returned raw.
    """

    def __init__(self, unit):
        super(TimeMeter, self).__init__()
        self.unit = unit
        self.reset()

    def reset(self):
        """Zero the unit counter and restart the clock."""
        self.n = 0
        self.time = time.time()

    def value(self):
        """Seconds elapsed since the last reset()."""
        return time.time() - self.time
class ValueSummaryMeter(meter.Meter):
    """Running summary (mean/std/min/max) of scalar (or array) values,
    maintained online with a Welford-style update."""

    def __init__(self):
        super(ValueSummaryMeter, self).__init__()
        self.reset()
        # Most recently added raw value.
        self.val = 0

    def add(self, value, n=1):
        """Include `value` (with multiplicity `n`) in the running summary."""
        self.val = value
        self.sum += value
        # Running sum of squares (kept alongside m_s; only m_s feeds std).
        self.var += (value * value)
        self.n += n
        if (self.n == 0):
            (self.mean, self.std) = (np.nan, np.nan)
        elif (self.n == 1):
            # First sample: mean/min/max collapse to the value itself;
            # std is undefined, reported as inf.
            self.mean = (self.sum + 0.0)
            self.min = (self.mean + 0.0)
            self.max = (self.mean + 0.0)
            self.std = np.inf
            self.mean_old = self.mean
            self.m_s = 0.0
        else:
            # Welford-style incremental mean/variance update.
            # NOTE(review): the update uses the raw `value` with weight `n`;
            # for n > 1 this treats `value` as a single sample counted n
            # times -- confirm against callers.
            self.mean = (self.mean_old + ((value - (n * self.mean_old)) / float(self.n)))
            self.m_s += ((value - self.mean_old) * (value - self.mean))
            self.mean_old = self.mean
            self.std = np.sqrt((self.m_s / (self.n - 1.0)))
            self.min = np.minimum(self.min, value)
            self.max = np.maximum(self.max, value)

    def value(self):
        """Return (mean, std) of everything added since the last reset."""
        return (self.mean, self.std)

    def reset(self):
        """Reset all running statistics to their empty-state values."""
        self.n = 0
        self.sum = 0.0
        self.var = 0.0
        self.val = 0.0
        self.mean = np.nan
        self.mean_old = 0.0
        self.m_s = 0.0
        self.std = np.nan
        self.min = np.nan
        self.max = np.nan

    def __str__(self):
        # Temporarily lower numpy print precision for a compact summary line.
        old_po = np.get_printoptions()
        np.set_printoptions(precision=3)
        res = 'mean(std) {} ({}) \tmin/max {}/{}\t'.format(*[np.array(v) for v in [self.mean, self.std, self.min, self.max]])
        np.set_printoptions(**old_po)
        return res
def compose(transforms):
    """Chain a list of callables into one function applied left-to-right.

    Args:
        transforms: list of single-argument callables.

    Returns:
        A function f with f(z) == transforms[-1](...(transforms[0](z))).
    """
    assert isinstance(transforms, list)
    assert all(callable(tr) for tr in transforms), 'list of functions expected'

    def composition(z):
        result = z
        for tr in transforms:
            result = tr(result)
        return result

    return composition
def tablemergekeys():
    """Return a function that inverts a table's nesting by key.

    The returned callable maps a dict of dicts {idx: {key: value}} to
    {key: {idx: value}}, or a list of dicts to {key: [values...]} in list
    order. Any other input yields an empty dict.
    """
    def mergekeys(tbl):
        merged = {}
        if isinstance(tbl, dict):
            for idx, elem in tbl.items():
                for key, value in elem.items():
                    merged.setdefault(key, {})[idx] = value
        elif isinstance(tbl, list):
            for elem in tbl:
                for key, value in elem.items():
                    merged.setdefault(key, []).append(value)
        return merged
    return mergekeys
def tableapply(f):
    """Return a function that applies `f` to every value of a dict.

    The returned callable maps {k: v} -> {k: f(v)}, leaving keys untouched.
    """
    # Fixed: previously used six-style `iteritems(d)`, an unnecessary extra
    # dependency on Python 3 (this file already uses Python-3-only syntax
    # such as f-strings); dict.items() is the idiomatic equivalent.
    return (lambda d: {k: f(v) for (k, v) in d.items()})
def makebatch(merge=None):
    """Build a collate function that merges a list/table of samples into a batch.

    Args:
        merge: optional callable applied to the key-merged table; when omitted,
            each field is merged into a tensor whenever `canmerge` allows it.

    Returns:
        A function mapping samples to a batched table.
    """
    if merge:
        batch_fn = compose([tablemergekeys(), merge])
    else:
        default_merge = tableapply(lambda field: mergetensor(field) if canmerge(field) else field)
        batch_fn = compose([tablemergekeys(), default_merge])
    return (lambda samples: batch_fn(samples))
class MultiTaskDataLoader(object):
    """Loads batches simultaneously from multiple datasets.

    Designed for multi-task learning: initialized with an iterable of
    :class:`Dataset` objects, it iterates batches of
    ``[(B_0, 0), (B_1, 1), ..., (B_k, k)]`` from datasets ``(D_0, ..., D_k)``,
    where each ``B_i`` holds :attr:`batch_size` samples and the integer is the
    task index.

    Args:
        datasets: list of :class:`Dataset` objects to serve batches from.
        batch_size: samples per batch, per dataset.
        use_all (bool): if True, iterate until ALL datasets are exhausted
            (shorter ones are padded with None); if False, stop as soon as
            one dataset runs out.
        loading_kwargs: passed through to each child DataLoader.

    Example:
        >>> train_loader = MultiTaskDataLoader([dataset1, dataset2], batch_size=3)
        >>> for ((datas1, labels1), task1), ((datas2, labels2), task2) in train_loader:
        >>>     print(task1, task2)
        0 1
    """

    def __init__(self, datasets, batch_size=1, use_all=False, **loading_kwargs):
        self.loaders = []
        self.batch_size = batch_size
        self.use_all = use_all
        self.loading_kwargs = loading_kwargs
        for dataset in datasets:
            loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, **self.loading_kwargs)
            self.loaders.append(loader)
        self.min_loader_size = min(len(loader) for loader in self.loaders)
        self.current_loader = 0

    def __iter__(self):
        """Iterator yielding one (batch, task_index) pair per dataset per step."""
        return zip_batches(
            *[zip(iter(loader), repeat(loader_num)) for loader_num, loader in enumerate(self.loaders)],
            use_all=self.use_all,
        )

    def __len__(self):
        # BUG FIX: the original iterated ``loader`` but measured ``len(l)``,
        # raising NameError whenever use_all=True.
        if self.use_all:
            return max(len(loader) for loader in self.loaders)
        return self.min_loader_size
def zip_batches(*iterables, **kwargs):
    """Zip several batch iterators in lockstep.

    With ``use_all=True`` the iteration continues until the longest iterable
    is exhausted, padding the shorter ones with None; otherwise it stops at
    the shortest.
    """
    use_all = kwargs.pop('use_all', False)
    if not use_all:
        return zip(*iterables)
    try:
        # Python 2 spelling, kept for backwards compatibility.
        from itertools import izip_longest as zip_longest
    except ImportError:
        from itertools import zip_longest
    return zip_longest(*iterables, fillvalue=None)
def canmergetensor(tbl):
    """Return True iff ``tbl`` is a list of tensors sharing one element count.

    Non-list inputs and lists whose first element is not a tensor yield False.
    """
    if not isinstance(tbl, list):
        return False
    if not torch.is_tensor(tbl[0]):
        return False
    numel = tbl[0].numel()
    return all(t.numel() == numel for t in tbl)
def mergetensor(tbl):
    """Stack a list of same-shaped tensors into one tensor along a new dim 0.

    The output is allocated via ``tbl[0].new`` so it inherits the first
    tensor's dtype and device.
    """
    out_shape = torch.Size([len(tbl)] + list(tbl[0].size()))
    merged = tbl[0].new(out_shape)
    for slot, tensor in zip(merged, tbl):
        slot.copy_(tensor)
    return merged
def _get_version():
    """Return the installed ``phantom`` package version string."""
    # Imported lazily so importing this module does not require phantom.
    import phantom
    return phantom.__version__
@ph.msg_payload()
class ImpressionRequest():
    """Message indicating that a user is visiting a website and might be
    interested in an advertisement offer.

    Attributes:
    -----------
    timestamp (float): the time of the impression, as epoch seconds
    user_id (int): the unique and anonymous identifier of the user (1 or 2)

    Methods:
    --------
    generate_random(): helper method to generate random impressions
    """

    timestamp: float
    user_id: int

    @classmethod
    def generate_random(cls):
        """Build an impression at the current time for a random user in {1, 2}."""
        return cls(timestamp=datetime.datetime.now().timestamp(), user_id=np.random.choice([1, 2]))
@ph.msg_payload()
class Bid():
    """Message sent by an advertiser to the exchange to win the impression.

    Attributes:
    -----------
    bid (float): the amount offered for the impression
    theme (str): the theme of the ad that will be displayed
    user_id (int): the identifier of the targeted user
    """

    bid: float
    theme: str
    user_id: int
@ph.msg_payload()
class AuctionResult():
    """Message sent by the exchange to an advertiser with the auction outcome.

    Attributes:
    -----------
    cost (float): the cost charged to the advertiser (0.0 for losers)
    winning_bid (float): the highest bid during this auction
    """

    cost: float
    winning_bid: float
@ph.msg_payload()
class Ads():
    """Message describing the ad to show to the user. For simplicity the ad
    content is reduced to its theme.

    Attributes:
    -----------
    advertiser_id (str): the id of the advertiser whose ad is displayed
    theme (str): the theme of the ad
    user_id (int): the user that will see the ad
    """

    advertiser_id: str
    theme: str
    user_id: int
@ph.msg_payload()
class ImpressionResult():
    """Result of the ad display, i.e. whether the user clicked on the ad.

    Attributes:
    -----------
    clicked (bool): whether or not the user clicked on the ad
    """

    clicked: bool
class PublisherPolicy(ph.Policy):
    """Fixed (non-learning) policy: the publisher always emits action 0."""

    def compute_action(self, obs: np.ndarray) -> np.ndarray:
        # The publisher has nothing to decide; return the constant no-op.
        constant_action = np.array([0])
        return constant_action
class PublisherAgent(ph.Agent):
    """Generates ``ImpressionRequest`` messages — real-estate on its website
    rented to advertisers — and simulates the user's click on the winning ad.

    Attributes:
    -----------
    _USER_CLICK_PROBABILITIES (dict): per-user probability of clicking an ad
        of each theme. Hardcoded for simplicity; a more advanced logic could
        also be implemented.
    """

    _USER_CLICK_PROBABILITIES = {1: {'sport': 0.0, 'travel': 1.0, 'science': 0.2, 'tech': 0.8}, 2: {'sport': 1.0, 'travel': 0.0, 'science': 0.7, 'tech': 0.1}}

    def __init__(self, agent_id: str, exchange_id: str, user_click_proba: dict=None):
        super().__init__(agent_id)
        self.exchange_id = exchange_id
        # Fall back to the hardcoded class-level table when none is supplied.
        self.user_click_proba = (user_click_proba or self._USER_CLICK_PROBABILITIES)

    def generate_messages(self, ctx: ph.Context):
        # Each step the publisher sends one random impression to the exchange.
        return [(self.exchange_id, ImpressionRequest.generate_random())]

    @ph.agents.msg_handler(Ads)
    def handle_ads(self, _ctx: ph.Context, msg: ph.Message):
        """Process an ``Ads`` display.

        Draws a Bernoulli click using the (user, theme) probability and
        reports the result back to the advertiser whose ad was shown.

        Returns:
        --------
        A single-element list ``[(advertiser_id, ImpressionResult)]``.
        """
        logger.debug('PublisherAgent %s ads: %s', self.id, msg.payload)
        clicked = np.random.binomial(1, self.user_click_proba[msg.payload.user_id][msg.payload.theme])
        return [(msg.payload.advertiser_id, ImpressionResult(clicked=clicked))]
class AdvertiserAgent(ph.StrategicAgent):
    """Learns to bid efficiently, within its budget limit, on impressions in
    order to maximize the number of clicks it gets. Each advertiser has a
    ``theme`` which impacts the probability of the user clicking its ad.

    Observation space: agent type (budget), normalized budget left, user id.
    Action space: bid fraction in [0, 1] (scaled by the budget on decode).
    """

    @dataclass
    class Supertype(ph.Supertype):
        # Total budget available to the advertiser for one episode.
        budget: float

    def __init__(self, agent_id: str, exchange_id: str, theme: str='generic'):
        self.exchange_id = exchange_id
        self.theme = theme
        self.action_space = gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]))
        super().__init__(agent_id)

    @property
    def observation_space(self):
        # Built lazily as a property because it depends on ``self.type``.
        return gym.spaces.Dict({'type': self.type.to_obs_space(), 'budget_left': gym.spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float64), 'user_id': gym.spaces.Discrete(2)})

    def pre_message_resolution(self, _ctx: ph.Context):
        """@override
        Called at the beginning of each step; resets the per-step click and
        win counters.
        """
        self.step_clicks = 0
        self.step_wins = 0

    @ph.agents.msg_handler(ImpressionRequest)
    def handle_impression_request(self, ctx: ph.Context, msg: ph.Message):
        """Cache the visiting user's details for the upcoming bid.

        Note:
        -----
        The user id arrives in the message; extra user information is pulled
        from the exchange's view through the ``ctx`` object.
        """
        logger.debug('AdvertiserAgent %s impression request: %s', self.id, msg.payload)
        self._current_user_id = msg.payload.user_id
        self._current_age = ctx[self.exchange_id].users_info[self._current_user_id]['age']
        self._current_zipcode = ctx[self.exchange_id].users_info[self._current_user_id]['zipcode']
        self.total_requests[self._current_user_id] += 1

    @ph.agents.msg_handler(AuctionResult)
    def handle_auction_result(self, _ctx: ph.Context, msg: ph.MsgPayload):
        """Update win counters and remaining budget.

        A non-zero cost means this agent won the auction (losers are charged
        0.0 by the exchange).
        """
        logger.debug('AdvertiserAgent %s auction result: %s', self.id, msg.payload)
        self.step_wins += int((msg.payload.cost != 0.0))
        self.total_wins[self._current_user_id] += int((msg.payload.cost != 0.0))
        self.left -= msg.payload.cost

    @ph.agents.msg_handler(ImpressionResult)
    def handle_impression_result(self, _ctx: ph.Context, msg: ph.MsgPayload):
        """Update click counters once the display result comes back."""
        logger.debug('AdvertiserAgent %s impression result: %s', self.id, msg.payload)
        self.step_clicks += int(msg.payload.clicked)
        self.total_clicks[self._current_user_id] += int(msg.payload.clicked)

    def encode_observation(self, _ctx: ph.Context):
        """@override
        Observation: the agent's type, its normalized budget left and the
        current (0-based) user id.

        NOTE(review): implicitly returns None before any impression has been
        received (``_current_user_id == 0``) — presumably the framework treats
        None as "no observation"; confirm.
        """
        if (self._current_user_id != 0):
            return {'type': self.type.to_obs_space_compatible_type(), 'budget_left': np.array([(self.left / self.type.budget)], dtype=np.float64), 'user_id': (self._current_user_id - 1)}

    def decode_action(self, ctx: ph.Context, action: np.ndarray):
        """@override
        Scale the policy action by the budget, clamp it to the budget left,
        and send a ``Bid`` to the exchange (only when strictly positive).
        """
        logger.debug('AdvertiserAgent %s decode action: %s', self.id, action)
        msgs = []
        self.bid = min((action[0] * self.type.budget), self.left)
        if (self.bid > 0.0):
            msg = Bid(bid=self.bid, theme=self.theme, user_id=self._current_user_id)
            msgs.append((self.exchange_id, msg))
        return msgs

    def compute_reward(self, _ctx: ph.Context) -> float:
        """@override
        Per-step reward: clicks received this step, optionally blended with
        the remaining budget fraction via ``risk_aversion`` (currently 0).
        """
        risk_aversion = 0.0
        return (((1 - risk_aversion) * self.step_clicks) + ((risk_aversion * self.left) / self.type.budget))

    def is_terminated(self, _ctx: ph.Context) -> bool:
        """@override
        The agent cannot perform any more bids once its budget is exhausted.
        """
        return (self.left <= 0)

    def reset(self):
        """@override
        Clear all per-episode state before each episode.
        """
        super().reset()
        self.left = self.type.budget
        self.step_clicks = 0
        self.step_wins = 0
        self.total_clicks = defaultdict(int)
        self.total_requests = defaultdict(int)
        self.total_wins = defaultdict(int)
        self.bid = 0.0
        # NOTE(review): initialized to 0.0 (float) but used as a dict key and
        # compared against int 0 elsewhere — works, but 0 would be cleaner.
        self._current_user_id = 0.0
        self._current_age = 0.0
        self._current_zipcode = 0.0
class AdExchangeAgent(ph.Agent):
    """Reactive intermediary between the publisher and the advertisers.

    It only reacts to received messages: forwards impression requests to the
    advertisers, runs an auction over the collected bids, and notifies the
    publisher (winning ad) and every bidder (cost) of the outcome.
    """

    @dataclass(frozen=True)
    class AdExchangeView(ph.AgentView):
        """View used to expose additional information to other actors in the
        system, accessible via the ``ph.Context`` object. Here it exposes
        user information to the advertisers to help them decide on their bid.
        """

        # Mapping user_id -> {'age': ..., 'zipcode': ...}
        users_info: dict

    def __init__(self, agent_id: str, publisher_id: str, advertiser_ids: Iterable=tuple(), strategy: str='first'):
        super().__init__(agent_id)
        self.publisher_id = publisher_id
        self.advertiser_ids = advertiser_ids
        # Auction type: 'first' or 'second' price.
        self.strategy = strategy

    def view(self, neighbour_id=None) -> ph.View:
        """@override
        Provide extra user information, but only to advertisers (ids starting
        with 'ADV'), in a pull fashion: the advertiser must explicitly read it
        from the ``ctx`` object.
        """
        if (neighbour_id and neighbour_id.startswith('ADV')):
            return self.AdExchangeView(users_info={1: {'age': 18, 'zipcode': 94025}, 2: {'age': 40, 'zipcode': 90250}})
        else:
            return super().view(neighbour_id)

    @ph.agents.msg_handler(ImpressionRequest)
    def handle_impression_request(self, _ctx: ph.Context, msg: ph.Message[ImpressionRequest]):
        """Forward the publisher's impression request to every advertiser."""
        logger.debug('AdExchange impression request %s', msg)
        return [(adv_id, msg.payload) for adv_id in self.advertiser_ids]

    def handle_batch(self, ctx: ph.Context, batch: Sequence[ph.Message]):
        """@override
        Consume all messages of a step as one block so that ``Bid`` messages
        can be auctioned together; every other message goes through the
        regular per-message handlers.

        Note:
        -----
        The default framework logic consumes each message individually.
        """
        bids = []
        msgs = []
        for message in batch:
            if isinstance(message.payload, Bid):
                bids.append(message)
            else:
                msgs += self.handle_message(ctx, message)
        if (len(bids) > 0):
            msgs += self.auction(bids)
        return msgs

    def auction(self, bids: Sequence[ph.Message[Bid]]):
        """Run the auction over the collected bids.

        Two mechanisms are implemented; in both the highest bid wins:
        - first price: the winner pays its own bid
        - second price: the winner pays the second-highest bid
        Sends the winning ``Ads`` to the publisher and an ``AuctionResult``
        (cost 0.0 for losers) to every bidder.
        """
        if (self.strategy == 'first'):
            (winner, cost) = self._first_price_auction(bids)
        elif (self.strategy == 'second'):
            (winner, cost) = self._second_price_auction(bids)
        else:
            raise ValueError(f'Unknown auction strategy: {self.strategy}')
        logger.debug('AdExchange auction done winner: %s cost: %s', winner, cost)
        msgs = []
        advertiser_ids = [m.sender_id for m in bids]
        msgs.append((self.publisher_id, Ads(advertiser_id=winner.sender_id, theme=winner.payload.theme, user_id=winner.payload.user_id)))
        for adv_id in advertiser_ids:
            adv_cost = (cost if (adv_id == winner.sender_id) else 0.0)
            msgs.append((adv_id, AuctionResult(cost=adv_cost, winning_bid=winner.payload.bid)))
        return msgs

    def _first_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        # Winner pays its own (highest) bid.
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = sorted_bids[0].payload.bid
        return (winner, cost)

    def _second_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        # Winner pays the second-highest bid (its own bid if it is alone).
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = (sorted_bids[1].payload.bid if (len(bids) > 1) else sorted_bids[0].payload.bid)
        return (winner, cost)
class DigitalAdsEnv(ph.FiniteStateMachineEnv):
    """Two-stage FSM environment for the digital-ads use case.

    Stage 'publisher_step': the publisher emits impression requests.
    Stage 'advertiser_step': the advertisers bid on them via the exchange.
    """

    def __init__(self, num_steps=20, num_agents_theme=None, **kwargs):
        """
        Params:
        -------
        num_steps: episode length.
        num_agents_theme: mapping theme -> number of advertiser agents to
            create with that theme.
        kwargs: forwarded to ``ph.FiniteStateMachineEnv``.
        """
        self.exchange_id = 'ADX'
        self.publisher_id = 'PUB'
        # BUG FIX: the default ``num_agents_theme=None`` crashed below on
        # ``None.items()``; treat a missing mapping as "no advertisers".
        num_agents_theme = (num_agents_theme or {})
        USER_CLICK_PROBABILITIES = {1: {'sport': 0.0, 'travel': 1.0, 'science': 0.2, 'tech': 0.5}, 2: {'sport': 1.0, 'travel': 0.0, 'science': 0.7, 'tech': 0.5}}
        publisher_agent = PublisherAgent(self.publisher_id, exchange_id=self.exchange_id, user_click_proba=USER_CLICK_PROBABILITIES)
        advertiser_agents = []
        i = 1
        for (theme, num_agents) in num_agents_theme.items():
            for _ in range(num_agents):
                # Advertisers are named ADV_1, ADV_2, ... across all themes.
                advertiser_agents.append(AdvertiserAgent(f'ADV_{i}', self.exchange_id, theme=theme))
                i += 1
        self.advertiser_ids = [a.id for a in advertiser_agents]
        exchange_agent = AdExchangeAgent(self.exchange_id, publisher_id=self.publisher_id, advertiser_ids=self.advertiser_ids)
        actors = ([exchange_agent, publisher_agent] + advertiser_agents)
        # Batch resolver so the exchange can auction all bids of a step at once.
        network = ph.StochasticNetwork(actors, ph.resolvers.BatchResolver(round_limit=5), ignore_connection_errors=True)
        network.add_connections_between([self.exchange_id], [self.publisher_id])
        network.add_connections_between([self.exchange_id], self.advertiser_ids)
        network.add_connections_between([self.publisher_id], self.advertiser_ids)
        super().__init__(num_steps=num_steps, network=network, initial_stage='publisher_step', stages=[ph.FSMStage(stage_id='publisher_step', next_stages=['advertiser_step'], acting_agents=[self.publisher_id], rewarded_agents=[self.publisher_id]), ph.FSMStage(stage_id='advertiser_step', next_stages=['publisher_step'], acting_agents=self.advertiser_ids, rewarded_agents=self.advertiser_ids)], **kwargs)
class AdvertiserBidUser(ph.metrics.Metric[float]):
    """Tracks the bids one advertiser places for one particular user."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: the agent's bid when the step targets our user,
        NaN otherwise.
        """
        agent = env[self.agent_id]
        if agent._current_user_id != self.user_id:
            return np.nan
        return agent.bid

    def reduce(self, values, mode=None) -> float:
        """@override
        The default keeps the last step value; here we report the NaN-aware
        average bid over the episode instead.
        """
        return np.nanmean(values)
class AdvertiserAverageHitRatioUser(ph.metrics.Metric[float]):
    """Clicks-per-win ratio of one advertiser for one user."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: cumulative clicks / cumulative wins (NaN until the
        first win).
        """
        agent = env[self.agent_id]
        wins = agent.total_wins[self.user_id]
        if wins == 0.0:
            return np.nan
        return agent.total_clicks[self.user_id] / wins

    def reduce(self, values, mode=None) -> float:
        """@override
        The counters are cumulative, so the final sample is the episode ratio.
        """
        return values[-1]
class AdvertiserAverageWinProbaUser(ph.metrics.Metric[float]):
    """Fraction of one user's impression requests won by one advertiser."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: cumulative wins / cumulative requests (NaN until the
        first request).
        """
        agent = env[self.agent_id]
        requests = agent.total_requests[self.user_id]
        if requests == 0.0:
            return np.nan
        return agent.total_wins[self.user_id] / requests

    def reduce(self, values, mode=None) -> float:
        """@override
        The counters are cumulative, so keep only the final sample.
        """
        return values[-1]
class AdvertiserTotalRequests(ph.metrics.Metric[float]):
    """Cumulative number of impression requests one advertiser saw for a user."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        agent = env[self.agent_id]
        return agent.total_requests[self.user_id]

    def reduce(self, values, mode=None) -> float:
        # Cumulative counter: the final sample is the episode total.
        return values[-1]
class AdvertiserTotalWins(ph.metrics.Metric[float]):
    """Cumulative number of auctions one advertiser won for a given user."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        agent = env[self.agent_id]
        return agent.total_wins[self.user_id]

    def reduce(self, values, mode=None) -> float:
        # Cumulative counter: the final sample is the episode total.
        return values[-1]
def BuyerPolicy(obs):
    """Rule-based buyer policy.

    Buys the observed demand (obs[1]) iff there is demand and the best price
    (obs[0]) does not exceed the buyer's private value (obs[2]); otherwise
    returns 0 (no purchase).
    """
    has_demand = bool(obs[1])
    affordable = obs[0] <= obs[2]
    return obs[1] if has_demand and affordable else 0
def SellerPolicy(obs):
    """Random seller policy: quote a price uniformly in [0, 1).

    The observation is ignored entirely.
    """
    return np.random.uniform()
def rollout(env):
    """Run one episode of ``env`` with the scripted Buyer/Seller policies,
    printing observations, rewards and actions each step for debugging.

    NOTE(review): only BuyerAgent/SellerAgent instances get an action; any
    other agent type would silently get no entry in ``actions``.
    """
    (observations, _) = env.reset()
    rewards = {}
    while (env.current_step < env.num_steps):
        print(env.current_step)
        print('\nobservations:')
        print(observations)
        print('\nrewards:')
        print(rewards)
        # Pick the hand-written policy matching each observed agent's type.
        actions = {}
        for (aid, obs) in observations.items():
            agent = env.agents[aid]
            if isinstance(agent, BuyerAgent):
                actions[aid] = BuyerPolicy(obs)
            elif isinstance(agent, SellerAgent):
                actions[aid] = SellerPolicy(obs)
        print('\nactions:')
        print(actions)
        step = env.step(actions)
        observations = step.observations
        rewards = step.rewards
@dataclass(frozen=True)
class Leak():
    """Message leaking a victim seller's quoted price to an adversarial seller."""

    # id of the seller whose price was leaked
    victim_id: str
    # the leaked price
    price: float
class MaybeSneakySeller(SellerAgent):
    """Seller that may receive ``Leak`` messages revealing a victim seller's
    current price; the leaked price is appended to its observation."""

    def __init__(self, agent_id: ph.AgentID, victim_id=None):
        super().__init__(agent_id)
        self.victim_id = victim_id
        # Last leaked victim price (0 until a leak arrives).
        self.victims_price = 0
        # BUG FIX / consistency: ``np.Inf`` was removed in NumPy 2.0; use the
        # canonical ``np.inf`` (as SellerAgent already does).
        self.observation_space = Box(np.array([0, 0, 0]), np.array([np.inf, 1, 1]))

    def encode_observation(self, ctx):
        # Observation = (own transactions this step, market average price,
        # last leaked victim price); the transaction counter resets on read.
        obs = np.array([self.current_tx, ctx.env_view.avg_price, self.victims_price])
        self.current_tx = 0
        return obs

    @ph.agents.msg_handler(Order)
    def handle_order_message(self, ctx, message):
        # Same fulfilment logic as SellerAgent: book revenue and volume.
        self.current_revenue += (self.current_price * message.payload.vol)
        self.current_tx += message.payload.vol

    @ph.agents.msg_handler(Leak)
    def handle_leak_message(self, ctx, message):
        # Store the leaked price for the next observation.
        self.victims_price = message.payload.price
        print('received leak message')
        print(message)
class MaybeLeakyBuyer(BuyerAgent):
    """Buyer that, on top of the normal price bookkeeping, leaks the victim
    seller's quoted price to an adversarial seller."""

    def __init__(self, agent_id, demand_prob, supertype, victim_id=None, adv_id=None):
        super().__init__(agent_id, demand_prob, supertype)
        # Seller whose price is leaked, and the adversary that receives it.
        self.victim_id = victim_id
        self.adv_id = adv_id

    @ph.agents.msg_handler(Price)
    def handle_price_message(self, ctx, message):
        self.seller_prices[message.sender_id] = message.payload.price
        # Only prices coming from the victim are leaked; for any other sender
        # this handler implicitly returns None (no outgoing message).
        if (message.sender_id == self.victim_id):
            responses = [(self.adv_id, Leak(victim_id=self.victim_id, price=message.payload.price))]
            print(responses)
            return responses
@dataclass(frozen=True)
class AdversarialSetup():
    """Bundle of agent ids describing one adversarial market configuration."""

    # buyer that leaks the victim's price
    leaky_buyer: ph.AgentID
    # seller being attacked
    victim_seller: ph.AgentID
    # seller receiving the leaked information
    adv_seller: ph.AgentID
class LeakySimpleMarketEnv(SimpleMarketEnv):
    """SimpleMarketEnv variant that optionally wires an information leak
    between a buyer, a victim seller and an adversarial seller, and reshapes
    the adversary's reward accordingly."""

    def __init__(self, num_steps, network, adv_setup=None):
        super().__init__(num_steps, network)
        self.leaky = False
        if adv_setup:
            self.adversarial_setup(adv_setup.leaky_buyer, adv_setup.adv_seller, adv_setup.victim_seller)

    def adversarial_setup(self, leaky_buyer, adv_seller, victim_seller, victim_reward_coeff=1.0, adv_reward_coeff=1.0):
        """Wire the leak: tell the buyer whom to leak to/about, tell the
        adversary who its victim is, and store the reward coefficients."""
        self.leaky = True
        self.leaky_buyer = leaky_buyer
        self.adv_seller = adv_seller
        self.victim_seller = victim_seller
        self.agents[leaky_buyer].victim_id = victim_seller
        self.agents[leaky_buyer].adv_id = adv_seller
        self.agents[adv_seller].victim_id = victim_seller
        self.victim_coeff = victim_reward_coeff
        self.adv_coeff = adv_reward_coeff

    def compute_adv_reward(self, attacker_reward, victim_reward):
        """
        Computing the adversarial reward, which is a combination of the
        (negated, penalized) victim reward and the attacker's own reward.
        """
        return (((- self.victim_coeff) * victim_reward) + (self.adv_coeff * attacker_reward))

    def step(self, actions, verbose=False):
        step = super().step(actions)
        # Rewrite the adversary's reward only on 'Sellers' stages, where the
        # seller rewards are actually populated.
        if (self.leaky and (self.current_stage == 'Sellers')):
            step.rewards[self.adv_seller] = self.compute_adv_reward(step.rewards[self.adv_seller], step.rewards[self.victim_seller])
        return step
@dataclass(frozen=True)
class Price(ph.MsgPayload):
    """Seller -> Buyer: the seller's quoted unit price."""

    # quoted unit price
    price: float
@dataclass(frozen=True)
class Order(ph.MsgPayload):
    """Buyer -> Seller: an order for ``vol`` units."""

    # number of units ordered
    vol: int
@dataclass
class BuyerSupertype(ph.Supertype):
    """Per-episode buyer parameters."""

    # private valuation: utility gained per unit bought
    value: float
class BuyerAgent(ph.StrategicAgent):
    """Buyer that observes the best market price and its own demand, then
    chooses whether to buy one unit (action 1) or not (action 0)."""

    def __init__(self, agent_id, demand_prob, supertype):
        super().__init__(agent_id, supertype=supertype)
        # Latest quoted price per seller id.
        self.seller_prices = {}
        # Probability of having demand on a given step.
        self.demand_prob = demand_prob
        self.current_reward = 0
        self.action_space = Discrete(2)
        self.observation_space = Box(low=0, high=1, shape=(3,))

    def decode_action(self, ctx, action):
        """Buy ``action`` units from a random cheapest seller; the reward is
        the buyer's surplus (value - price) when it buys."""
        msgs = []
        min_price = min(self.seller_prices.values())
        if action:
            # Break ties between equally-cheap sellers at random.
            min_sellers = [k for (k, v) in self.seller_prices.items() if (v == min_price)]
            seller = random.choice(min_sellers)
            msgs.append((seller, Order(action)))
            self.current_reward += (((- action) * min_price) + self.type.value)
        return msgs

    def encode_observation(self, ctx):
        # NOTE(review): min() raises on an empty dict — assumes sellers quote
        # before buyers observe (the env starts on the 'Sellers' stage);
        # confirm the stage ordering guarantees this.
        min_price = min(self.seller_prices.values())
        demand = np.random.binomial(1, self.demand_prob)
        return np.array([min_price, demand, self.type.value])

    def compute_reward(self, ctx):
        # Reward accumulated in decode_action is consumed (and reset) here.
        reward = self.current_reward
        self.current_reward = 0
        return reward

    @ph.agents.msg_handler(Price)
    def handle_price_message(self, ctx, message):
        self.seller_prices[message.sender_id] = message.payload.price

    def reset(self):
        super().reset()
        self.seller_prices = {}
        self.current_reward = 0
class SellerAgent(ph.StrategicAgent):
    """Seller that quotes a price each step and is rewarded by the revenue
    from the orders it receives."""

    def __init__(self, agent_id: ph.AgentID):
        super().__init__(agent_id)
        self.current_price = 0
        self.current_revenue = 0
        # Units transacted since the last observation.
        self.current_tx = 0
        self.action_space = Box(low=0, high=1, shape=(1,))
        self.observation_space = Box(np.array([0, 0]), np.array([np.inf, 1]))

    def decode_action(self, ctx, action):
        # The raw action is used directly as the quoted price and broadcast
        # to every connected neighbour.
        self.current_price = action
        return [(nid, Price(action)) for nid in ctx.neighbour_ids]

    def encode_observation(self, ctx):
        # Observation = (own transactions this step, market average price);
        # the transaction counter resets on read.
        obs = np.array([self.current_tx, ctx.env_view.avg_price])
        self.current_tx = 0
        return obs

    def compute_reward(self, ctx):
        # Revenue accumulated by the order handler is consumed here.
        reward = self.current_revenue
        self.current_revenue = 0
        return reward

    def reset(self):
        # CONSISTENCY FIX: call the base-class reset (as BuyerAgent does) so
        # framework-level episode state is also reinitialized.
        super().reset()
        # Start each episode from a random quoted price.
        self.current_price = self.action_space.sample()
        self.current_revenue = 0
        self.current_tx = 0

    @ph.agents.msg_handler(Order)
    def handle_order_message(self, ctx, message):
        # Book the revenue and volume of the incoming order.
        self.current_revenue += (self.current_price * message.payload.vol)
        self.current_tx += message.payload.vol
class SimpleMarketEnv(ph.FiniteStateMachineEnv):
    """Two-stage market FSM: sellers quote prices, then buyers place orders."""

    @dataclass(frozen=True)
    class View(ph.fsm.FSMEnvView):
        # Average of all sellers' current prices, exposed to every agent.
        avg_price: float

    def __init__(self, num_steps, network):
        buyers = [aid for (aid, agent) in network.agents.items() if isinstance(agent, BuyerAgent)]
        sellers = [aid for (aid, agent) in network.agents.items() if isinstance(agent, SellerAgent)]
        # Buyers and sellers act (and are rewarded) on alternating stages.
        stages = [ph.FSMStage(stage_id='Buyers', next_stages=['Sellers'], acting_agents=buyers, rewarded_agents=buyers), ph.FSMStage(stage_id='Sellers', next_stages=['Buyers'], acting_agents=sellers, rewarded_agents=sellers)]
        self.avg_price = 0.0
        # Sellers quote first so buyers always observe a price.
        super().__init__(num_steps, network, stages=stages, initial_stage='Sellers')

    def view(self, neighbour_id=None) -> 'SimpleMarketEnv.View':
        # Augment the base FSM view with the current market average price.
        return self.View(avg_price=self.avg_price, **super().view({}).__dict__)

    def post_message_resolution(self):
        """Recompute the average seller price after each message round."""
        super().post_message_resolution()
        seller_prices = [agent.current_price for agent in self.agents.values() if isinstance(agent, SellerAgent)]
        self.avg_price = np.mean(seller_prices)
@ph.msg_payload('CustomerAgent', 'ShopAgent')
class OrderRequest():
    """Customer -> Shop: request to buy ``size`` units."""

    # number of units requested
    size: int
@ph.msg_payload('ShopAgent', 'CustomerAgent')
class OrderResponse():
    """Shop -> Customer: units actually sold (may be fewer than requested
    when the shop runs out of stock)."""

    # number of units sold
    size: int
@ph.msg_payload('ShopAgent', 'FactoryAgent')
class StockRequest():
    """Shop -> Factory: request ``size`` units of new stock."""

    # number of units requested from the factory
    size: int
@ph.msg_payload('FactoryAgent', 'ShopAgent')
class StockResponse():
    """Factory -> Shop: units delivered (the factory always fulfils the full
    requested amount)."""

    # number of units delivered
    size: int
class FactoryAgent(ph.Agent):
    """Factory with unlimited stock: every stock request is fulfilled in full."""

    def __init__(self, agent_id: str):
        super().__init__(agent_id)

    @ph.agents.msg_handler(StockRequest)
    def handle_stock_request(self, ctx: ph.Context, message: ph.Message):
        # Echo the requested size straight back as delivered stock.
        return [(message.sender_id, StockResponse(message.payload.size))]
class CustomerAgent(ph.Agent):
    """Customer that orders a random quantity from the shop every step."""

    def __init__(self, agent_id: ph.AgentID, shop_id: ph.AgentID):
        super().__init__(agent_id)
        self.shop_id: str = shop_id

    @ph.agents.msg_handler(OrderResponse)
    def handle_order_response(self, ctx: ph.Context, message: ph.Message):
        # The customer does not react to the delivery.
        return

    def generate_messages(self, ctx: ph.Context):
        # NOTE(review): np.random.randint(high) samples 0..high-1, so the
        # order size never reaches CUSTOMER_MAX_ORDER_SIZE — confirm intended.
        order_size = np.random.randint(CUSTOMER_MAX_ORDER_SIZE)
        return [(self.shop_id, OrderRequest(order_size))]
class ShopAgent(ph.StrategicAgent):
    """Shop that learns how much stock to request from the factory while
    serving customer orders; the reward trades sales off against held stock."""

    def __init__(self, agent_id: str, factory_id: str):
        super().__init__(agent_id)
        self.factory_id: str = factory_id
        # Units currently held, sold this step, and demand missed this step.
        self.stock: int = 0
        self.sales: int = 0
        self.missed_sales: int = 0
        # Observation: normalized (stock, sales, missed_sales).
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(3,))
        # Action: amount of stock to request, in [0, SHOP_MAX_STOCK].
        self.action_space = gym.spaces.Box(low=0.0, high=SHOP_MAX_STOCK, shape=(1,))

    def pre_message_resolution(self, ctx: ph.Context):
        # Per-step counters reset at the start of every step.
        self.sales = 0
        self.missed_sales = 0

    @ph.agents.msg_handler(StockResponse)
    def handle_stock_response(self, ctx: ph.Context, message: ph.Message):
        # Accept the delivery, capped at the shop's maximum capacity.
        self.delivered_stock = message.payload.size
        self.stock = min((self.stock + self.delivered_stock), SHOP_MAX_STOCK)

    @ph.agents.msg_handler(OrderRequest)
    def handle_order_request(self, ctx: ph.Context, message: ph.Message):
        # Serve as much of the order as current stock allows; the shortfall
        # is tracked as missed sales.
        amount_requested = message.payload.size
        if (amount_requested > self.stock):
            self.missed_sales += (amount_requested - self.stock)
            stock_to_sell = self.stock
            self.stock = 0
        else:
            stock_to_sell = amount_requested
            self.stock -= amount_requested
        self.sales += stock_to_sell
        return [(message.sender_id, OrderResponse(stock_to_sell))]

    def encode_observation(self, ctx: ph.Context):
        # Normalize all quantities to [0, 1].
        max_sales_per_step = (NUM_CUSTOMERS * CUSTOMER_MAX_ORDER_SIZE)
        return np.array([(self.stock / SHOP_MAX_STOCK), (self.sales / max_sales_per_step), (self.missed_sales / max_sales_per_step)], dtype=np.float32)

    def decode_action(self, ctx: ph.Context, action: np.ndarray):
        # Never request more than would fit in the shop.
        stock_to_request = min(int(round(action[0])), (SHOP_MAX_STOCK - self.stock))
        return [(self.factory_id, StockRequest(stock_to_request))]

    def compute_reward(self, ctx: ph.Context) -> float:
        # Sales minus a small holding cost on unsold stock.
        return (self.sales - (0.1 * self.stock))

    def reset(self):
        # NOTE(review): sales/missed_sales are not reset here — they are reset
        # per step in pre_message_resolution; confirm that is sufficient.
        self.stock = 0
class SupplyChainEnv(ph.PhantomEnv):
    """Star-shaped supply chain: many customers order from one shop, which
    restocks from one factory."""

    def __init__(self):
        factory_id = 'WAREHOUSE'
        customer_ids = [f'CUST{(i + 1)}' for i in range(NUM_CUSTOMERS)]
        shop_id = 'SHOP'
        factory_agent = FactoryAgent(factory_id)
        customer_agents = [CustomerAgent(cid, shop_id=shop_id) for cid in customer_ids]
        shop_agent = ShopAgent(shop_id, factory_id=factory_id)
        agents = ([shop_agent, factory_agent] + customer_agents)
        network = ph.Network(agents)
        # The shop talks to the factory and to every customer; customers never
        # talk to each other or to the factory directly.
        network.add_connection(shop_id, factory_id)
        network.add_connections_between([shop_id], customer_ids)
        super().__init__(num_steps=NUM_EPISODE_STEPS, network=network)
class FixedCategorical(torch.distributions.Categorical):
    """Categorical distribution whose samples and log-probs carry an explicit
    trailing action dimension of size 1 (the PPO-code convention)."""

    def sample(self):
        # Append a trailing dim so actions are shaped (..., 1).
        drawn = super().sample()
        return drawn.unsqueeze(-1)

    def log_probs(self, actions):
        # Strip the trailing dim for the base log_prob, then restore the
        # (batch, 1) shape, summing any extra action dims.
        flat_actions = actions.squeeze(-1)
        per_sample = super().log_prob(flat_actions)
        per_sample = per_sample.view(actions.size(0), -1)
        return per_sample.sum(-1).unsqueeze(-1)

    def mode(self):
        # Deterministic action: index of the most probable category.
        most_likely = torch.argmax(self.probs, dim=-1, keepdim=True)
        return most_likely
class FixedNormal(torch.distributions.Normal):
    """Diagonal Gaussian whose log-probs/entropy are summed over the action
    dimension (the PPO-code convention)."""

    def log_probs(self, actions):
        # Independent dims: joint log-prob is the sum over the last axis.
        per_dim = super().log_prob(actions)
        return per_dim.sum(-1, keepdim=True)

    def entropy(self):
        # Total entropy is the sum of the per-dimension entropies.
        per_dim = super().entropy()
        return per_dim.sum(-1)

    def mode(self):
        # The mode of a Gaussian is its mean.
        return self.mean
class FixedBernoulli(torch.distributions.Bernoulli):
    """Multi-binary distribution whose log-probs are summed over the action
    dimension and returned with a trailing dim of size 1."""

    def log_probs(self, actions):
        # BUG FIX: the original called ``super.log_prob(...)`` (missing
        # parentheses), which raised AttributeError at runtime; it must be
        # ``super().log_prob(...)``.
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)

    def entropy(self):
        # Total entropy is the sum of per-bit entropies.
        return super().entropy().sum(-1)

    def mode(self):
        # Deterministic action: 1 for each bit with probability > 0.5.
        return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
    """Linear head producing a ``FixedCategorical`` over ``num_outputs`` actions."""

    def __init__(self, num_inputs: int, num_outputs: int) -> None:
        super().__init__()
        # Orthogonal weight init with a small gain and zero bias, applied via
        # the shared ``init`` helper.
        self.linear = init(
            nn.Linear(num_inputs, num_outputs),
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
            gain=0.01,
        )

    def forward(self, x):
        logits = self.linear(x)
        return FixedCategorical(logits=logits)
class DiagGaussian(nn.Module):
    """Gaussian action head with a state-independent, learnable log-std."""

    def __init__(self, num_inputs: int, num_outputs: int) -> None:
        super().__init__()
        # Orthogonal weights, zero bias — via the shared ``init`` helper.
        self.fc_mean = init(
            nn.Linear(num_inputs, num_outputs),
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
        )
        # AddBias holds the learnable log-std, initialised at zero.
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        action_mean = self.fc_mean(x)
        # Feed zeros through AddBias so its bias becomes the log-std tensor.
        zeros = torch.zeros(action_mean.size())
        if x.is_cuda:
            zeros = zeros.cuda()
        action_logstd = self.logstd(zeros)
        return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
    """Linear head producing a ``FixedBernoulli`` over ``num_outputs`` bits."""

    def __init__(self, num_inputs: int, num_outputs: int) -> None:
        super().__init__()
        # Orthogonal weights, zero bias — via the shared ``init`` helper.
        self.linear = init(
            nn.Linear(num_inputs, num_outputs),
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
        )

    def forward(self, x):
        logits = self.linear(x)
        return FixedBernoulli(logits=logits)
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class PPOPolicy(nn.Module, Policy):
    """
    Actor-critic policy used by :class:`PPOTrainer`.

    Wraps a base network (``NNBase`` subclass) producing a value estimate and
    actor features, plus a distribution head chosen from the action space type
    (Discrete -> Categorical, Box -> DiagGaussian, MultiBinary -> Bernoulli).

    Arguments:
        observation_space: Observation space of the policy.
        action_space: Action space of the policy.
        base_type: Optional NNBase subclass; if None one is picked from the
            observation space type.
        base_kwargs: Keyword arguments forwarded to the base constructor.
    """

    def __init__(self, observation_space: gym.Space, action_space: gym.Space, base_type: Optional[Type['NNBase']]=None, base_kwargs: Optional[Dict[(str, Any)]]=None) -> None:
        # Both parents are initialised explicitly since this is a diamond-free
        # dual inheritance (nn.Module + project Policy).
        nn.Module.__init__(self)
        Policy.__init__(self, observation_space, action_space)
        base_kwargs = (base_kwargs or {})
        # Select the base network: explicit type wins, otherwise inferred from
        # the observation space class name.
        if (base_type is not None):
            self.base = base_type(**base_kwargs)
        elif (observation_space.__class__.__name__ == 'Discrete'):
            # A discrete observation is fed as a single scalar feature.
            self.base = MLPBase(1, **base_kwargs)
        elif (observation_space.__class__.__name__ == 'Box'):
            # Flattened Box observation: input size is the product of the shape.
            self.base = MLPBase(reduce(mul, observation_space.shape, 1), **base_kwargs)
        else:
            raise NotImplementedError(observation_space.__class__.__name__)
        # Select the distribution head from the action space type.
        if (action_space.__class__.__name__ == 'Discrete'):
            num_outputs = action_space.n
            self.dist = Categorical(self.base.output_size, num_outputs)
        elif (action_space.__class__.__name__ == 'Box'):
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(self.base.output_size, num_outputs)
        elif (action_space.__class__.__name__ == 'MultiBinary'):
            num_outputs = action_space.shape[0]
            self.dist = Bernoulli(self.base.output_size, num_outputs)
        else:
            raise NotImplementedError

    def compute_action(self, observation: Any) -> Any:
        # Policy interface method; PPOTrainer drives this class via act()
        # instead, so this is deliberately unimplemented.
        raise NotImplementedError

    @property
    def is_recurrent(self) -> bool:
        return self.base.is_recurrent

    @property
    def recurrent_hidden_state_size(self) -> int:
        'Size of rnn_hx.'
        return self.base.recurrent_hidden_state_size

    def forward(self, inputs, rnn_hxs, masks):
        return self.base.forward(inputs, rnn_hxs, masks)

    def act(self, inputs, rnn_hxs, masks, deterministic=False) -> Tuple:
        """Sample (or take the mode of) an action for the given inputs.

        Returns:
            Tuple of (value, action, action_log_probs, rnn_hxs).
        """
        (value, actor_features, rnn_hxs) = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)
        action = (dist.mode() if deterministic else dist.sample())
        action_log_probs = dist.log_probs(action)
        return (value, action, action_log_probs, rnn_hxs)

    def get_value(self, inputs, rnn_hxs, masks):
        """Return only the critic's value estimate for the given inputs."""
        (value, _, _) = self.base(inputs, rnn_hxs, masks)
        return value

    def evaluate_actions(self, inputs, rnn_hxs, masks, action):
        """Evaluate given actions under the current policy (for the PPO update).

        Returns:
            Tuple of (value, action_log_probs, mean dist entropy, rnn_hxs).
        """
        (value, actor_features, rnn_hxs) = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)
        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()
        return (value, action_log_probs, dist_entropy, rnn_hxs)
class NNBase(nn.Module):
    """
    Common torso for actor-critic networks, optionally with a GRU core.

    Arguments:
        recurrent: If True, a GRU is created and used by subclasses via
            ``_forward_gru``.
        recurrent_input_size: Input feature size of the GRU.
        hidden_size: GRU hidden size / feature output size.
    """

    def __init__(self, recurrent: bool, recurrent_input_size: int, hidden_size: int):
        super().__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights / zero biases for the GRU parameters.
            for (name, param) in self.gru.named_parameters():
                if ('bias' in name):
                    nn.init.constant_(param, 0)
                elif ('weight' in name):
                    nn.init.orthogonal_(param)

    @property
    def is_recurrent(self) -> bool:
        return self._recurrent

    @property
    def recurrent_hidden_state_size(self) -> int:
        # Non-recurrent bases report a dummy size of 1 so rollout storage
        # tensors still have a valid shape.
        if self._recurrent:
            return self._hidden_size
        return 1

    @property
    def output_size(self):
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        """Run the GRU over ``x``, resetting hidden state where ``masks`` is 0.

        Handles two layouts:
        * acting: one row per environment (x rows == hxs rows), single step;
        * training: x is a flattened (T * N, features) batch, processed in
          contiguous segments between episode boundaries so a multi-step GRU
          call never crosses a reset.
        """
        if (x.size(0) == hxs.size(0)):
            # Single-step case: add a time axis of length 1 for the GRU call.
            (x, hxs) = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # x arrives flattened as (T * N, -1); recover (T, N, -1).
            N = hxs.size(0)
            T = int((x.size(0) / N))
            x = x.view(T, N, x.size(1))
            masks = masks.view(T, N)
            # Time steps (after the first) where ANY environment was reset.
            has_zeros = (masks[1:] == 0.0).any(dim=(- 1)).nonzero().squeeze().cpu()
            if (has_zeros.dim() == 0):
                # Scalar result: wrap into a list (+1 converts to a segment
                # start index since masks[1:] shifted indices by one).
                has_zeros = [(has_zeros.item() + 1)]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # Bracket the boundary indices with the first and last step.
            has_zeros = (([0] + has_zeros) + [T])
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range((len(has_zeros) - 1)):
                # Each segment [start_idx, end_idx) contains no internal reset,
                # so it can be processed in one GRU call; the mask at the
                # segment start zeroes hidden state for freshly-reset envs.
                start_idx = has_zeros[i]
                end_idx = has_zeros[(i + 1)]
                (rnn_scores, hxs) = self.gru(x[start_idx:end_idx], (hxs * masks[start_idx].view(1, (- 1), 1)))
                outputs.append(rnn_scores)
            # Re-flatten (T, N, -1) -> (T * N, -1) for the heads.
            x = torch.cat(outputs, dim=0)
            x = x.view((T * N), (- 1))
            hxs = hxs.squeeze(0)
        return (x, hxs)
class CNNBase(NNBase):
    """Nature-CNN convolutional torso with a linear critic head.

    Expects image inputs with pixel values in [0, 255]; the spatial dimensions
    must reduce to 7x7 after the three convolutions (e.g. 84x84 inputs).
    """

    def __init__(self, num_inputs: int, recurrent: bool=False, hidden_size: int=512) -> None:
        super().__init__(recurrent, hidden_size, hidden_size)

        # Orthogonal init scaled for ReLU layers.
        def relu_init(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            relu_init(nn.Conv2d(64, 32, 3, stride=1)),
            nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(32 * 7 * 7, hidden_size)),
            nn.ReLU(),
        )

        # Default-gain orthogonal init for the value head.
        def value_init(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0))

        self.critic_linear = value_init(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        # Scale pixel inputs from [0, 255] to [0, 1].
        features = self.main(inputs / 255.0)
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        return self.critic_linear(features), features, rnn_hxs
class MLPBase(NNBase):
    """Two-hidden-layer tanh MLP torso with separate actor and critic branches."""

    def __init__(self, num_inputs: int, recurrent: bool=False, hidden_size: int=64) -> None:
        super().__init__(recurrent, num_inputs, hidden_size)
        if recurrent:
            # Branch inputs come from the GRU output instead of raw features.
            num_inputs = hidden_size

        # Orthogonal init with sqrt(2) gain, zero bias, for all layers.
        def init_(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        np.sqrt(2))

        self.actor = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
        )
        self.critic = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
        )
        self.critic_linear = init_(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        features = inputs
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        hidden_critic = self.critic(features)
        hidden_actor = self.actor(features)
        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
class PPOTrainer(Trainer):
    """
    Proximal Policy Optimisation (PPO) algorithm implementation derived from
    https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail.

    For performance and stability reasons, it is recommended that the RLlib
    implementation is used using the :func:`utils.rllib.train` function.

    Arguments:
        tensorboard_log_dir: If provided, will save metrics to the given directory
            in a format that can be viewed with tensorboard.
        ppo_epoch: Number of optimisation passes over each rollout batch.
        num_mini_batch: Number of mini-batches per optimisation pass.
        clip_param: PPO surrogate-objective clipping parameter.
        use_clipped_value_loss: If True, also clip the value-function update.
        use_linear_lr_decay: If True, anneal the learning rate to 0 linearly.
        lr: Adam learning rate.
        eps: Adam epsilon.
        value_loss_coef: Weight of the value loss in the total loss.
        entropy_coef: Weight of the entropy bonus in the total loss.
        max_grad_norm: Gradient clipping norm.
        use_gae: If True, use Generalised Advantage Estimation.
        gamma: Discount factor.
        gae_lambda: GAE lambda parameter.
        use_proper_time_limits: If True, distinguish time-limit truncation from
            true termination when computing returns.
    """

    policy_class = PPOPolicy

    def __init__(self, tensorboard_log_dir: Optional[str]=None, ppo_epoch: int=4, num_mini_batch: int=32, clip_param: float=0.2, use_clipped_value_loss: bool=True, use_linear_lr_decay: bool=False, lr: float=0.0007, eps: float=1e-05, value_loss_coef: float=0.5, entropy_coef: float=0.01, max_grad_norm: float=0.5, use_gae: bool=False, gamma: float=0.99, gae_lambda: float=0.95, use_proper_time_limits: bool=False) -> None:
        super().__init__(tensorboard_log_dir)
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.clip_param = clip_param
        self.use_clipped_value_loss = use_clipped_value_loss
        self.use_linear_lr_decay = use_linear_lr_decay
        self.lr = lr
        self.eps = eps
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_gae = use_gae
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.use_proper_time_limits = use_proper_time_limits

    def train(self, env_class: Type[PhantomEnv], num_iterations: int, policies: PolicyMapping, policies_to_train: Sequence[PolicyID], env_config: Optional[Mapping[(str, Any)]]=None, metrics: Optional[Mapping[(str, Metric)]]=None) -> TrainingResults:
        """Train a single policy with PPO across a bank of vectorised envs.

        Exactly one policy may be trained, and it must map to exactly one agent
        per environment (the first matching agent is used).
        """
        env_config = (env_config or {})
        self.metrics = (metrics or {})
        check_env_config(env_config)
        # A fixed-size bank of identical environments stepped in lockstep.
        num_envs = 10
        envs = []
        observations = []
        for _ in range(num_envs):
            env = env_class(**env_config)
            observations.append(env.reset())
            envs.append(env)
        (policy_mapping, policy_instances) = self.setup_policy_specs_and_mapping(env, policies)
        # Only single-policy training is supported by this implementation.
        assert (len(policies_to_train) == 1)
        policy_to_train = policies_to_train[0]
        training_policy = policy_instances[policy_to_train]
        # The first agent mapped to the trained policy is the one whose
        # observations/rewards feed the PPO update.
        training_agent = next((a for (a, p) in policy_mapping.items() if (p == policy_to_train)))
        assert isinstance(training_policy, self.policy_class)
        device = torch.device('cpu')
        self.actor_critic = PPOPolicy(training_policy.observation_space, training_policy.action_space, base_kwargs={'recurrent': False})
        self.actor_critic.to(device)
        self.optimizer = torch.optim.Adam(self.actor_critic.parameters(), lr=self.lr, eps=self.eps)
        rollouts = RolloutStorage(envs[0].num_steps, num_envs, training_policy.observation_space, training_policy.action_space, self.actor_critic.recurrent_hidden_state_size)
        # Seed the rollout buffer with the initial observations.
        agent_obs = np.array([obs[training_agent] for obs in observations])
        rollouts.obs[0].copy_(torch.FloatTensor(agent_obs))
        rollouts.to(device)
        for i in rich.progress.track(range(num_iterations), description='Training...'):
            if self.use_linear_lr_decay:
                update_linear_schedule(self.optimizer, i, num_iterations, self.lr)
            episode_rewards = defaultdict(list)
            # Collect one full rollout of env.num_steps steps per iteration.
            for step in range(env.num_steps):
                with torch.no_grad():
                    (value, trained_policy_actions, action_log_prob, recurrent_hidden_states) = self.actor_critic.act(rollouts.obs[step].reshape(((- 1), 1)), rollouts.recurrent_hidden_states[step], rollouts.masks[step])
                new_observations: List[Dict[(AgentID, Any)]] = []
                rewards: List[Dict[(AgentID, float)]] = []
                terminations: List[Dict[(AgentID, bool)]] = []
                truncations: List[Dict[(AgentID, bool)]] = []
                infos: List[Dict[(AgentID, Any)]] = []
                # Step every environment: the trained agent takes its PPO
                # action, all other agents act via their own policies.
                for (env, obs, tpa) in zip(envs, observations, trained_policy_actions):
                    actions: Dict[(AgentID, Any)] = {}
                    for (agent_id, agent_obs) in obs.items():
                        policy_name = policy_mapping[agent_id]
                        policy = policy_instances[policy_name]
                        if (policy_name == policy_to_train):
                            # Scalar action spaces receive the bare value.
                            if (len(tpa) == 1):
                                actions[agent_id] = tpa[0]
                            else:
                                actions[agent_id] = np.array(tpa)
                        else:
                            actions[agent_id] = policy.compute_action(agent_obs)
                    (o, r, te, tr, i_) = env.step(actions)
                    new_observations.append(o)
                    rewards.append(r)
                    terminations.append(te)
                    truncations.append(tr)
                    infos.append(i_)
                observations = new_observations
                for agent_id in rewards[0].keys():
                    episode_rewards[agent_id].append(np.mean([r[agent_id] for r in rewards]))
                # mask == 0 marks episode boundaries for the trained agent;
                # bad_masks marks time-limit truncations ('bad_transition').
                masks = torch.FloatTensor([([0.0] if (te[training_agent] or tr[training_agent]) else [1.0]) for (te, tr) in zip(terminations, truncations)])
                bad_masks = torch.FloatTensor([([0.0] if ('bad_transition' in info[training_agent].keys()) else [1.0]) for info in infos])
                training_observations = torch.FloatTensor([obs[training_agent] for obs in observations])
                training_rewards = torch.FloatTensor([[rwd[training_agent]] for rwd in rewards])
                rollouts.insert(training_observations, recurrent_hidden_states, trained_policy_actions, action_log_prob, value, training_rewards, masks, bad_masks)
                self.log_vec_rewards(rewards)
                self.log_vec_metrics(envs)
            # Bootstrap the value of the last observation for return targets.
            with torch.no_grad():
                next_value = self.actor_critic.get_value(rollouts.obs[(- 1)].reshape(((- 1), 1)), rollouts.recurrent_hidden_states[(- 1)], rollouts.masks[(- 1)]).detach()
            rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.gae_lambda, self.use_proper_time_limits)
            self.update(rollouts)
            rollouts.after_update()
            self.tbx_write_values(i)
        return TrainingResults(policy_instances)

    def update(self, rollouts: RolloutStorage) -> Tuple[(float, float, float)]:
        """Run the clipped-surrogate PPO update on a collected rollout.

        Returns:
            Tuple of mean (value loss, action loss, dist entropy) per update.
        """
        # Normalise advantages across the rollout.
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
        value_loss_epoch = 0.0
        action_loss_epoch = 0.0
        dist_entropy_epoch = 0.0
        for _ in range(self.ppo_epoch):
            if self.actor_critic.is_recurrent:
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
            else:
                data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                (obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, _) = self.actor_critic.evaluate_actions(obs_batch.reshape(((- 1), 1)), recurrent_hidden_states_batch, masks_batch, actions_batch)
                # Importance ratio between current and behaviour policy.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                # Clipped surrogate objective (pessimistic minimum).
                action_loss = (- torch.min(surr1, surr2).mean())
                if self.use_clipped_value_loss:
                    # Clip the value update around the old prediction and take
                    # the worse (larger) of the two losses.
                    value_pred_clipped = (value_preds_batch + (values - value_preds_batch).clamp((- self.clip_param), self.clip_param))
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
                    value_loss = (0.5 * torch.max(value_losses, value_losses_clipped).mean())
                else:
                    value_loss = (0.5 * (return_batch - values).pow(2).mean())
                self.optimizer.zero_grad()
                # Combined loss: value + policy - entropy bonus.
                (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
                torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch)
def init(module, weight_init, bias_init, gain=1):
    """Initialise a module's parameters in place and return the module.

    Arguments:
        module: Any module exposing ``weight`` and ``bias`` tensors.
        weight_init: Callable applied to the weight data; receives ``gain``.
        bias_init: Callable applied to the bias data.
        gain: Scaling factor forwarded to ``weight_init``.

    Returns:
        The same ``module``, for fluent use at construction time.
    """
    bias_init(module.bias.data)
    weight_init(module.weight.data, gain=gain)
    return module
def get_vec_normalize(venv):
    """Walk the ``venv`` wrapper chain and return the first VecNormalize, or None."""
    current = venv
    while current is not None:
        if isinstance(current, VecNormalize):
            return current
        # Descend into the wrapped env, if any.
        current = getattr(current, 'venv', None)
    return None
class AddBias(torch.nn.Module):
    """Adds a learnable per-feature bias to its input.

    Used by DiagGaussian to model a state-independent log standard deviation.
    """

    def __init__(self, bias):
        super().__init__()
        # Stored as a column vector; reshaped per input rank in forward().
        self._bias = torch.nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        if x.dim() == 2:
            shape = (1, -1)
        else:
            shape = (1, -1, 1, 1)
        return x + self._bias.t().view(*shape)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    """Linearly anneal the optimizer's learning rate toward zero.

    At ``epoch == 0`` the rate is ``initial_lr``; it decreases linearly and
    would reach zero at ``epoch == total_num_epochs``.
    """
    fraction_done = epoch / float(total_num_epochs)
    new_lr = initial_lr - (initial_lr * fraction_done)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class VecNormalize(VecNormalize_):
    """VecNormalize variant whose observation statistics can be frozen.

    ``train()`` resumes updating the running statistics; ``eval()`` freezes
    them so evaluation rollouts do not perturb the filter.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.training = True

    def _obfilt(self, obs, update=True):
        if not self.obs_rms:
            return obs
        if self.training and update:
            self.obs_rms.update(obs)
        normalised = (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.epsilon)
        return np.clip(normalised, -self.clip_obs, self.clip_obs)

    def train(self):
        self.training = True

    def eval(self):
        self.training = False
class QLearningPolicy(Policy):
    """
    Simple QLearning policy implementation backed by a tabular Q-table.

    Arguments:
        observation_space: Observation space of the policy (must be Discrete).
        action_space: Action space of the policy (must be Discrete).
    """

    def __init__(self, observation_space: gym.spaces.Discrete, action_space: gym.spaces.Discrete) -> None:
        super().__init__(observation_space, action_space)
        # Tabular Q-learning only works over finite, enumerable spaces.
        if not isinstance(observation_space, gym.spaces.Discrete):
            raise ValueError(f'QLearningPolicy observation space must be Discrete type (got {type(observation_space)})')
        if not isinstance(action_space, gym.spaces.Discrete):
            raise ValueError(f'QLearningPolicy action space must be Discrete type (got {type(action_space)})')
        # One row per observation, one column per action, initialised to zero.
        self.q_table = np.zeros([observation_space.n, action_space.n])

    def compute_action(self, observation: int) -> Any:
        """
        Return the greedy (highest-Q) action for the given observation.

        Arguments:
            observation: A single observation for the policy to act on.

        Returns:
            The action taken by the policy based on the given observation.
        """
        return np.argmax(self.q_table[observation])
class QLearningTrainer(Trainer):
    """
    Simple QLearning algorithm implementation.

    Arguments:
        alpha: Learning rate.
        gamma: Discount factor.
        epsilon: Exploration rate.
        tensorboard_log_dir: If provided, will save metrics to the given directory
            in a format that can be viewed with tensorboard.
    """

    policy_class = QLearningPolicy

    def __init__(self, alpha: float=0.1, gamma: float=0.6, epsilon: float=0.1, tensorboard_log_dir: Optional[str]=None) -> None:
        super().__init__(tensorboard_log_dir)
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon

    def training_step(self, env: PhantomEnv, policy_mapping: Mapping[(AgentID, PolicyID)], policies: Mapping[(PolicyID, Policy)], policies_to_train: Sequence[PolicyID]) -> None:
        """Run a batch of episodes, updating Q-tables with the tabular
        Q-learning rule under an epsilon-greedy behaviour policy.

        Arguments:
            env: The environment to train in.
            policy_mapping: Mapping of agent IDs to policy IDs.
            policies: Mapping of policy IDs to policy instances.
            policies_to_train: IDs of the policies whose Q-tables are updated.
        """
        batch_size = 10
        for _ in range(batch_size):
            observations = env.reset()
            # BUG FIX: the original condition `(not terminated) or truncated`
            # kept looping forever when the env truncated without terminating;
            # an episode ends when the env is terminated OR truncated.
            while not (env.is_terminated() or env.is_truncated()):
                actions: Dict[(AgentID, Any)] = {}
                for (agent_id, obs) in observations.items():
                    policy_name = policy_mapping[agent_id]
                    policy = policies[policy_name]
                    if (policy_name in policies_to_train):
                        assert isinstance(policy, QLearningPolicy)
                        # Epsilon-greedy: explore with probability epsilon,
                        # otherwise act greedily on the Q-table.
                        if (np.random.uniform(0, 1) < self.epsilon):
                            action = policy.action_space.sample()
                        else:
                            action = np.argmax(policy.q_table[obs])
                    else:
                        action = policy.compute_action(obs)
                    actions[agent_id] = action
                # BUG FIX: PhantomEnv.step returns a 5-tuple of (observations,
                # rewards, terminations, truncations, infos) — see
                # PhantomEnv.Step and its use in PPOTrainer — so the original
                # 4-element unpacking raised a ValueError at runtime.
                (next_observations, rewards, _, _, _) = env.step(actions)
                for (agent_id, obs) in observations.items():
                    policy_name = policy_mapping[agent_id]
                    policy = policies[policy_name]
                    if (policy_name in policies_to_train):
                        assert isinstance(policy, QLearningPolicy)
                        reward = rewards[agent_id]
                        next_obs = next_observations[agent_id]
                        # Standard tabular Q-learning update:
                        # Q <- (1 - alpha) * Q + alpha * (r + gamma * max Q').
                        old_value = policy.q_table[(obs, actions[agent_id])]
                        next_max = np.max(policy.q_table[next_obs])
                        new_value = (((1 - self.alpha) * old_value) + (self.alpha * (reward + (self.gamma * next_max))))
                        policy.q_table[(obs, actions[agent_id])] = new_value
                observations = next_observations
                self.log_rewards(rewards)
                self.log_metrics(env)
class Agent(ABC):
    """
    Representation of an agent in the network.

    Instances of :class:`phantom.Agent` occupy the nodes on the network graph.
    They are responsible for storing and monitoring internal state, constructing
    :class:`View` instances and handling messages.

    Arguments:
        agent_id: Unique identifier for the agent.
        supertype: Optional :class:`Supertype` instance. When the agent's reset function
            is called the supertype will be sampled from and the values set as the
            agent's :attr:`type` property.

    Implementations can make use of the ``msg_handler`` function decorator:

    .. code-block:: python

        class SomeAgent(ph.Agent):
            ...

            @ph.agents.msg_handler(RequestMessage)
            def handle_request_msg(self, ctx: ph.Context, message: ph.Message):
                response_msgs = do_something_with_msg(message)

                return [response_msgs]
    """

    def __init__(self, agent_id: AgentID, supertype: Optional[Supertype]=None) -> None:
        self._id = agent_id
        # Registry of message handlers keyed by payload type, populated below
        # from methods tagged by the ``msg_handler`` decorator.
        self.__handlers: DefaultDict[(Type[MsgPayload], List[Handler])] = defaultdict(list)
        self.supertype = supertype
        # Scan the instance for decorated handler methods and register them.
        # NOTE(review): 'observation_space'/'action_space' are excluded —
        # presumably so getattr does not trigger space properties during
        # construction; confirm against subclasses.
        for name in dir(self):
            if (name not in ['observation_space', 'action_space']):
                attr = getattr(self, name)
                if (callable(attr) and hasattr(attr, '_message_type')):
                    self.__handlers[attr._message_type].append(getattr(self, name))

    @property
    def id(self) -> AgentID:
        'The unique ID of the agent.'
        return self._id

    def view(self, neighbour_id: Optional[AgentID]=None) -> Optional[AgentView]:
        "Return an immutable view to the agent's public state."
        return None

    def pre_message_resolution(self, ctx: Context) -> None:
        'Perform internal, pre-message resolution updates to the agent.'

    def post_message_resolution(self, ctx: Context) -> None:
        'Perform internal, post-message resolution updates to the agent.'

    def handle_batch(self, ctx: Context, batch: Sequence[Message]) -> List[Tuple[(AgentID, MsgPayload)]]:
        """
        Handle a batch of messages from multiple potential senders.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.
            batch: The incoming batch of messages to handle.

        Returns:
            A list of receiver ID / message payload pairs to form into messages in
            response to further resolve.
        """
        all_responses = []
        for message in batch:
            # Each received message is logged before being dispatched.
            logger.log_msg_recv(message)
            responses = self.handle_message(ctx, message)
            if (responses is not None):
                all_responses += responses
        return all_responses

    def handle_message(self, ctx: Context, message: Message) -> List[Tuple[(AgentID, MsgPayload)]]:
        """
        Handle a message sent from another agent. The default implementation
        dispatches on payload type via the ``msg_handler`` function decorators.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.
            message: The contents of the message.

        Returns:
            A list of receiver ID / message payload pairs to form into messages in
            response to further resolve.

        Raises:
            ValueError: If no handler is registered for the payload type.
        """
        ptype = type(message.payload)
        if (ptype not in self.__handlers):
            raise ValueError(f"Unknown message type {ptype} in message sent from '{message.sender_id}' to '{self.id}'. Agent '{self.id}' needs a message handler function capable of receiving this mesage type.")
        # Run every registered handler for this payload type, drop any None
        # results, and flatten the remaining response lists into one.
        return list(chain.from_iterable(filter((lambda x: (x is not None)), (bound_handler(ctx, message) for bound_handler in self.__handlers[ptype]))))

    def generate_messages(self, ctx: Context) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Hook for agents that emit unsolicited messages each step; none by default.
        return []

    def reset(self) -> None:
        """
        Resets the Agent.

        If a supertype was provided it is sampled into :attr:`type`; otherwise,
        if the subclass declares a ``Supertype`` class, a default instance is
        sampled instead.

        Can be extended by subclasses to provide additional functionality.
        """
        if (self.supertype is not None):
            self.type = self.supertype.sample()
        elif hasattr(self, 'Supertype'):
            try:
                self.type = self.Supertype().sample()
            except TypeError as e:
                raise Exception(f"Tried to initialise agent {self.id}'s Supertype with default values but failed: {e}")

    def __repr__(self) -> str:
        return f'[{self.__class__.__name__} {self.id}]'
class StrategicAgent(Agent):
    """
    Representation of a behavioural (RL-driven) agent in the network.

    Instances occupy the nodes on the network graph. They are responsible for
    storing and monitoring internal state, constructing :class:`View` instances
    and handling messages, and additionally expose the encode/decode/reward
    hooks a policy needs.

    Arguments:
        agent_id: Unique identifier for the agent.
        observation_encoder: Optional :class:`Encoder` instance, otherwise define an
            :meth:`encode_observation` method on the :class:`Agent` sub-class.
        action_decoder: Optional :class:`Decoder` instance, otherwise define a
            :meth:`decode_action` method on the :class:`Agent` sub-class.
        reward_function: Optional :class:`RewardFunction` instance, otherwise define a
            :meth:`compute_reward` method on the :class:`Agent` sub-class.
        supertype: Optional :class:`Supertype` instance. When the agent's reset function
            is called the supertype will be sampled from and the values set as the
            agent's :attr:`type` property.
    """

    def __init__(self, agent_id: AgentID, observation_encoder: Optional[Encoder]=None, action_decoder: Optional[Decoder]=None, reward_function: Optional[RewardFunction]=None, supertype: Optional[Supertype]=None) -> None:
        super().__init__(agent_id, supertype)
        self.observation_encoder = observation_encoder
        self.action_decoder = action_decoder
        self.reward_function = reward_function
        # Derive the spaces from the decoder/encoder when one is supplied;
        # otherwise leave any space already defined on the subclass untouched,
        # defaulting to None when nothing is available.
        if (action_decoder is not None):
            self.action_space = action_decoder.action_space
        elif ('action_space' not in dir(self)):
            self.action_space = None
        if (observation_encoder is not None):
            self.observation_space = observation_encoder.observation_space
        elif ('observation_space' not in dir(self)):
            self.observation_space = None

    def encode_observation(self, ctx: Context) -> Observation:
        """
        Encodes a local view of the environment state into a set of observations.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.

        Returns:
            A numpy array encoding the observations.

        Raises:
            NotImplementedError: If no ``observation_encoder`` was supplied and
                the subclass did not override this method.
        """
        if (self.observation_encoder is None):
            raise NotImplementedError(f"Agent '{self.id}' does not have an Encoder instance set as 'observation_encoder' or a custom 'encode_observation' method defined")
        return self.observation_encoder.encode(ctx)

    def decode_action(self, ctx: Context, action: Action) -> Optional[List[Tuple[(AgentID, MsgPayload)]]]:
        """
        Decodes an action taken by the agent policy into a set of messages to be
        sent to other agents in the network.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.
            action: The action taken by the agent.

        Returns:
            A list of receiver ID / message payload pairs to form into messages in
            response to further resolve.

        Raises:
            NotImplementedError: If no ``action_decoder`` was supplied and the
                subclass did not override this method.
        """
        if (self.action_decoder is None):
            raise NotImplementedError(f"Agent '{self.id}' does not have an Decoder instance set as 'action_decoder' or a custom 'decode_action' method defined")
        return self.action_decoder.decode(ctx, action)

    def compute_reward(self, ctx: Context) -> float:
        """
        Computes a reward value based on an agent's current state.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.

        Returns:
            A float representing the present reward value.

        Raises:
            NotImplementedError: If no ``reward_function`` was supplied and the
                subclass did not override this method.
        """
        if (self.reward_function is None):
            raise NotImplementedError(f"Agent '{self.id}' does not have an RewardFunction instance set as 'reward_function' or a custom 'compute_reward' method defined")
        return self.reward_function.reward(ctx)

    def is_terminated(self, ctx: Context) -> bool:
        """
        Indicates whether 'a `terminal state` (as defined under the MDP of the task) is
        reached' for the agent. The default implementation never terminates the
        agent individually.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.

        Returns:
            A boolean representing the terminal status of the agent.
        """
        return False

    def is_truncated(self, ctx: Context) -> bool:
        """
        Indicates whether 'a truncation condition outside the scope of the MDP is
        satisfied' for the agent. The default implementation never truncates.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.

        Returns:
            A boolean representing the truncated status of the agent.
        """
        return False

    def collect_infos(self, ctx: Context) -> Dict[(str, Any)]:
        """
        Provides diagnostic information about the agent, useful for debugging.

        Note:
            This method may be extended by sub-classes to provide additional functionality.

        Arguments:
            ctx: A Context object representing the agent's local view of the environment.

        Returns:
            A dictionary containing information about the agent.
        """
        return {}
def msg_handler(message_type: Type[MsgPayload]) -> Callable[([Handler], Handler)]:
    """Decorator factory tagging a method as the handler for ``message_type``.

    ``Agent.__init__`` scans for the ``_message_type`` attribute set here when
    building its handler registry.
    """

    def decorator(fn: Handler) -> Handler:
        fn._message_type = message_type
        return fn

    return decorator
@dataclass(frozen=True)
class Context():
    """
    Immutable representation of the local (star-graph) neighbourhood around a
    focal agent node.

    In principle this could be extended to something different to a star graph,
    but for now this is how context is defined.

    Attributes:
        agent: Focal node of the ego network.
        agent_views: A collection of view objects, each one associated with an
            adjacent agent.
        env_view: A view object associated with the environment.
    """

    agent: 'Agent'
    agent_views: Dict[(AgentID, Optional[AgentView])]
    env_view: EnvView

    @property
    def neighbour_ids(self) -> List[AgentID]:
        'List of IDs of the neighbouring agents.'
        return list(self.agent_views)

    def __getitem__(self, view_id: str) -> Any:
        return self.agent_views[view_id]

    def __contains__(self, view_id: str) -> bool:
        return view_id in self.agent_views
class Decoder(Generic[Action], ABC):
    """Abstract base for types that decode raw policy actions into messages."""

    @property
    @abstractmethod
    def action_space(self) -> gym.Space:
        """The action/input space this decoder accepts."""

    @abstractmethod
    def decode(self, ctx: Context, action: Action) -> List[Tuple[(AgentID, MsgPayload)]]:
        """Convert an action into (receiver ID, payload) pairs.

        Arguments:
            ctx: The local network context.
            action: An action instance which is an element of this decoder's
                action space.
        """

    def chain(self, others: Iterable['Decoder']) -> 'ChainedDecoder':
        """Combine this decoder with an adjoining set of decoders.

        The returned :class:`ChainedDecoder` has a tuple action space with one
        element per constituent decoder's action space.
        """
        combined = flatten([self, others])
        return ChainedDecoder(combined)

    def reset(self):
        """Reset any internal decoder state (no-op by default)."""

    def __repr__(self) -> str:
        return repr(self.action_space)

    def __str__(self) -> str:
        return str(self.action_space)
class EmptyDecoder(Decoder[Any]):
    """Decoder that accepts any action and emits no messages."""

    @property
    def action_space(self) -> gym.Space:
        # Zero-length box: there is nothing meaningful to decode.
        return gym.spaces.Box(-np.inf, np.inf, (0,))

    def decode(self, _: Context, action: Action) -> List[Tuple[(AgentID, MsgPayload)]]:
        return []
class ChainedDecoder(Decoder[Tuple]):
    """Combines n decoders into a single decoder with a tuple action space.

    Attributes:
        decoders: The constituent decoders, flattened into a list.
    """

    def __init__(self, decoders: Iterable[Decoder]):
        self.decoders: List[Decoder] = flatten(decoders)

    @property
    def action_space(self) -> gym.Space:
        sub_spaces = tuple(sub.action_space for sub in self.decoders)
        return gym.spaces.Tuple(sub_spaces)

    def decode(self, ctx: Context, action: Tuple) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Route each element of the action tuple to its paired decoder and
        # concatenate all resulting messages.
        messages: List[Tuple[(AgentID, MsgPayload)]] = []
        for sub, sub_action in zip(self.decoders, action):
            messages.extend(sub.decode(ctx, sub_action))
        return messages

    def chain(self, others: Iterable['Decoder']) -> 'ChainedDecoder':
        return ChainedDecoder(self.decoders + list(others))

    def reset(self):
        for sub in self.decoders:
            sub.reset()
class DictDecoder(Decoder[Dict[(str, Any)]]):
    """Combines named decoders into a single decoder with a dict action space.

    Attributes:
        decoders: A mapping of decoder names to decoders.
    """

    def __init__(self, decoders: Mapping[(str, Decoder)]):
        self.decoders: Dict[(str, Decoder)] = dict(decoders)

    @property
    def action_space(self) -> gym.Space:
        sub_spaces = {name: sub.action_space for (name, sub) in self.decoders.items()}
        return gym.spaces.Dict(sub_spaces)

    def decode(self, ctx: Context, action: Dict[(str, Any)]) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Dispatch each named sub-action to its decoder and concatenate.
        messages: List[Tuple[(AgentID, MsgPayload)]] = []
        for name, sub in self.decoders.items():
            messages.extend(sub.decode(ctx, action[name]))
        return messages

    def reset(self):
        for sub in self.decoders.values():
            sub.reset()
class Encoder(Generic[Observation], ABC):
    """Abstract base for types that encode an agent's context into an observation."""

    @property
    @abstractmethod
    def observation_space(self) -> gym.Space:
        """The output space of the encoder type."""

    @abstractmethod
    def encode(self, ctx: Context) -> Observation:
        """Encode the data in a given network context into an observation.

        Arguments:
            ctx: The local network context.

        Returns:
            An observation encoding properties of the provided context.
        """

    def chain(self, others: Iterable['Encoder']) -> 'ChainedEncoder':
        """Combine this encoder with an adjoining set of encoders.

        The returned :class:`ChainedEncoder` has a tuple observation space with
        one element per constituent encoder's observation space.
        """
        combined = flatten([self, others])
        return ChainedEncoder(combined)

    def reset(self):
        """Reset any internal encoder state (no-op by default)."""

    def __repr__(self) -> str:
        return repr(self.observation_space)

    def __str__(self) -> str:
        return str(self.observation_space)
class EmptyEncoder(Encoder[np.ndarray]):
    """Encoder producing a constant single-zero observation."""

    @property
    def observation_space(self) -> gym.spaces.Box:
        return gym.spaces.Box(-np.inf, np.inf, (1,))

    def encode(self, _: Context) -> np.ndarray:
        return np.zeros((1,))
class ChainedEncoder(Encoder[Tuple]):
    """Combines n encoders into a single encoder with a tuple observation space.

    Attributes:
        encoders: The constituent encoders, flattened into a list.
    """

    def __init__(self, encoders: Iterable[Encoder]):
        self.encoders: List[Encoder] = flatten(encoders)

    @property
    def observation_space(self) -> gym.Space:
        sub_spaces = tuple(sub.observation_space for sub in self.encoders)
        return gym.spaces.Tuple(sub_spaces)

    def encode(self, ctx: Context) -> Tuple:
        # One observation element per constituent encoder, in order.
        return tuple(sub.encode(ctx) for sub in self.encoders)

    def chain(self, others: Iterable['Encoder']) -> 'ChainedEncoder':
        return ChainedEncoder(self.encoders + list(others))

    def reset(self):
        for sub in self.encoders:
            sub.reset()
class DictEncoder(Encoder[Dict[(str, Any)]]):
    """Combines n encoders into a single encoder with a dict action space.

    Attributes:
        encoders: A mapping of encoder names to encoders.
    """

    def __init__(self, encoders: Mapping[(str, Encoder)]):
        self.encoders: Dict[(str, Encoder)] = dict(encoders)

    @property
    def observation_space(self) -> gym.Space:
        # One named sub-space per wrapped encoder.
        sub_spaces = {key: sub.observation_space for key, sub in self.encoders.items()}
        return gym.spaces.Dict(sub_spaces)

    def encode(self, ctx: Context) -> Dict[(str, Any)]:
        encoded: Dict[(str, Any)] = {}
        for key, sub in self.encoders.items():
            encoded[key] = sub.encode(ctx)
        return encoded

    def reset(self):
        for sub in self.encoders.values():
            sub.reset()
class Constant(Encoder[np.ndarray]):
    """Encoder that always returns a constant valued Box Space.

    Arguments:
        shape: Shape of the returned box.
        value: Value that the box is filled with.
    """

    def __init__(self, shape: Tuple[int, ...], value: float = 0.0) -> None:
        self._shape = shape
        self._value = value

    @property
    def observation_space(self) -> gym.spaces.Box:
        return gym.spaces.Box((- np.inf), np.inf, shape=self._shape, dtype=np.float32)

    def encode(self, _: Context) -> np.ndarray:
        # Fix: emit float32 so the observation matches the dtype declared by
        # `observation_space`. `np.full` defaults to float64, which would make
        # `observation_space.contains(obs)` dtype checks fail.
        return np.full(self._shape, self._value, dtype=np.float32)
class PhantomEnv(gym.Env):
    """
    Base Phantom environment.

    Usage:
        >>> env = PhantomEnv({ ... })
        >>> env.reset()
        <Observation: dict>
        >>> env.step({ ... })
        <Step: 4-tuple>

    Attributes:
        num_steps: The maximum number of steps the environment allows per episode.
        network: A Network class or derived class describing the connections between
            agents and agents in the environment.
        env_supertype: Optional Supertype class instance for the environment. If this is
            set, it will be sampled from and the :attr:`env_type` property set on the
            class with every call to :meth:`reset()`.
        agent_supertypes: Optional mapping of agent IDs to Supertype class instances. If
            these are set, each supertype will be sampled from and the :attr:`type`
            property set on the related agent with every call to :meth:`reset()`.
    """

    class Step(NamedTuple):
        # Aggregate result of :meth:`step()`: per-agent dictionaries following
        # the Gymnasium terminated/truncated convention.
        observations: Dict[(AgentID, Any)]
        rewards: Dict[(AgentID, float)]
        terminations: Dict[(AgentID, bool)]
        truncations: Dict[(AgentID, bool)]
        infos: Dict[(AgentID, Any)]

    def __init__(self, num_steps: int, network: Optional[Network]=None, env_supertype: Optional[Supertype]=None, agent_supertypes: Optional[Mapping[(AgentID, Supertype)]]=None) -> None:
        self.network = (network or Network())
        self._current_step = 0
        self.num_steps = num_steps
        self.env_supertype: Optional[Supertype] = None
        self.env_type: Optional[Supertype] = None
        # IDs of agents that have already terminated/truncated this episode;
        # they are skipped by :meth:`step` and :meth:`_make_ctxs`.
        self._terminations: Set[AgentID] = set()
        self._truncations: Set[AgentID] = set()
        # Per-agent network contexts, rebuilt via :meth:`_make_ctxs`.
        self._ctxs: Dict[(AgentID, Context)] = {}
        # De-duplicated list of samplers discovered on env/agent supertypes.
        self._samplers: List[Sampler] = []
        if (env_supertype is not None):
            # NOTE(review): assumes a `Supertype` attribute is available on
            # this class (presumably set by a subclass/decorator) -- confirm.
            if isinstance(env_supertype, dict):
                env_supertype = self.Supertype(**env_supertype)
            else:
                assert isinstance(env_supertype, self.Supertype)
            env_supertype._managed = True
            # Collect any samplers attached to the supertype's fields.
            for value in env_supertype.__dict__.values():
                if (isinstance(value, Sampler) and (value not in self._samplers)):
                    self._samplers.append(value)
            self.env_supertype = env_supertype
        if (agent_supertypes is not None):
            for (agent_id, agent_supertype) in agent_supertypes.items():
                if isinstance(agent_supertype, dict):
                    agent_supertype = self.agents[agent_id].Supertype(**agent_supertype)
                agent_supertype._managed = True
                for value in agent_supertype.__dict__.values():
                    if (isinstance(value, Sampler) and (value not in self._samplers)):
                        self._samplers.append(value)
                agent = self.network.agents[agent_id]
                agent.supertype = agent_supertype
        # Take an initial sample from every sampler and reset all agents so
        # the env is in a consistent state before the first call to reset().
        for sampler in self._samplers:
            sampler.sample()
        for agent in self.agents.values():
            agent.reset()

    @property
    def current_step(self) -> int:
        'Return the current step of the environment.'
        return self._current_step

    @property
    def n_agents(self) -> int:
        'Return the number of agents in the environment.'
        return len(self.agent_ids)

    @property
    def agents(self) -> Dict[(AgentID, Agent)]:
        'Return a mapping of agent IDs to agents in the environment.'
        return self.network.agents

    @property
    def agent_ids(self) -> List[AgentID]:
        'Return a list of the IDs of the agents in the environment.'
        return list(self.network.agent_ids)

    @property
    def strategic_agents(self) -> List[StrategicAgent]:
        'Return a list of agents that take actions.'
        return [a for a in self.agents.values() if isinstance(a, StrategicAgent)]

    @property
    def non_strategic_agents(self) -> List[Agent]:
        'Return a list of agents that do not take actions.'
        return [a for a in self.agents.values() if (not isinstance(a, StrategicAgent))]

    @property
    def strategic_agent_ids(self) -> List[AgentID]:
        'Return a list of the IDs of the agents that take actions.'
        return [a.id for a in self.agents.values() if isinstance(a, StrategicAgent)]

    @property
    def non_strategic_agent_ids(self) -> List[AgentID]:
        'Return a list of the IDs of the agents that do not take actions.'
        return [a.id for a in self.agents.values() if (not isinstance(a, StrategicAgent))]

    def view(self, agent_views: Dict[(AgentID, AgentView)]) -> EnvView:
        "Return an immutable view to the environment's public state."
        # NOTE(review): `agent_views` is unused in this base implementation;
        # presumably subclasses consume it -- confirm.
        return EnvView(self.current_step, (self.current_step / self.num_steps))

    def pre_message_resolution(self) -> None:
        'Perform internal, pre-message resolution updates to the environment.'
        for ctx in self._ctxs.values():
            ctx.agent.pre_message_resolution(ctx)

    def post_message_resolution(self) -> None:
        'Perform internal, post-message resolution updates to the environment.'
        for ctx in self._ctxs.values():
            ctx.agent.post_message_resolution(ctx)

    def resolve_network(self) -> None:
        # Agent hooks run immediately before and after the network delivers
        # all queued messages.
        self.pre_message_resolution()
        self.network.resolve(self._ctxs)
        self.post_message_resolution()

    def reset(self, seed: Optional[int]=None, options: Optional[Dict[(str, Any)]]=None) -> Tuple[(Dict[(AgentID, Any)], Dict[(str, Any)])]:
        """
        Reset the environment and return an initial observation.

        This method resets the step count and the :attr:`network`. This includes all the
        agents in the network.

        Args:
            seed: An optional seed to use for the new episode.
            options : Additional information to specify how the environment is reset.

        Returns:
            - A dictionary mapping Agent IDs to observations made by the respective
              agents. It is not required for all agents to make an initial observation.
            - A dictionary with auxillary information, equivalent to the info dictionary
              in `env.step()`.
        """
        logger.log_reset()
        super().reset(seed=seed, options=options)
        self._current_step = 0
        # Re-sample all supertype samplers for the new episode.
        for sampler in self._samplers:
            sampler.sample()
        if (self.env_supertype is not None):
            self.env_type = self.env_supertype.sample()
        self.network.reset()
        self._terminations = set()
        self._truncations = set()
        self._make_ctxs(self.strategic_agent_ids)
        obs = {ctx.agent.id: ctx.agent.encode_observation(ctx) for ctx in self._ctxs.values()}
        logger.log_observations(obs)
        # Agents may return None to signal "no initial observation".
        return ({k: v for (k, v) in obs.items() if (v is not None)}, {})

    def step(self, actions: Mapping[(AgentID, Any)]) -> 'PhantomEnv.Step':
        """
        Step the simulation forward one step given some set of agent actions.

        Arguments:
            actions: Actions output by the agent policies to be translated into
                messages and passed throughout the network.

        Returns:
            A :class:`PhantomEnv.Step` object containing observations, rewards,
            terminations, truncations and infos.
        """
        self._current_step += 1
        logger.log_step(self.current_step, self.num_steps)
        logger.log_actions(actions)
        logger.log_start_decoding_actions()
        # Decode actions into messages and resolve the message network.
        self._make_ctxs(self.agent_ids)
        self._handle_acting_agents(self.agent_ids, actions)
        self.resolve_network()
        observations: Dict[(AgentID, Any)] = {}
        rewards: Dict[(AgentID, Any)] = {}
        terminations: Dict[(AgentID, bool)] = {}
        truncations: Dict[(AgentID, bool)] = {}
        infos: Dict[(AgentID, Dict[(str, Any)])] = {}
        for aid in self.strategic_agent_ids:
            # Agents that already finished in a previous step are skipped.
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            obs = ctx.agent.encode_observation(ctx)
            if (obs is not None):
                observations[aid] = obs
                infos[aid] = ctx.agent.collect_infos(ctx)
            rewards[aid] = ctx.agent.compute_reward(ctx)
            terminations[aid] = ctx.agent.is_terminated(ctx)
            truncations[aid] = ctx.agent.is_truncated(ctx)
            # Remember finished agents so they are excluded from later steps.
            if terminations[aid]:
                self._terminations.add(aid)
            if truncations[aid]:
                self._truncations.add(aid)
        logger.log_step_values(observations, rewards, terminations, truncations, infos)
        logger.log_metrics(self)
        # RLlib-style "__all__" keys signal episode-level termination.
        terminations['__all__'] = self.is_terminated()
        truncations['__all__'] = self.is_truncated()
        if (terminations['__all__'] or truncations['__all__']):
            logger.log_episode_done()
        return self.Step(observations, rewards, terminations, truncations, infos)

    def render(self) -> None:
        # Rendering is not implemented for the base environment.
        return None

    def is_terminated(self) -> bool:
        'Implements the logic to decide when the episode is terminated.'
        return (len(self._terminations) == len(self.strategic_agents))

    def is_truncated(self) -> bool:
        'Implements the logic to decide when the episode is truncated.'
        is_at_max_step = ((self.num_steps is not None) and (self.current_step == self.num_steps))
        return (is_at_max_step or (len(self._truncations) == len(self.strategic_agents)))

    def _handle_acting_agents(self, agent_ids: Sequence[AgentID], actions: Mapping[(AgentID, Any)]) -> None:
        'Internal method.'
        # Translate each live agent's action (or generated messages, for
        # agents without an action this step) into network messages.
        for aid in agent_ids:
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            if (aid in actions):
                messages = (ctx.agent.decode_action(ctx, actions[aid]) or [])
            else:
                messages = (ctx.agent.generate_messages(ctx) or [])
            for (receiver_id, message) in messages:
                self.network.send(aid, receiver_id, message)

    def _make_ctxs(self, agent_ids: Sequence[AgentID]) -> None:
        'Internal method.'
        # Build one shared env view from all agents' public views, then a
        # per-agent context for every agent that has not yet finished.
        env_view = self.view({agent_id: agent.view() for (agent_id, agent) in self.agents.items()})
        self._ctxs = {aid: self.network.context_for(aid, env_view) for aid in agent_ids if ((aid not in self._terminations) and (aid not in self._truncations))}

    def __getitem__(self, agent_id: AgentID) -> Agent:
        # Allow `env[agent_id]` access to agents in the network.
        return self.network[agent_id]
class SingleAgentEnvAdapter(gym.Env):
    """
    Wraps a :class:`PhantomEnv` instance or sub-class providing a fully compatible
    :class:`gym.Env` interface, from the perspective of a single agent.

    This can be used to test and experiment with Phantom environments using other
    single-agent only frameworks when only one agent is an active learning agent.

    Arguments:
        env_class: The :class:`PhantomEnv` class or sub-class to wrap (note: must not be
            an already initialised class instance)
        agent_id: The ID of the agent that the wrapper will explicitly control.
        other_policies: A mapping of all other agent IDs to their policies and policy
            configs. The policies must be fixed/pre-trained policies.
        env_config: Any config options to pass to the underlying env when initialising.
    """

    def __init__(self, env_class: Type[PhantomEnv], agent_id: AgentID, other_policies: Mapping[(AgentID, Tuple[(Type[Policy], Mapping[(str, Any)])])], env_config: Optional[Mapping[(str, Any)]]=None) -> None:
        self._env = env_class(**(env_config or {}))

        if (agent_id not in self._env.agent_ids):
            raise ValueError(f"Selected agent '{agent_id}' of SingleAgentEnvAdapter not found in underlying env '{env_class.__name__}'")
        if (agent_id in other_policies):
            raise ValueError(f"Selected agent '{agent_id}' of SingleAgentEnvAdapter found in agent ID to policy mapping")

        # All agent IDs that are covered by some policy (fixed or external).
        policies = (list(other_policies.keys()) + [agent_id])

        for agent in self._env.agents.values():
            if ((agent.action_space is not None) and (agent.id not in policies)):
                # Bug fix: report the ID of the agent that is actually missing
                # a policy (`agent.id`), not the selected agent's ID.
                raise ValueError(f"Agent '{agent.id}' has not been defined a policy via the 'other_policies' parameter of SingleAgentEnvAdapter")

        self._env.reset()

        self._agent_id = agent_id
        # Instantiate the fixed policies for all other acting agents, giving
        # each its agent's observation and action spaces.
        self._other_policies = {agent_id: policy_class(self._env[agent_id].observation_space, self._env[agent_id].action_space, **policy_config) for (agent_id, (policy_class, policy_config)) in other_policies.items()}
        self._actions: Dict[(AgentID, Any)] = {}
        self._observations: Dict[(AgentID, Any)] = {}

        super().__init__()

    @property
    def active_agent(self) -> AgentID:
        'Return the ID of the agent controlled through this adapter.'
        return self._agent_id

    @property
    def agents(self) -> Dict[(AgentID, Agent)]:
        'Return a mapping of agent IDs to agents in the environment.'
        return self._env.agents

    @property
    def agent_ids(self) -> List[AgentID]:
        'Return a list of the IDs of the agents in the environment.'
        return self._env.agent_ids

    @property
    def n_agents(self) -> int:
        'Return the number of agents in the environment.'
        return self._env.n_agents

    @property
    def current_step(self) -> int:
        'Return the current step of the environment.'
        return self._env.current_step

    @property
    def action_space(self) -> gym.Space:
        'Return the action space of the selected env agent.'
        return self._env[self._agent_id].action_space

    @property
    def observation_space(self) -> gym.Space:
        'Return the observation space of the selected env agent.'
        return self._env[self._agent_id].observation_space

    def step(self, action: ActType) -> Tuple[(ObsType, float, bool, bool, dict)]:
        """
        Run one timestep of the environment's dynamics.

        When end of episode is reached, you are responsible for calling :meth:`reset` to
        reset this environment's state.

        Accepts an action and returns a 5-tuple
        `(observation, reward, terminated, truncated, info)`.

        Args:
            action: an action provided by the agent

        Returns:
            observation: this will be an element of the environment's
                :attr:`observation_space`. This may, for instance, be a numpy array
                containing the positions and velocities of certain objects.
            reward: The amount of reward returned as a result of taking the action.
            terminated: Whether the agent reaches the terminal state (as defined under
                the MDP of the task) which can be positive or negative. If true, the
                user needs to call reset().
            truncated: Whether the truncation condition outside the scope of the MDP is
                satisfied. Typically, this is a timelimit, but could also be used to
                indicate an agent physically going out of bounds. If true, the user
                needs to call reset().
            info: A dictionary that contains auxiliary diagnostic information
                (helpful for debugging, learning, and logging).
        """
        # Compute the fixed-policy actions from the observations captured at
        # the previous step/reset, then add the externally supplied action.
        self._actions = {agent_id: policy.compute_action(self._observations[agent_id]) for (agent_id, policy) in self._other_policies.items()}
        self._actions[self._agent_id] = action

        step = self._env.step(self._actions)

        # Remember all observations so fixed policies can act next step.
        self._observations = step.observations

        return (step.observations[self._agent_id], step.rewards[self._agent_id], step.terminations[self._agent_id], step.truncations[self._agent_id], step.infos[self._agent_id])

    def reset(self, seed: Optional[int]=None, options: Optional[Dict[(str, Any)]]=None) -> Tuple[(ObsType, Dict[(str, Any)])]:
        """
        Resets the environment to an initial state and returns an initial observation.

        Args:
            seed: An optional seed to use for the new episode.
            options: Additional information to specify how the environment is reset.

        Returns:
            - The initial observation.
            - A dictionary with auxillary information, equivalent to the info dictionary
              in `env.step()`.
        """
        # Bug fix / gym API compliance: forward `seed` and `options` to the
        # wrapped env (previously they were dropped entirely, so seeding the
        # adapter had no effect on the underlying environment).
        (self._observations, infos) = self._env.reset(seed=seed, options=options)
        return (self._observations[self._agent_id], infos)
class FSMValidationError(Exception):
    """
    Raised when validation of the FSM fails while initialising the
    :class:`FiniteStateMachineEnv`.
    """