repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
bonito | bonito-master/bonito/reader.py | """
Bonito Read Utils
"""
from glob import iglob
from collections import OrderedDict
from importlib import import_module
import torch
import numpy as np
from scipy.signal import find_peaks
# Supported raw-signal container formats, in probe order.
__formats__ = ["fast5", "pod5"]

# Normalisation parameters for kit 14 DNA.
# Different parameters can be specified in the 'normalisation' section
# of a bonito config file.
__default_norm_params__ = {
    'quantile_a': 0.2,
    'quantile_b': 0.9,
    'shift_multiplier': 0.51,
    'scale_multiplier': 0.53,
}
class Reader:
    """
    Detect the signal file format present under *directory* and dispatch
    read access to the matching bonito reader module (fast5/pod5).
    """

    def __init__(self, directory, recursive=False):
        self.fmt = None
        for candidate in __formats__:
            glob_pat = f"**/*.{candidate}" if recursive else f"*.{candidate}"
            hit = next(iglob(directory + "/" + glob_pat, recursive=True), None)
            if hit is not None:
                self.fmt = candidate
                break
        else:
            # no file of any supported format was found
            raise FileNotFoundError()
        backend = import_module(f"bonito.{self.fmt}")
        self._get_reads = getattr(backend, "get_reads")
        self._get_read_groups = getattr(backend, "get_read_groups")

    def get_reads(self, *args, **kwargs):
        """Delegate to the format-specific reader module."""
        return self._get_reads(*args, **kwargs)

    def get_read_groups(self, *args, **kwargs):
        """Delegate to the format-specific reader module."""
        return self._get_read_groups(*args, **kwargs)
class Read:
    """
    Container for a single read plus helpers producing SAM read-group
    headers and per-read tag data.
    """

    def __init__(self, read, filename, meta=False):
        # body elided in this source dump
        ...

    def __repr__(self):
        return f"Read('{self.read_id}')"

    def readgroup(self, model):
        """Return a SAM ``@RG`` header line for this read's run and *model*."""
        description = ' '.join([
            f"run_id={self.run_id}",
            f"basecall_model={model}",
        ])
        self._groupdict = OrderedDict([
            ('ID', f"{self.run_id}_{model}"),
            ('PL', "ONT"),
            ('DT', f"{self.exp_start_time}"),
            ('PU', f"{self.flow_cell_id}"),
            ('PM', f"{self.device_id}"),
            ('LB', f"{self.sample_id}"),
            ('SM', f"{self.sample_id}"),
            ('DS', description),
        ])
        return '\t'.join(["@RG", *(f"{k}:{v}" for k, v in self._groupdict.items())])

    def tagdata(self):
        """Return the per-read SAM tags emitted alongside the alignment."""
        return [
            f"mx:i:{self.mux}",
            f"ch:i:{self.channel}",
            f"st:Z:{self.start_time}",
            f"du:f:{self.duration}",
            f"rn:i:{self.read_number}",
            f"f5:Z:{self.filename}",
            f"sm:f:{self.shift}",
            f"sd:f:{self.scale}",
            f"sv:Z:quantile",
        ]
class ReadChunk:
    """
    A fixed-size slice of a read's signal, carrying over the parent read's
    metadata. Chunk ``i`` of ``n`` is identified as ``<read_id>:<i>:<n>``.
    """

    def __init__(self, read, chunk, i, n):
        self.read_id = "%s:%i:%i" % (read.read_id, i, n)
        for attr in ("run_id", "filename", "mux", "channel", "start", "duration"):
            setattr(self, attr, getattr(read, attr))
        self.template_start = self.start
        self.template_duration = self.duration
        self.signal = chunk

    def __repr__(self):
        return "ReadChunk('%s')" % self.read_id
def read_chunks(read, chunksize=4000, overlap=400):
    """
    Split a Read in fixed sized ReadChunks

    Reads shorter than *chunksize* yield nothing. A front offset is dropped
    so the final chunk ends exactly at the end of the signal.
    """
    n_samples = len(read.signal)
    if n_samples < chunksize:
        return
    _, leftover = divmod(n_samples - chunksize, chunksize - overlap)
    windows = torch.from_numpy(read.signal[leftover:]).unfold(0, chunksize, chunksize - overlap)
    total = windows.shape[0]
    for idx, win in enumerate(windows, start=1):
        yield ReadChunk(read, win.numpy(), idx, total)
def trim(signal, window_size=40, threshold=2.4, min_trim=10, min_elements=3, max_samples=8000, max_trim=0.3):
    """
    Locate the end of the adapter/stall region at the start of *signal*.

    Scans fixed-size windows (offset by *min_trim*) for more than
    *min_elements* samples above *threshold*; after such a peak, returns the
    end of the first window that drops back below threshold. Falls back to
    *min_trim* when no cut point is found within *max_samples* samples or
    the first *max_trim* fraction of the signal.
    """
    limit = min(max_samples, len(signal))
    peaked = False
    for win_idx in range(limit // window_size):
        begin = win_idx * window_size + min_trim
        stop = begin + window_size
        seg = signal[begin:stop]
        if peaked or len(seg[seg > threshold]) > min_elements:
            peaked = True
            if seg[-1] > threshold:
                # still inside the peak — keep scanning
                continue
            if stop >= limit or stop / len(signal) > max_trim:
                return min_trim
            return stop
    return min_trim
def normalisation(sig, norm_params=None):
    """
    Calculate signal shift and scale factors for normalisation.

    Both factors are derived from two quantiles of *sig* and floored
    (shift at 10, scale at 1.0) to guard against degenerate signals.
    """
    params = __default_norm_params__ if norm_params is None else norm_params
    lo, hi = np.quantile(sig, [params['quantile_a'], params['quantile_b']])
    shift = max(10, params['shift_multiplier'] * (lo + hi))
    scale = max(1.0, params['scale_multiplier'] * (hi - lo))
    return shift, scale
| 4,550 | 29.543624 | 109 | py |
bonito | bonito-master/bonito/training.py | """
Bonito train
"""
import math
import os
import re
from glob import glob
from functools import partial
from time import perf_counter
from collections import OrderedDict
from datetime import datetime
from bonito.schedule import linear_warmup_cosine_decay
from bonito.util import accuracy, decode_ref, permute, concat, match_names
import bonito
import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
import torch.cuda.amp as amp
def load_state(dirname, device, model, optim=None):
    """
    Load a model state dict from disk

    Picks the newest ``weights_<n>.tar`` checkpoint in *dirname* (and, when
    *optim* is given, the newest epoch for which a matching ``optim_<n>.tar``
    also exists), loads it, and returns that epoch number — or 0 when no
    checkpoint was found.
    """
    model.to(device)
    if hasattr(model, "module"):
        model = model.module

    def checkpoint_numbers(prefix):
        files = glob(os.path.join(dirname, f"{prefix}_*.tar"))
        return {int(re.sub(".*_([0-9]+).tar", "\\1", f)) for f in files}

    optim_nos = checkpoint_numbers("optim")
    weight_nos = checkpoint_numbers("weights")

    if optim is not None:
        # restore only from epochs that saved both weights and optimizer
        weight_no = optim_no = max(optim_nos & weight_nos, default=None)
    else:
        weight_no, optim_no = max(weight_nos, default=None), None

    targets = []
    if weight_no:
        targets.append(("weights", model))
    if optim_no:
        targets.append(("optim", optim))

    if not targets:
        return 0

    print("[picking up %s state from epoch %s]" % (', '.join(n for n, _ in targets), weight_no))
    for name, obj in targets:
        state_dict = torch.load(
            os.path.join(dirname, '%s_%s.tar' % (name, weight_no)), map_location=device
        )
        if name == "weights":
            # remap checkpoint keys by shape, then strip DataParallel prefixes
            state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, obj).items()}
            state_dict = OrderedDict(
                (k.replace('module.', ''), v) for k, v in state_dict.items()
            )
        obj.load_state_dict(state_dict)
    return weight_no
class ClipGrad:
    """
    Adaptive gradient clipping: clip to *factor* times a running quantile
    of recently observed gradient norms.
    """

    def __init__(self, quantile=0.5, factor=2.0, buffer_size=100):
        # ring buffer of recent gradient norms, seeded optimistically high
        # so early steps are effectively unclipped
        self.buffer = np.full(buffer_size, fill_value=1e6)
        self.quantile = quantile
        self.factor = factor
        self.i = 0

    def append(self, grad_norm):
        """Record *grad_norm* into the ring buffer."""
        self.buffer[self.i] = grad_norm
        self.i = (self.i + 1) % len(self.buffer)

    def __call__(self, parameters):
        threshold = self.factor * np.quantile(self.buffer, self.quantile)
        norm = torch.nn.utils.clip_grad_norm_(parameters, max_norm=threshold).item()
        # NaN norms are not recorded — they would poison the quantile
        if not math.isnan(norm):
            self.append(norm)
        return norm
class Trainer:
    """
    Training/validation driver for a bonito model.

    Supports mixed precision (AMP), gradient accumulation, quantile-based
    gradient clipping, per-epoch checkpointing and CSV loss logging.
    """

    def __init__(
        self, model, device, train_loader, valid_loader, criterion=None,
        use_amp=True, lr_scheduler_fn=None, restore_optim=False,
        save_optim_every=10, grad_accum_split=1, quantile_grad_clip=False
    ):
        self.model = model.to(device)
        self.device = device
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        # fall back to the model's own loss when no criterion is supplied
        self.criterion = criterion or model.loss
        self.use_amp = use_amp
        self.lr_scheduler_fn = lr_scheduler_fn or linear_warmup_cosine_decay()
        self.restore_optim = restore_optim
        self.save_optim_every = save_optim_every
        self.grad_accum_split = grad_accum_split
        self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
        self.optimizer = None
        if quantile_grad_clip:
            self.clip_grad = ClipGrad()
        else:
            self.clip_grad = lambda parameters: torch.nn.utils.clip_grad_norm_(parameters, max_norm=2.0).item()

    def train_one_step(self, batch):
        """Run one optimisation step on *batch*, optionally split for gradient accumulation.

        Returns (losses dict of floats, gradient norm).
        """
        self.optimizer.zero_grad()
        losses = None
        with amp.autocast(enabled=self.use_amp):
            for batch_ in zip(
                *map(lambda t: t.chunk(self.grad_accum_split, dim=0), batch)
            ):
                data_, targets_, lengths_, *args = (x.to(self.device) for x in batch_)
                scores_ = self.model(data_, *args)
                losses_ = self.criterion(scores_, targets_, lengths_)
                if not isinstance(losses_, dict):
                    losses_ = {'loss': losses_}
                # backprop the (possibly composite) total, averaged over splits
                total_loss = losses_.get('total_loss', losses_['loss']) / self.grad_accum_split
                self.scaler.scale(total_loss).backward()
                losses = {
                    k: ((v.item() / self.grad_accum_split) if losses is None else (v.item() / self.grad_accum_split) + losses[k])
                    for k, v in losses_.items()
                }
        self.scaler.unscale_(self.optimizer)
        grad_norm = self.clip_grad(self.model.parameters())
        self.scaler.step(self.optimizer)
        self.scaler.update()
        return losses, grad_norm

    def train_one_epoch(self, loss_log, lr_scheduler):
        """Train over the full train_loader once; returns (smoothed loss, duration)."""
        t0 = perf_counter()
        chunks = 0
        self.model.train()
        progress_bar = tqdm(
            total=len(self.train_loader), desc='[0/{}]'.format(len(self.train_loader.sampler)),
            ascii=True, leave=True, ncols=100, bar_format='{l_bar}{bar}| [{elapsed}{postfix}]'
        )
        smoothed_loss = None
        with progress_bar:
            for batch in self.train_loader:
                chunks += batch[0].shape[0]
                losses, grad_norm = self.train_one_step(batch)
                # exponential moving average for display
                smoothed_loss = losses['loss'] if smoothed_loss is None else (0.01 * losses['loss'] + 0.99 * smoothed_loss)
                progress_bar.set_postfix(loss='%.4f' % smoothed_loss)
                progress_bar.set_description("[{}/{}]".format(chunks, len(self.train_loader.sampler)))
                progress_bar.update()
                if loss_log is not None:
                    # BUG FIX: previously read `optim.param_groups` — `optim` is
                    # an undefined name here; use the trainer's own optimizer.
                    lr = lr_scheduler.get_last_lr() if lr_scheduler is not None else [pg["lr"] for pg in self.optimizer.param_groups]
                    if len(lr) == 1:
                        lr = lr[0]
                    loss_log.append({
                        'chunks': chunks,
                        'time': perf_counter() - t0,
                        'grad_norm': grad_norm,
                        'lr': lr,
                        **losses
                    })
                if lr_scheduler is not None:
                    lr_scheduler.step()
        return smoothed_loss, perf_counter() - t0

    def validate_one_step(self, batch):
        """Score one validation batch; returns (seqs, refs, accuracies, losses)."""
        data, targets, lengths, *args = batch
        scores = self.model(data.to(self.device), *(x.to(self.device) for x in args))
        losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
        losses = {k: v.item() for k, v in losses.items()} if isinstance(losses, dict) else losses.item()
        if hasattr(self.model, 'decode_batch'):
            seqs = self.model.decode_batch(scores)
        else:
            seqs = [self.model.decode(x) for x in permute(scores, 'TNC', 'NTC')]
        refs = [decode_ref(target, self.model.alphabet) for target in targets]
        n_pre = getattr(self.model, "n_pre_context_bases", 0)
        n_post = getattr(self.model, "n_post_context_bases", 0)
        if n_pre > 0 or n_post > 0:
            # strip context bases the model is not expected to call
            refs = [ref[n_pre:len(ref) - n_post] for ref in refs]
        accs = [
            accuracy(ref, seq, min_coverage=0.5) if len(seq) else 0. for ref, seq in zip(refs, seqs)
        ]
        return seqs, refs, accs, losses

    def validate_one_epoch(self):
        """Run validation; returns (mean loss, mean accuracy, median accuracy)."""
        self.model.eval()
        with torch.no_grad():
            seqs, refs, accs, losses = zip(*(self.validate_one_step(batch) for batch in self.valid_loader))
        seqs, refs, accs = (sum(x, []) for x in (seqs, refs, accs))
        loss = np.mean([(x['loss'] if isinstance(x, dict) else x) for x in losses])
        return loss, np.mean(accs), np.median(accs)

    def init_optimizer(self, lr, **kwargs):
        """Create the AdamW optimizer; *lr* may be a list with one value per model child."""
        if isinstance(lr, (list, tuple)):
            if len(list(self.model.children())) != len(lr):
                raise ValueError('Number of lrs does not match number of model children')
            param_groups = [{'params': list(m.parameters()), 'lr': v} for (m, v) in zip(self.model.children(), lr)]
            self.optimizer = torch.optim.AdamW(param_groups, lr=lr[0], **kwargs)
        else:
            self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, **kwargs)

    def get_lr_scheduler(self, epochs, last_epoch=0):
        """Build the learning-rate scheduler via the configured factory."""
        return self.lr_scheduler_fn(self.optimizer, self.train_loader, epochs, last_epoch)

    def fit(self, workdir, epochs=1, lr=2e-3, **optim_kwargs):
        """Train for *epochs*, resuming from and checkpointing into *workdir*."""
        if self.optimizer is None:
            self.init_optimizer(lr, **optim_kwargs)

        last_epoch = load_state(workdir, self.device, self.model, self.optimizer if self.restore_optim else None)

        if self.restore_optim:
            # override learning rate to new value
            for i, pg in enumerate(self.optimizer.param_groups):
                pg["initial_lr"] = pg["lr"] = lr[i] if isinstance(lr, (list, tuple)) else lr

        lr_scheduler = self.get_lr_scheduler(epochs, last_epoch=last_epoch)

        for epoch in range(1 + last_epoch, epochs + 1):
            try:
                with bonito.io.CSVLogger(os.path.join(workdir, 'losses_{}.csv'.format(epoch))) as loss_log:
                    train_loss, duration = self.train_one_epoch(loss_log, lr_scheduler)

                model_state = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict()
                torch.save(model_state, os.path.join(workdir, "weights_%s.tar" % epoch))
                if epoch % self.save_optim_every == 0:
                    torch.save(self.optimizer.state_dict(), os.path.join(workdir, "optim_%s.tar" % epoch))

                val_loss, val_mean, val_median = self.validate_one_epoch()
            except KeyboardInterrupt:
                break

            print("[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%".format(
                epoch, workdir, val_loss, val_mean, val_median
            ))

            with bonito.io.CSVLogger(os.path.join(workdir, 'training.csv')) as training_log:
                training_log.append({
                    'time': datetime.today(),
                    'duration': int(duration),
                    'epoch': epoch,
                    'train_loss': train_loss,
                    'validation_loss': val_loss,
                    'validation_mean': val_mean,
                    'validation_median': val_median
                })
| 10,481 | 38.258427 | 129 | py |
bonito | bonito-master/bonito/data.py | import importlib
import os
from pathlib import Path
import numpy as np
from torch.utils.data import DataLoader
class ChunkDataSet:
    """Dataset of (signal chunk, target, target length) triples backed by numpy arrays."""

    def __init__(self, chunks, targets, lengths):
        # add a channel dimension: (N, T) -> (N, 1, T)
        self.chunks = np.expand_dims(chunks, axis=1)
        self.targets = targets
        self.lengths = lengths

    def __getitem__(self, i):
        chunk = self.chunks[i].astype(np.float32)
        target = self.targets[i].astype(np.int64)
        length = self.lengths[i].astype(np.int64)
        return chunk, target, length

    def __len__(self):
        return len(self.lengths)
def load_script(directory, name="dataset", suffix=".py", **kwargs):
    """
    Execute ``<directory>/<name><suffix>`` as a module and return the train
    and validation DataLoader kwargs produced by its ``Loader`` class.
    """
    script = (Path(directory) / name).with_suffix(suffix)
    spec = importlib.util.spec_from_file_location(name, script)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    loader = module.Loader(**kwargs)
    return loader.train_loader_kwargs(**kwargs), loader.valid_loader_kwargs(**kwargs)
def load_numpy(limit, directory, valid_chunks=None):
    """
    Returns training and validation DataLoaders for data in directory.

    Uses ``<directory>/validation`` when present; otherwise splits the
    training set (final 3%, or the last *valid_chunks* chunks).
    """
    train_data = load_numpy_datasets(limit=limit, directory=directory)
    validation_dir = os.path.join(directory, 'validation')
    if os.path.exists(validation_dir):
        valid_data = load_numpy_datasets(limit=valid_chunks, directory=validation_dir)
    else:
        print("[validation set not found: splitting training set]")
        if valid_chunks is None:
            cut = np.floor(len(train_data[0]) * 0.97).astype(np.int32)
        else:
            cut = max(0, len(train_data[0]) - valid_chunks)
        valid_data = [x[cut:] for x in train_data]
        train_data = [x[:cut] for x in train_data]
    return (
        {"dataset": ChunkDataSet(*train_data), "shuffle": True},
        {"dataset": ChunkDataSet(*valid_data), "shuffle": False},
    )
def load_numpy_datasets(limit=None, directory=None):
    """
    Returns numpy chunks, targets and lengths arrays.

    Arrays are memory-mapped; an optional ``indices.npy`` selects (and
    orders) a subset of rows, and *limit* truncates the result.
    """
    if directory is None:
        # NOTE(review): `default_data` is not defined in this module — this
        # branch appears to rely on an import missing from this file; confirm.
        directory = default_data

    chunks = np.load(os.path.join(directory, "chunks.npy"), mmap_mode='r')
    targets = np.load(os.path.join(directory, "references.npy"), mmap_mode='r')
    lengths = np.load(os.path.join(directory, "reference_lengths.npy"), mmap_mode='r')

    index_file = os.path.join(directory, "indices.npy")
    if os.path.exists(index_file):
        idx = np.load(index_file, mmap_mode='r')
        idx = idx[idx < lengths.shape[0]]
        if limit:
            idx = idx[:limit]
        return chunks[idx, :], targets[idx, :], lengths[idx]

    if limit:
        chunks = chunks[:limit]
        targets = targets[:limit]
        lengths = lengths[:limit]
    return np.array(chunks), np.array(targets), np.array(lengths)
| 2,868 | 32.752941 | 86 | py |
bonito | bonito-master/bonito/nn.py | """
Bonito nn modules.
"""
import torch
from torch.nn import Module
from torch.nn.init import orthogonal_
from torch.nn.utils.fusion import fuse_conv_bn_eval
# Global registry mapping lowercased layer class names to classes.
layers = {}


def register(layer):
    """Add *layer* to the registry under its lowercased class name; usable as a decorator."""
    layer.name = layer.__name__.lower()
    layers[layer.name] = layer
    return layer


# expose the builtin activations through the registry
for _activation in (torch.nn.ReLU, torch.nn.Tanh):
    register(_activation)
@register
class Linear(Module):
    """Serialisable wrapper around ``torch.nn.Linear``."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.linear = torch.nn.Linear(
            in_features=in_features, out_features=out_features, bias=bias
        )

    def forward(self, x):
        return self.linear(x)

    def to_dict(self, include_weights=False):
        spec = {
            "in_features": self.in_features,
            "out_features": self.out_features,
            "bias": self.bias,
        }
        if include_weights:
            spec['params'] = {
                'W': self.linear.weight,
                # `self.bias` is a bool and never None, so this branch always
                # stores the (possibly None) bias tensor — quirk preserved
                'b': self.linear.bias if self.bias is not None else [],
            }
        return spec
@register
class Swish(torch.nn.SiLU):
    """Swish activation — an alias of SiLU, registered under the name 'swish'."""
    pass
@register
class Clamp(Module):
    """Element-wise clamp of the input to the range [min, max]."""

    def __init__(self, min, max):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        return x.clamp(min=self.min, max=self.max)

    def to_dict(self, include_weights=False):
        return {'min': self.min, 'max': self.max}
@register
class Serial(torch.nn.Sequential):
    """Sequential container that can also return every intermediate feature map."""

    def __init__(self, sublayers):
        super().__init__(*sublayers)

    def forward(self, x, return_features=False):
        if not return_features:
            return super().forward(x)
        features = []
        for sublayer in self:
            x = sublayer(x)
            features.append(x)
        return x, features

    def to_dict(self, include_weights=False):
        return {
            'sublayers': [to_dict(m, include_weights) for m in self._modules.values()]
        }
@register
class Reverse(Module):
    """Run a sublayer on the time-reversed input and flip the output back."""

    def __init__(self, sublayers):
        super().__init__()
        self.layer = Serial(sublayers) if isinstance(sublayers, list) else sublayers

    def forward(self, x):
        return self.layer(x.flip(0)).flip(0)

    def to_dict(self, include_weights=False):
        if isinstance(self.layer, Serial):
            return self.layer.to_dict(include_weights)
        return {'sublayers': to_dict(self.layer, include_weights)}
@register
class BatchNorm(Module):
    """Serialisable wrapper around ``torch.nn.BatchNorm1d``."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super().__init__()
        self.bn = torch.nn.BatchNorm1d(num_features, eps, momentum, affine, track_running_stats)

    def forward(self, x):
        return self.bn(x)

    def to_dict(self, include_weights=False):
        spec = {
            "num_features": self.bn.num_features,
            "eps": self.bn.eps,
            "momentum": self.bn.momentum,
            "affine": self.bn.affine,
            "track_running_stats": self.bn.track_running_stats,
        }
        if include_weights:
            params = {}
            if spec["affine"]:
                params["W"] = self.bn.weight
                params["b"] = self.bn.bias
            if spec["track_running_stats"]:
                params["running_mean"] = self.bn.running_mean
                params["running_var"] = self.bn.running_var
            spec["params"] = params
        return spec
@register
class Convolution(Module):
    """1d convolution with optional normalisation layer and activation."""

    def __init__(self, insize, size, winlen, stride=1, padding=0, bias=True, activation=None, norm=None):
        super().__init__()
        self.conv = torch.nn.Conv1d(insize, size, winlen, stride=stride, padding=padding, bias=bias)
        # resolve a registry name to a layer class, else use the value as given
        self.activation = layers.get(activation, lambda: activation)()
        if isinstance(norm, dict):
            self.norm = from_dict(norm)
        elif isinstance(norm, str):
            self.norm = layers[norm](size)
        else:
            self.norm = norm

    def forward(self, x):
        y = self.conv(x)
        if self.norm is not None:
            y = self.norm(y)
        return y if self.activation is None else self.activation(y)

    def to_dict(self, include_weights=False):
        spec = {
            "insize": self.conv.in_channels,
            "size": self.conv.out_channels,
            "bias": self.conv.bias is not None,
            "winlen": self.conv.kernel_size[0],
            "stride": self.conv.stride[0],
            "padding": self.conv.padding[0],
            "activation": self.activation.name if self.activation else None,
            "norm": self.norm.to_dict(include_weights) if self.norm is not None else None,
        }
        if include_weights:
            spec['params'] = {
                'W': self.conv.weight,
                'b': self.conv.bias if self.conv.bias is not None else [],
            }
        return spec
@register
class LinearCRFEncoder(Module):
    """Linear projection onto CRF transition scores, with an optional fixed blank score."""

    def __init__(self, insize, n_base, state_len, bias=True, scale=None, activation=None, blank_score=None, expand_blanks=True):
        super().__init__()
        self.scale = scale
        self.n_base = n_base
        self.state_len = state_len
        self.blank_score = blank_score
        self.expand_blanks = expand_blanks
        if blank_score is None:
            size = (n_base + 1) * n_base**state_len
        else:
            # blanks are injected at forward time, so the projection omits them
            size = n_base**(state_len + 1)
        self.linear = torch.nn.Linear(insize, size, bias=bias)
        self.activation = layers.get(activation, lambda: activation)()

    def forward(self, x):
        scores = self.linear(x)
        if self.activation is not None:
            scores = self.activation(scores)
        if self.scale is not None:
            scores = scores * self.scale
        if self.blank_score is not None and self.expand_blanks:
            T, N, C = scores.shape
            # prepend the constant blank score to every transition group
            scores = torch.nn.functional.pad(
                scores.view(T, N, C // self.n_base, self.n_base),
                (1, 0, 0, 0, 0, 0, 0, 0),
                value=self.blank_score
            ).view(T, N, -1)
        return scores

    def to_dict(self, include_weights=False):
        spec = {
            'insize': self.linear.in_features,
            'n_base': self.n_base,
            'state_len': self.state_len,
            'bias': self.linear.bias is not None,
            'scale': self.scale,
            'activation': self.activation.name if self.activation else None,
            'blank_score': self.blank_score,
        }
        if include_weights:
            spec['params'] = {
                'W': self.linear.weight,
                'b': self.linear.bias if self.linear.bias is not None else [],
            }
        return spec

    def extra_repr(self):
        return 'n_base={}, state_len={}, scale={}, blank_score={}, expand_blanks={}'.format(
            self.n_base, self.state_len, self.scale, self.blank_score, self.expand_blanks
        )
@register
class Permute(Module):
    """Permute input dimensions according to ``dims``."""

    def __init__(self, dims):
        super().__init__()
        self.dims = dims

    def forward(self, x):
        return x.permute(*self.dims)

    def to_dict(self, include_weights=False):
        return {'dims': self.dims}

    def extra_repr(self):
        return 'dims={}'.format(self.dims)
def truncated_normal(size, dtype=torch.float32, device=None, num_resample=5):
    """
    Sample a tensor of shape *size* from a normal truncated to (-2, 2).

    Draws *num_resample* candidates per element, keeps the first in-range
    draw, and clamps as a final fallback.
    """
    draws = torch.empty(size + (num_resample,), dtype=torch.float32, device=device).normal_()
    keep = ((draws < 2) & (draws > -2)).max(-1, keepdim=True)[1]
    return torch.clamp_(draws.gather(-1, keep).squeeze(-1), -2, 2)
class RNNWrapper(Module):
    """
    Wrap a torch RNN with optional time reversal, orthogonal weight
    initialisation and disabling of the hidden-state bias.
    """

    def __init__(
        self, rnn_type, *args, reverse=False, orthogonal_weight_init=True, disable_state_bias=True, bidirectional=False, **kwargs
    ):
        super().__init__()
        if reverse and bidirectional:
            raise Exception("'reverse' and 'bidirectional' should not both be set to True")
        self.reverse = reverse
        self.rnn = rnn_type(*args, bidirectional=bidirectional, **kwargs)
        self.init_orthogonal(orthogonal_weight_init)
        self.init_biases()
        if disable_state_bias:
            self.disable_state_bias()

    def forward(self, x):
        if self.reverse:
            x = x.flip(0)
        y, _ = self.rnn(x)
        return y.flip(0) if self.reverse else y

    def init_biases(self, types=('bias_ih',)):
        """Initialise the selected bias parameters from a scaled truncated normal."""
        for name, param in self.rnn.named_parameters():
            if any(t in name for t in types):
                with torch.no_grad():
                    param.set_(0.5 * truncated_normal(param.shape, dtype=param.dtype, device=param.device))

    def init_orthogonal(self, types=True):
        """Orthogonally initialise each gate-sized slice of the selected weights."""
        if not types:
            return
        if types == True:
            types = ('weight_ih', 'weight_hh')
        for name, weight in self.rnn.named_parameters():
            if any(t in name for t in types):
                for row in range(0, weight.size(0), self.rnn.hidden_size):
                    orthogonal_(weight[row:row + self.rnn.hidden_size])

    def disable_state_bias(self):
        """Zero the hidden-state biases and freeze them."""
        for name, param in self.rnn.named_parameters():
            if 'bias_hh' in name:
                param.requires_grad = False
                param.zero_()

    def extra_repr(self):
        return 'reverse={}'.format(bool(self.reverse))
@register
class LSTM(RNNWrapper):
    """Unidirectional (optionally time-reversed) LSTM built on RNNWrapper."""

    def __init__(self, size, insize, bias=True, reverse=False):
        super().__init__(torch.nn.LSTM, insize, size, bias=bias, reverse=reverse)

    def to_dict(self, include_weights=False):
        spec = {
            'size': self.rnn.hidden_size,
            'insize': self.rnn.input_size,
            'bias': self.rnn.bias,
            'reverse': self.reverse,
        }
        if include_weights:
            hidden = self.rnn.hidden_size
            # expose the four gate blocks separately
            spec['params'] = {
                'iW': self.rnn.weight_ih_l0.reshape(4, hidden, self.rnn.input_size),
                'sW': self.rnn.weight_hh_l0.reshape(4, hidden, hidden),
                'b': self.rnn.bias_ih_l0.reshape(4, hidden),
            }
        return spec
def to_dict(layer, include_weights=False):
    """Serialise *layer* to a dict tagged with its registry name."""
    spec = {'type': layer.name}
    if hasattr(layer, 'to_dict'):
        spec.update(layer.to_dict(include_weights))
    return spec
def from_dict(model_dict, layer_types=None):
    """Instantiate a layer from a ``to_dict``-style spec, recursing into sublayers."""
    spec = model_dict.copy()
    registry = layers if layer_types is None else layer_types
    cls = registry[spec.pop('type')]
    if 'sublayers' in spec:
        subs = spec['sublayers']
        if isinstance(subs, list):
            spec['sublayers'] = [from_dict(s, layer_types) for s in subs]
        else:
            spec['sublayers'] = from_dict(subs, layer_types)
    try:
        return cls(**spec)
    except Exception as e:
        # surface the failing layer type and its arguments
        raise Exception(f'Failed to build layer of type {cls} with args {spec}') from e
def fuse_bn_(m):
    """
    Sets the module m to eval mode and if a Convolution fuses any batchnorm layer.

    Intended for use with ``Module.apply`` before inference; fusing folds the
    batchnorm statistics into the conv weights so the norm layer can be dropped.
    """
    # mark as eval without recursing (apply() visits children itself)
    m.training = False
    if isinstance(m, Convolution) and isinstance(m.norm, BatchNorm):
        m.conv = fuse_conv_bn_eval(m.conv, m.norm.bn)
        m.norm = None
| 11,194 | 30.713881 | 133 | py |
bonito | bonito-master/bonito/util.py | """
Bonito utils
"""
import os
import re
import sys
import random
from glob import glob
from itertools import groupby
from operator import itemgetter
from importlib import import_module
from collections import deque, defaultdict, OrderedDict
from torch.utils.data import DataLoader
import toml
import torch
import koi.lstm
import parasail
import numpy as np
from torch.cuda import get_device_capability
try:
from claragenomics.bindings import cuda
from claragenomics.bindings.cudapoa import CudaPoaBatch
except ImportError:
pass
# package-relative data, model and config locations
__dir__ = os.path.dirname(os.path.realpath(__file__))
__data__ = os.path.join(__dir__, "data")
__models__ = os.path.join(__dir__, "models")
__configs__ = os.path.join(__dir__, "models/configs")

# CIGAR tokeniser: "<count><op>" pairs
split_cigar = re.compile(r"(?P<len>\d+)(?P<op>\D+)")

default_data = os.path.join(__data__, "dna_r9.4.1")
default_config = os.path.join(__configs__, "dna_r9.4.1@v3.1.toml")
def init(seed, device, deterministic=True):
    """
    Initialise random libs and setup cudnn
    https://pytorch.org/docs/stable/notes/randomness.html
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if device == "cpu":
        return
    # GPU: configure cudnn and insist that CUDA is actually usable
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = not deterministic
    assert torch.cuda.is_available()
def permute(x, input_layout, output_layout):
    """
    Permute `x` from `input_layout` to `output_layout`

    >>> permute(x, 'TNC', 'NTC')
    """
    if input_layout == output_layout:
        return x
    order = [input_layout.index(dim) for dim in output_layout]
    return x.permute(*order)
def concat(xs, dim=0):
    """
    Type agnostic concat.

    Dispatches on the type of the first element; dicts are concatenated
    key-wise and recursively.
    """
    head = xs[0]
    if isinstance(head, torch.Tensor):
        return torch.cat(xs, dim=dim)
    if isinstance(head, np.ndarray):
        return np.concatenate(xs, axis=dim)
    if isinstance(head, list):
        return [item for sub in xs for item in sub]
    if isinstance(head, str):
        return ''.join(xs)
    if isinstance(head, dict):
        return {k: concat([x[k] for x in xs], dim) for k in head.keys()}
    raise TypeError
def select_range(x, start, end, dim=0):
    """
    Type agnostic range select.

    Slices [start:end) along *dim*; dicts are sliced value-wise.
    """
    if isinstance(x, dict):
        return {key: select_range(val, start, end, dim) for key, val in x.items()}
    if dim == 0 or isinstance(x, list):
        return x[start:end]
    slicer = (*(slice(None),) * dim, slice(start, end))
    return x[slicer]
def size(x, dim=0):
    """
    Type agnostic size.

    Uses ``x.shape`` when available, otherwise ``len`` (dim 0 only).
    """
    if hasattr(x, 'shape'):
        return x.shape[dim]
    if dim == 0:
        return len(x)
    raise TypeError
def half_supported():
    """
    Returns whether FP16 is supported on the GPU

    True when the current CUDA device has compute capability >= 7.0;
    False when no usable CUDA device is present.
    """
    try:
        return get_device_capability()[0] >= 7
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
    except Exception:
        return False
def phred(prob, scale=1.0, bias=0.0):
    """
    Converts `prob` into a ascii encoded phred quality score between 0 and 40.
    """
    # error probability floored at 1e-4 caps the score at Q40
    err = max(1 - prob, 1e-4)
    score = -10 * np.log10(err) * scale + bias
    return chr(int(np.round(score) + 33))
def mean_qscore_from_qstring(qstring):
    """
    Convert qstring into a mean qscore

    The mean is taken over error probabilities (not phred values) and
    converted back; empty strings map to 0.0.
    """
    if not len(qstring):
        return 0.0
    phreds = np.array(qstring, 'c').view(np.uint8) - 33
    mean_err = np.exp(phreds * (-np.log(10) / 10.)).mean()
    return -10 * np.log10(max(mean_err, 1e-4))
def decode_ref(encoded, labels):
    """
    Convert a integer encoded reference into a string and remove blanks

    Zero entries (blanks) are dropped; each other value indexes *labels*.
    """
    return ''.join(labels[token] for token in encoded.tolist() if token)
def column_to_set(filename, idx=0, skip_header=False):
    """
    Pull a column from a file and return a set of the values.

    Returns None (implicitly) when *filename* is falsy or not a file.
    """
    if filename and os.path.isfile(filename):
        with open(filename, 'r') as tsv:
            if skip_header:
                next(tsv)
            return {row.strip().split()[idx] for row in tsv}
def chunk(signal, chunksize, overlap):
    """
    Convert a read into overlapping chunks before calling

    Short signals are tiled up to *chunksize*; otherwise the signal is
    unfolded into overlapping windows with any leading stub prepended as
    its own (overlapping) first chunk.
    """
    if signal.ndim == 1:
        signal = signal.unsqueeze(0)
    T = signal.shape[-1]
    if chunksize == 0:
        return signal[None, :]
    if T < chunksize:
        reps, overhang = divmod(chunksize, T)
        # np.tile operates only on dimension -1 by default,
        # whereas torch.repeat requires explicit listing of all input dimensions eg (1,n) or (1,1,n)
        tiled = torch.from_numpy(np.tile(signal, reps))
        return torch.cat((tiled, signal[..., :overhang]), dim=-1)[None, :]
    stub = (T - overlap) % (chunksize - overlap)
    windows = signal[..., stub:].unfold(-1, chunksize, chunksize - overlap).movedim(-2, 0)
    if stub > 0:
        windows = torch.cat([signal[None, ..., :chunksize], windows], dim=0)
    return windows
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
    """
    Stitch chunks together with a given overlap

    Half of the overlap is trimmed from each side of interior chunks; the
    first chunk is cut at the stub boundary so the result has the expected
    total length. *reverse* handles chunks produced in reverse time order.
    """
    if chunks.shape[0] == 1:
        return chunks.squeeze(0)

    half = overlap // 2
    start, end = half // stride, (chunksize - half) // stride
    stub = (length - overlap) % (chunksize - overlap)
    first_chunk_end = (stub + half) // stride if (stub > 0) else end

    if reverse:
        chunks = list(chunks)
        return concat([
            chunks[-1][:-start],
            *(x[-end:-start] for x in reversed(chunks[1:-1])),
            chunks[0][-first_chunk_end:],
        ])
    return concat([
        chunks[0, :first_chunk_end],
        *chunks[1:-1, start:end],
        chunks[-1, start:],
    ])
def batchify(items, batchsize, dim=0):
    """
    Batch up items up to `batch_size`.

    Yields ``(keys, batch)`` pairs where each key is ``(item_key, (pos,
    pos+n))`` recording where that item's slice sits inside the batch.
    """
    stack, pos = [], 0
    for key, value in items:
        breaks = range(batchsize - pos, size(value, dim), batchsize)
        for begin, finish in zip([0, *breaks], [*breaks, size(value, dim)]):
            piece = select_range(value, begin, finish, dim)
            stack.append(((key, (pos, pos + finish - begin)), piece))
            if pos + finish - begin == batchsize:
                keys, pieces = zip(*stack)
                yield keys, concat(pieces, dim)
                stack, pos = [], 0
            else:
                pos += finish - begin

    # flush the final, possibly partial, batch
    if len(stack):
        keys, pieces = zip(*stack)
        yield keys, concat(pieces, dim)
def unbatchify(batches, dim=0):
    """
    Reconstruct batches.

    Inverts ``batchify``: slices each batch back into per-key pieces and
    concatenates consecutive pieces belonging to the same key.
    """
    flat = (
        (key, select_range(value, begin, finish, dim))
        for sub_batches, value in batches
        for key, (begin, finish) in sub_batches
    )
    return (
        (key, concat([piece for _, piece in group], dim))
        for key, group in groupby(flat, itemgetter(0))
    )
def load_symbol(config, symbol):
    """
    Dynamic load a symbol from module specified in model config.

    *config* may be a config dict or a model directory (name or path),
    in which case its ``config.toml`` is loaded first.
    """
    if not isinstance(config, dict):
        if not os.path.isdir(config) and os.path.isdir(os.path.join(__models__, config)):
            dirname = os.path.join(__models__, config)
        else:
            dirname = config
        config = toml.load(os.path.join(dirname, 'config.toml'))
    module = import_module(config['model']['package'])
    return getattr(module, symbol)
def match_names(state_dict, model):
    """
    Map checkpoint keys onto model keys by aligning parameters of equal
    shape (both sides sorted by shape, then original position).
    """
    def ordered_keys_and_shapes(sd):
        ranked = sorted((v.shape, i, k) for i, (k, v) in enumerate(sd.items()))
        return zip(*[(k, s) for s, i, k in ranked])

    k1, s1 = ordered_keys_and_shapes(state_dict)
    k2, s2 = ordered_keys_and_shapes(model.state_dict())
    # every parameter must have a same-shaped partner
    assert s1 == s2
    remap = dict(zip(k1, k2))
    return OrderedDict((k, remap[k]) for k in state_dict.keys())
def get_last_checkpoint(dirname):
    """
    Return the path of the highest-numbered ``weights_<n>.tar`` in *dirname*.

    Raises FileNotFoundError when no weight files exist.
    """
    weight_files = glob(os.path.join(dirname, "weights_*.tar"))
    if not weight_files:
        raise FileNotFoundError("no model weights found in '%s'" % dirname)
    latest = max(int(re.sub(".*_([0-9]+).tar", "\\1", f)) for f in weight_files)
    return os.path.join(dirname, 'weights_%s.tar' % latest)
def set_config_defaults(config, chunksize=None, batchsize=None, overlap=None, quantize=False):
    """Fill in basecaller settings on *config*, letting explicit flags win."""
    params = config.get("basecaller", {})
    # use `value or dict.get(key)` rather than `dict.get(key, value)` to make
    # flags override values in config
    params["chunksize"] = chunksize or params.get("chunksize", 4000)
    params["overlap"] = overlap if overlap is not None else params.get("overlap", 500)
    params["batchsize"] = batchsize or params.get("batchsize", 64)
    # NOTE(review): `quantize` defaults to False (not None), so the config
    # value is only consulted when the caller explicitly passes quantize=None.
    params["quantize"] = params.get("quantize") if quantize is None else quantize
    config["basecaller"] = params
    return config
def load_model(dirname, device, weights=None, half=None, chunksize=None, batchsize=None, overlap=None, quantize=False, use_koi=False):
    """
    Load a model config and weights off disk from `dirname`.

    *dirname* may also be the name of a packaged model; *weights* selects a
    specific checkpoint number (default: the newest).
    """
    if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
        dirname = os.path.join(__models__, dirname)
    if weights is None:
        weights = get_last_checkpoint(dirname)
    else:
        weights = os.path.join(dirname, 'weights_%s.tar' % weights)
    config = set_config_defaults(
        toml.load(os.path.join(dirname, 'config.toml')),
        chunksize, batchsize, overlap, quantize,
    )
    return _load_model(weights, config, device, half, use_koi)
def _load_model(model_file, config, device, half=None, use_koi=False):
    """
    Build the model described by `config`, load weights from `model_file`
    and move the model onto `device` in eval mode.

    If `half` is None, half precision is used whenever supported. With
    `use_koi`, the model is switched to its koi (CUDA decoding) path before
    the weights are loaded.
    """
    device = torch.device(device)
    Model = load_symbol(config, "Model")
    model = Model(config)
    # chunksize must be an exact multiple of the model stride
    config["basecaller"]["chunksize"] -= config["basecaller"]["chunksize"] % model.stride
    # overlap must be even multiple of stride for correct stitching
    config["basecaller"]["overlap"] -= config["basecaller"]["overlap"] % (model.stride * 2)
    if use_koi:
        # must happen before load_state_dict: use_koi rewires submodules
        model.use_koi(
            batchsize=config["basecaller"]["batchsize"],
            chunksize=config["basecaller"]["chunksize"],
            quantize=config["basecaller"]["quantize"],
        )
    state_dict = torch.load(model_file, map_location=device)
    # checkpoint keys may not match the model's keys; remap by tensor shape
    state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # strip any DataParallel 'module.' prefix left over from training
        name = k.replace('module.', '')
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    if half is None:
        half = half_supported()
    if half: model = model.half()
    model.eval()
    model.to(device)
    return model
def parasail_to_sam(result, seq):
    """
    Extract reference start and sam compatible cigar string.

    :param result: parasail alignment result.
    :param seq: query sequence.

    :returns: reference start coordinate, cigar string.
    """
    cigstr = result.cigar.decode.decode()
    # first cigar operation, e.g. ('12', 'I') for "12I..."
    first = re.search(split_cigar, cigstr)

    first_count, first_op = first.groups()
    prefix = first.group()
    rstart = result.cigar.beg_ref
    cliplen = result.cigar.beg_query

    # soft-clip any unaligned prefix of the query
    clip = '' if cliplen == 0 else '{}S'.format(cliplen)
    if first_op == 'I':
        # a leading insertion is absorbed into the soft clip
        pre = '{}S'.format(int(first_count) + cliplen)
    elif first_op == 'D':
        # a leading deletion is dropped; the reference start moves instead
        pre = clip
        rstart = int(first_count)
    else:
        pre = '{}{}'.format(clip, prefix)

    mid = cigstr[len(prefix):]
    # soft-clip any unaligned suffix of the query
    end_clip = len(seq) - result.end_query - 1
    suf = '{}S'.format(end_clip) if end_clip > 0 else ''
    new_cigstr = ''.join((pre, mid, suf))
    return rstart, new_cigstr
def accuracy(ref, seq, balanced=False, min_coverage=0.0):
    """
    Calculate the accuracy between `ref` and `seq` as a percentage.

    Aligns the two with Smith-Waterman and scores cigar operations.
    Returns 0.0 when the alignment covers less than `min_coverage` of the
    reference. With `balanced=True`, insertions are charged against the
    numerator instead of being added to the denominator.
    """
    alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
    counts = defaultdict(int)

    # fraction of the query / reference covered by the local alignment
    # NOTE(review): q_coverage is currently unused
    q_coverage = len(alignment.traceback.query) / len(seq)
    r_coverage = len(alignment.traceback.ref) / len(ref)

    if r_coverage < min_coverage:
        return 0.0

    _, cigar = parasail_to_sam(alignment, seq)

    for count, op in re.findall(split_cigar, cigar):
        counts[op] += int(count)

    if balanced:
        accuracy = (counts['='] - counts['I']) / (counts['='] + counts['X'] + counts['D'])
    else:
        accuracy = counts['='] / (counts['='] + counts['I'] + counts['X'] + counts['D'])
    return accuracy * 100
def print_alignment(ref, seq):
    """
    Print the alignment between `ref` and `seq` and return its score.
    """
    alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
    traceback = alignment.traceback
    for row in (traceback.ref, traceback.comp, traceback.query):
        print(row)
    print(" Score=%s" % alignment.score)
    return alignment.score
def poa(groups, max_poa_sequences=100, gpu_mem_per_batch=0.9):
    """
    Generate consensus for POA groups.

    Args:
        groups : A list of lists of sequences for which consensus is to be generated.
        max_poa_sequences : maximum number of POA groups per CUDA batch.
        gpu_mem_per_batch : fraction of free GPU memory given to the batch.
    """
    free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
    gpu_mem_per_batch *= free
    batch = CudaPoaBatch(max_poa_sequences, gpu_mem_per_batch, stream=None, output_type="consensus")
    results = []

    for i, group in enumerate(groups, start=1):
        group_status, seq_status = batch.add_poa_group(group)

        # Once batch is full, run POA processing
        if group_status == 1 or i == len(groups):
            batch.generate_poa()
            consensus, coverage, status = batch.get_consensus()
            results.extend(consensus)
            batch.reset()
            # re-add the group that triggered the flush into the fresh batch.
            # NOTE(review): when the flush was triggered by the *final* group
            # this re-adds it after its consensus was already generated.
            group_status, seq_status = batch.add_poa_group(group)

    return results
| 13,291 | 30.49763 | 134 | py |
bonito | bonito-master/bonito/schedule.py | import math
import numpy as np
from torch.optim.lr_scheduler import LambdaLR
def linear_warmup_cosine_decay(end_ratio=0.01, warmup_steps=500, **kwargs):
    """
    Factory for a linear-warmup, cosine-decay learning rate scheduler.
    """
    def make_scheduler(optimizer, train_loader, epochs, last_epoch):
        steps_per_epoch = len(train_loader)
        return func_scheduler(
            optimizer=optimizer,
            func=cosine_decay_schedule(1.0, end_ratio),
            total_steps=epochs * steps_per_epoch,
            warmup_steps=warmup_steps,
            start_step=last_epoch * steps_per_epoch,
        )
    return make_scheduler
def linear_warmup_const_inverse_sqrt_decay(
    warmup_steps=1000,
    decay_start_epoch=10,
    decay_scale=1.0,
    linear_cooldown_n_epochs=0,
    linear_cooldown_end_ratio=0.0,
    **kwargs
):
    """
    Linear warmup, hold const, inverse sqrt decay, optional cooldown scheduler

    Four phases over the run: ramp 0 -> 1 during `warmup_steps`, hold at 1
    until `decay_start_epoch`, decay as 1/sqrt until the cooldown window,
    then ramp linearly down to `linear_cooldown_end_ratio`.
    """
    def gen_sched(optimizer, train_loader, epochs, last_epoch):
        steps_per_epoch = len(train_loader)
        start_step = steps_per_epoch*last_epoch
        total_steps = steps_per_epoch * epochs
        n_decay_epochs = epochs - decay_start_epoch - linear_cooldown_n_epochs
        decay_sched = inverse_sqrt_decay_schedule(decay_scale*n_decay_epochs)
        # knots are phase boundaries expressed as fractions of the whole run
        func = piecewise_schedule(
            [
                warmup_steps / total_steps,
                decay_start_epoch / epochs,
                (epochs - linear_cooldown_n_epochs) / epochs
            ],
            [
                linear_schedule(0.0, 1.0),
                const_schedule(1.0),
                decay_sched,
                # cooldown starts from wherever the decay ended
                linear_schedule(
                    decay_sched(1.0),
                    linear_cooldown_end_ratio
                )
            ]
        )
        return LambdaLR(optimizer, (lambda step: func((step + start_step) / total_steps)))
    return gen_sched
def linear_cooldown(end_ratio=0.0, **kwargs):
    """
    Factory for a linear cooldown scheduler: the learning rate decays
    linearly from its base value down to `end_ratio` of it over the run.
    """
    def make_scheduler(optimizer, train_loader, epochs, last_epoch):
        return func_scheduler(
            optimizer=optimizer,
            func=linear_schedule(1.0, end_ratio),
            total_steps=epochs * len(train_loader),
            start_step=0,
        )
    return make_scheduler
#-------------------------------------------------------------------------------
def const_schedule(y):
    """
    Return a schedule that is constant at `y` for every t.
    """
    def schedule(t):
        return y
    return schedule
def linear_schedule(y0, y1):
    """
    Return a schedule interpolating linearly from `y0` (t=0) to `y1` (t=1).
    """
    def schedule(t):
        return y0 + (y1 - y0) * t
    return schedule
def cosine_decay_schedule(y0, y1):
    """
    Return a half-cosine schedule decaying from `y0` (t=0) to `y1` (t=1).
    """
    def schedule(t):
        return y1 + 0.5 * (y0 - y1) * (np.cos(t * np.pi) + 1.0)
    return schedule
def piecewise_schedule(knots, funcs):
    """
    Stitch `funcs` together over [0, 1]: `funcs[i]` covers the interval
    between `knots[i-1]` and `knots[i]` (implicit endpoints 0 and 1) and is
    evaluated with t rescaled to [0, 1] within its interval.
    """
    def schedule(t):
        i = np.searchsorted(knots, t)
        lo = knots[i - 1] if i > 0 else 0.0
        hi = knots[i] if i < len(knots) else 1.0
        return funcs[i]((t - lo) / (hi - lo))
    return schedule
def inverse_sqrt_decay_schedule(scale):
    """
    Return a schedule decaying as 1 / sqrt(1 + scale * t).
    """
    def schedule(t):
        return 1.0 / math.sqrt(1 + scale * t)
    return schedule
def func_scheduler(optimizer, func, total_steps, warmup_steps=None, warmup_ratio=0.1, start_step=0):
    """
    Wrap a schedule `func` over normalised progress t = step / total_steps
    into a LambdaLR, optionally prepending a linear warmup from
    `warmup_ratio * func(0)` up to `func(0)`.
    """
    if warmup_steps:
        initial = func(0.0)
        func = piecewise_schedule(
            [warmup_steps / total_steps],
            [linear_schedule(warmup_ratio * initial, initial), func]
        )
    def lr_lambda(step):
        return func((step + start_step) / total_steps)
    return LambdaLR(optimizer, lr_lambda)
| 3,330 | 26.528926 | 100 | py |
bonito | bonito-master/bonito/ctc/basecall.py | """
Bonito basecall
"""
import torch
import numpy as np
from functools import partial
from bonito.multiprocessing import process_map
from bonito.util import mean_qscore_from_qstring
from bonito.util import chunk, stitch, batchify, unbatchify, permute
def basecall(model, reads, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None):
    """
    Basecalls a set of reads.

    Builds a lazy pipeline: chunk each read's signal, batch chunks across
    reads, score them with the model, stitch the per-read scores back
    together, then decode in a worker pool. Yields (read, result) pairs.
    """
    chunks = (
        (read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads
    )
    scores = unbatchify(
        (k, compute_scores(model, v)) for k, v in batchify(chunks, batchsize)
    )
    scores = (
        (read, {'scores': stitch(v, chunksize, overlap, len(read.signal), model.stride)}) for read, v in scores
    )
    decoder = partial(decode, decode=model.decode, beamsize=beamsize, qscores=qscores, stride=model.stride)
    basecalls = process_map(decoder, scores, n_proc=4)
    return basecalls
def compute_scores(model, batch):
    """
    Run the model over one batch in half precision and return the output
    probabilities in NTC layout on the CPU as float32.
    """
    with torch.no_grad():
        device = next(model.parameters()).device
        data = batch.to(torch.half).to(device)
        scores = permute(model(data), 'TNC', 'NTC')
    return scores.cpu().to(torch.float32)
def decode(scores, decode, beamsize=5, qscores=False, stride=1):
    """
    Convert the network scores into a sequence.

    :param scores: dict holding the network output under 'scores'.
    :param decode: model decode function (viterbi / beam search).
    :param beamsize: beam width for the beam search pass.
    :param qscores: if True, keep the greedy qstring/path in the output.
    :param stride: model stride, passed through to the result.
    :returns: dict with 'sequence', 'qstring', 'stride' and 'moves'.
    """
    # do a greedy decode to get a sensible qstring/path; the decoder returns
    # sequence and qstring concatenated, split at the path length
    seq, path = decode(scores['scores'], beamsize=1, qscores=True, return_path=True)
    seq, qstring = seq[:len(path)], seq[len(path):]

    # beam search will produce a better sequence but doesn't produce a sensible qstring/path
    if not (qscores or beamsize == 1):
        try:
            seq = decode(scores['scores'], beamsize=beamsize)
            path = None
            qstring = '*'
        except Exception:
            # fall back to the greedy result rather than failing the read
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            pass

    return {'sequence': seq, 'qstring': qstring, 'stride': stride, 'moves': path}
| 2,018 | 31.564516 | 111 | py |
bonito | bonito-master/bonito/ctc/model.py | """
Bonito Model template
"""
import numpy as np
from bonito.nn import Permute, layers
import torch
from torch.nn.functional import log_softmax, ctc_loss
from torch.nn import Module, ModuleList, Sequential, Conv1d, BatchNorm1d, Dropout
from fast_ctc_decode import beam_search, viterbi_search
class Model(Module):
    """
    Model template for QuartzNet style architectures
    https://arxiv.org/pdf/1910.10261.pdf
    """
    def __init__(self, config):
        super(Model, self).__init__()
        if 'qscore' not in config:
            self.qbias = 0.0
            self.qscale = 1.0
        else:
            self.qbias = config['qscore']['bias']
            self.qscale = config['qscore']['scale']

        self.config = config
        self.stride = config['block'][0]['stride'][0]
        self.alphabet = config['labels']['labels']
        self.features = config['block'][-1]['filters']
        self.encoder = Encoder(config)
        self.decoder = Decoder(self.features, len(self.alphabet))

    def forward(self, x):
        encoded = self.encoder(x)
        return self.decoder(encoded)

    def decode(self, x, beamsize=5, threshold=1e-3, qscores=False, return_path=False):
        """
        Decode log-probabilities into a sequence: viterbi when beamsize is 1
        or qscores are requested, beam search otherwise.
        """
        x = x.exp().cpu().numpy().astype(np.float32)
        if beamsize == 1 or qscores:
            seq, path = viterbi_search(x, self.alphabet, qscores, self.qscale, self.qbias)
        else:
            seq, path = beam_search(x, self.alphabet, beamsize, threshold)
        if return_path: return seq, path
        return seq

    def ctc_label_smoothing_loss(self, log_probs, targets, lengths, weights=None):
        """
        CTC loss with label smoothing.

        `weights` is an optional per-class smoothing weight vector; the
        default puts 0.4 on the blank class and spreads 0.1 over the rest.
        """
        T, N, C = log_probs.shape
        # fixed: `weights = weights or <default>` raised RuntimeError for any
        # explicitly passed multi-element tensor (ambiguous truth value)
        if weights is None:
            weights = torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)])
        log_probs_lengths = torch.full(size=(N, ), fill_value=T, dtype=torch.int64)
        loss = ctc_loss(log_probs.to(torch.float32), targets, log_probs_lengths, lengths, reduction='mean')
        label_smoothing_loss = -((log_probs * weights.to(log_probs.device)).mean())
        return {'total_loss': loss + label_smoothing_loss, 'loss': loss, 'label_smooth_loss': label_smoothing_loss}

    def loss(self, log_probs, targets, lengths):
        return self.ctc_label_smoothing_loss(log_probs, targets, lengths)
class Encoder(Module):
    """
    Builds the model encoder: a stack of QuartzNet blocks configured by the
    'block' entries of `config`.
    """
    def __init__(self, config):
        super(Encoder, self).__init__()
        self.config = config

        in_features = self.config['input']['features']
        activation = layers[self.config['encoder']['activation']]()

        blocks = []
        for spec in self.config['block']:
            blocks.append(
                Block(
                    in_features, spec['filters'], activation,
                    repeat=spec['repeat'], kernel_size=spec['kernel'],
                    stride=spec['stride'], dilation=spec['dilation'],
                    dropout=spec['dropout'], residual=spec['residual'],
                    separable=spec['separable'],
                )
            )
            # each block's output feeds the next block's input
            in_features = spec['filters']

        self.encoder = Sequential(*blocks)

    def forward(self, x):
        return self.encoder(x)
class TCSConv1d(Module):
    """
    Time-Channel Separable 1D Convolution.

    With `separable=True` the convolution is factored into a depthwise conv
    (one filter per input channel) followed by a 1x1 pointwise conv;
    otherwise a single dense Conv1d is used.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, separable=False):
        super(TCSConv1d, self).__init__()
        self.separable = separable

        if not separable:
            self.conv = Conv1d(
                in_channels, out_channels, kernel_size=kernel_size,
                stride=stride, padding=padding, dilation=dilation, bias=bias
            )
        else:
            self.depthwise = Conv1d(
                in_channels, in_channels, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, bias=bias, groups=in_channels
            )
            self.pointwise = Conv1d(
                in_channels, out_channels, kernel_size=1, stride=1,
                dilation=dilation, bias=bias, padding=0
            )

    def forward(self, x):
        if not self.separable:
            return self.conv(x)
        return self.pointwise(self.depthwise(x))
class Block(Module):
    """
    TCSConv, Batch Normalisation, Activation, Dropout

    One QuartzNet block: `repeat` conv/bn sub-blocks with an optional
    residual connection, followed by a final activation + dropout.
    """
    def __init__(self, in_channels, out_channels, activation, repeat=5, kernel_size=1, stride=1, dilation=1, dropout=0.0, residual=False, separable=False):

        super(Block, self).__init__()

        self.use_res = residual
        self.conv = ModuleList()

        _in_channels = in_channels
        padding = self.get_padding(kernel_size[0], stride[0], dilation[0])

        # add the first n - 1 convolutions + activation
        for _ in range(repeat - 1):
            self.conv.extend(
                self.get_tcs(
                    _in_channels, out_channels, kernel_size=kernel_size,
                    stride=stride, dilation=dilation,
                    padding=padding, separable=separable
                )
            )

            self.conv.extend(self.get_activation(activation, dropout))
            _in_channels = out_channels

        # add the last conv and batch norm
        self.conv.extend(
            self.get_tcs(
                _in_channels, out_channels,
                kernel_size=kernel_size,
                stride=stride, dilation=dilation,
                padding=padding, separable=separable
            )
        )

        # add the residual connection
        if self.use_res:
            self.residual = Sequential(*self.get_tcs(in_channels, out_channels))

        # add the activation and dropout
        self.activation = Sequential(*self.get_activation(activation, dropout))

    def get_activation(self, activation, dropout):
        """Return the (activation, dropout) pair used after each sub-block."""
        return activation, Dropout(p=dropout)

    def get_padding(self, kernel_size, stride, dilation):
        """'Same'-style padding; stride and dilation cannot both exceed 1."""
        if stride > 1 and dilation > 1:
            raise ValueError("Dilation and stride can not both be greater than 1")
        return (kernel_size // 2) * dilation

    def get_tcs(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, padding=0, bias=False, separable=False):
        """Return a [conv, batchnorm] pair for one sub-block."""
        return [
            TCSConv1d(
                in_channels, out_channels, kernel_size,
                stride=stride, dilation=dilation, padding=padding,
                bias=bias, separable=separable
            ),
            BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)
        ]

    def forward(self, x):
        _x = x
        for layer in self.conv:
            _x = layer(_x)
        if self.use_res:
            # residual path is a 1x1 conv + bn mapping the block input
            _x = _x + self.residual(x)
        return self.activation(_x)
class Decoder(Module):
    """
    Decoder

    1x1 convolution from encoder features down to `classes` scores,
    permuted to TNC layout, with log-softmax over the class dimension.
    """
    def __init__(self, features, classes):
        super(Decoder, self).__init__()
        self.layers = Sequential(
            Conv1d(features, classes, kernel_size=1, bias=True),
            Permute([2, 0, 1])
        )

    def forward(self, x):
        return log_softmax(self.layers(x), dim=-1)
| 7,110 | 33.1875 | 155 | py |
bonito | bonito-master/bonito/cli/export.py | """
Bonito Export
"""
import io
import os
import re
import sys
import json
import toml
import torch
import bonito
import hashlib
import numpy as np
from glob import glob
import base64
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.nn import fuse_bn_
from bonito.util import _load_model, get_last_checkpoint, set_config_defaults
class JsonEncoder(json.JSONEncoder):
    """
    JSON encoder that understands numpy scalars/arrays, torch parameters
    and tensors, and raw bytes (assumed to be ascii).
    """
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, torch.nn.Parameter):
            # unwrap to a tensor; serialised on the next default() pass
            return obj.data
        if isinstance(obj, torch.Tensor):
            return obj.detach().numpy()
        if isinstance(obj, bytes):
            return obj.decode('ascii')
        return super(JsonEncoder, self).default(obj)
def file_md5(filename, nblock=1024):
    """
    Get md5 hex digest of a file, read in blocks of
    `nblock * md5 block size` bytes.
    """
    hasher = hashlib.md5()
    chunk_size = nblock * hasher.block_size
    with open(filename, "rb") as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def save_tensor(directory, name, tensor):
    """
    Save `tensor` to `directory/name.tensor` in a libtorch-loadable format
    (a scripted module with the tensor registered as parameter "0").
    """
    container = torch.nn.Module()
    container.register_parameter(
        "0", torch.nn.Parameter(tensor, requires_grad=False)
    )
    torch.jit.script(container).save(f"{directory}/{name}.tensor")
def reformat_output_layer(layer_dict, v4=True):
    """
    Rewrite a linearcrfencoder layer dict into guppy's GlobalNormTransducer
    form, padding a blank entry in front of each state's emission scores so
    the output size becomes (n_base + 1) * n_base**state_len.
    """
    n_base, state_len, blank_score = [layer_dict.pop(k) for k in ['n_base', 'state_len', 'blank_score']]

    layer_dict['size'] = (n_base + 1) * n_base**state_len

    if blank_score is not None:
        if v4:
            layer_dict['type'] = 'GlobalNormTransducer'
            params = layer_dict['params']
            # insert a zero weight row for the blank ahead of each state's bases
            params['W'] = torch.nn.functional.pad(
                params['W'].reshape([n_base**state_len, n_base, -1]),
                (0, 0, 1, 0),
                value=0.
            ).reshape((n_base + 1) * n_base**state_len, -1)

            if layer_dict['bias'] is False:
                params['b'] = torch.zeros(n_base**state_len * (n_base + 1))
                # every (n_base + 1)th entry is a blank: encode its fixed score
                # NOTE(review): the 5s below hard-code n_base == 4 / scale 5.0
                params['b'][0::5] = np.arctanh(blank_score / 5.0)
            else:
                params['b'] = torch.nn.functional.pad(
                    params['b'].reshape(n_base**state_len, n_base),
                    (1, 0),
                    value=0.
                ).reshape(-1)
            layer_dict['activation'] = 'identity'
            layer_dict['scale'] = 1.0
            layer_dict['stay_score'] = blank_score
        else:
            layer_dict['type'] = 'GlobalNormTransducer'
            assert layer_dict['activation'] == 'tanh'
            params = layer_dict['params']
            params['W'] = torch.nn.functional.pad(
                params['W'].reshape([n_base**state_len, n_base, -1]),
                (0, 0, 1, 0),
                value=0.
            ).reshape((n_base + 1) * n_base**state_len, -1)

            # blank score is baked into the padded bias via arctanh so that
            # tanh(b) * scale reproduces it
            params['b'] = torch.nn.functional.pad(
                params['b'].reshape(n_base**state_len, n_base),
                (1, 0),
                value=np.arctanh(blank_score / layer_dict['scale'])
            ).reshape(-1)

    return layer_dict
def to_guppy_feed_forward(layer):
    """
    Convert a linear layer dict (in place) to guppy's 'feed-forward' form
    and return it.
    """
    layer['type'] = 'feed-forward'
    layer['insize'] = layer.pop('in_features')
    layer['size'] = layer.pop('out_features')
    layer['activation'] = 'identity'
    return layer
def to_guppy_dict(model, include_weights=True, binary_weights=True, v4=True):
    """
    Serialise `model.encoder` into a guppy-compatible layer dict: drop
    layout permutes, rename layer types to guppy's vocabulary, rewrite the
    CRF output layer, and (optionally) base64-encode weights as float32.
    """
    guppy_dict = bonito.nn.to_dict(model.encoder, include_weights=include_weights)
    guppy_dict['sublayers'] = [x for x in guppy_dict['sublayers'] if x['type'] != 'permute']
    guppy_dict['sublayers'] = [dict(x, type='LSTM', activation='tanh', gate='sigmoid') if x['type'] == 'lstm' else x for x in guppy_dict['sublayers']]
    guppy_dict['sublayers'] = [dict(x, padding=(x['padding'], x['padding'])) if x['type'] == 'convolution' else x for x in guppy_dict['sublayers']]
    guppy_dict['sublayers'] = [to_guppy_feed_forward(x) if x['type'] == 'linear' else x for x in guppy_dict['sublayers']]
    # the crf encoder is either the last layer or second-to-last
    # (presumably followed by one trailing layer — confirm against bonito.nn)
    idx = -1 if guppy_dict['sublayers'][-1]['type'] == 'linearcrfencoder' else -2
    guppy_dict['sublayers'][idx] = reformat_output_layer(guppy_dict['sublayers'][idx], v4=v4)
    if binary_weights:
        for layer_dict in guppy_dict['sublayers']:
            if 'params' in layer_dict:
                layer_dict['params'] = {
                    f'{k}_binary': base64.b64encode(v.data.detach().numpy().astype(np.float32).tobytes()) for (k, v) in layer_dict['params'].items()
                }
    # layers flagged reverse are wrapped so guppy runs them right-to-left
    guppy_dict['sublayers'] = [{'type': 'reverse', 'sublayers': x} if x.pop('reverse', False) else x for x in guppy_dict['sublayers']]
    return guppy_dict
def main(args):
    """
    Export a trained bonito model: guppy json to stdout, dorado tensor
    files into the model directory, or a torchscript blob to stdout.
    """
    model_file = get_last_checkpoint(args.model) if os.path.isdir(args.model) else args.model

    if args.config is None:
        # default to the config.toml that sits next to the checkpoint
        args.config = os.path.join(os.path.dirname(model_file), "config.toml")

    config = toml.load(args.config)
    config = set_config_defaults(config)
    model = _load_model(model_file, config, device='cpu')

    if args.fuse_bn:
        # model weights might be saved in half when training and PyTorch's bn fusion
        # code uses an op (rsqrt) that currently (1.11) only has a float implementation
        model = model.to(torch.float32).apply(fuse_bn_)

    if args.format == 'guppy':
        # presumably v4-style configs declare an encoder 'type' — this is
        # the only discriminator used here
        v4 = True if 'type' in config['encoder'] else False
        jsn = to_guppy_dict(model, v4=v4)
        jsn["md5sum"] = file_md5(model_file)
        json.dump(jsn, sys.stdout, cls=JsonEncoder)
    elif args.format == 'dorado':
        for name, tensor in model.encoder.state_dict().items():
            save_tensor(args.model, name, tensor)
    elif args.format == 'torchscript':
        tmp_tensor = torch.rand(10, 1, 1000)
        model = model.float()
        traced_script_module = torch.jit.trace(model, tmp_tensor)
        buffer = io.BytesIO()
        torch.jit.save(traced_script_module, buffer)
        buffer.seek(0)
        sys.stdout.buffer.write(buffer.getvalue())
        sys.stdout.flush()
    else:
        raise NotImplementedError("Export format not supported")
def _str2bool(value):
    """Parse a CLI boolean: 'false', '0', 'no', 'off' (any case) -> False."""
    if isinstance(value, bool):
        return value
    return value.lower() not in ('false', '0', 'no', 'off')


def argparser():
    """
    Build the argument parser for `bonito export`.
    """
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False
    )
    parser.add_argument('model')
    parser.add_argument('--format', choices=['guppy', 'dorado', 'torchscript'], default='guppy')
    parser.add_argument('--config', default=None, help='config file to read settings from')
    # fixed: previously `--fuse-bn False` parsed to the *string* "False",
    # which is truthy, so the flag could never actually be disabled
    parser.add_argument('--fuse-bn', default=True, type=_str2bool, help='fuse batchnorm layers')
    return parser
| 6,898 | 35.696809 | 150 | py |
bonito | bonito-master/bonito/cli/evaluate.py | """
Bonito model evaluator
"""
import os
import time
import torch
import numpy as np
from itertools import starmap
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
from bonito.data import load_numpy, load_script
from bonito.util import accuracy, poa, decode_ref, half_supported
from bonito.util import init, load_model, concat, permute
from torch.utils.data import DataLoader
def main(args):
    """
    Evaluate one or more model checkpoints on a validation set, reporting
    per-checkpoint accuracy and (optionally) a POA consensus accuracy
    across checkpoints.
    """
    poas = []
    init(args.seed, args.device)

    print("* loading data")
    try:
        _, valid_loader_kwargs = load_numpy(args.chunks, args.directory)
    except FileNotFoundError:
        _, valid_loader_kwargs = load_script(
            args.directory,
            seed=args.seed,
            chunks=args.chunks,
            valid_chunks=args.chunks
        )

    dataloader = DataLoader(
        batch_size=args.batchsize, num_workers=4, pin_memory=True,
        **valid_loader_kwargs
    )

    accuracy_with_cov = lambda ref, seq: accuracy(ref, seq, min_coverage=args.min_coverage)

    for w in [int(i) for i in args.weights.split(',')]:
        seqs = []
        print("* loading model", w)
        model = load_model(args.model_directory, args.device, weights=w)

        print("* calling")
        t0 = time.perf_counter()
        targets = []

        with torch.no_grad():
            for data, target, *_ in dataloader:
                targets.extend(torch.unbind(target, 0))
                if half_supported():
                    data = data.type(torch.float16).to(args.device)
                else:
                    data = data.to(args.device)

                log_probs = model(data)

                if hasattr(model, 'decode_batch'):
                    seqs.extend(model.decode_batch(log_probs))
                else:
                    seqs.extend([model.decode(p) for p in permute(log_probs, 'TNC', 'NTC')])

        duration = time.perf_counter() - t0

        refs = [decode_ref(target, model.alphabet) for target in targets]
        accuracies = [accuracy_with_cov(ref, seq) if len(seq) else 0. for ref, seq in zip(refs, seqs)]

        # fixed: was `poas.append(sequences)` — NameError, `sequences` undefined
        if args.poa: poas.append(seqs)

        print("* mean %.2f%%" % np.mean(accuracies))
        print("* median %.2f%%" % np.median(accuracies))
        print("* time %.2f" % duration)
        print("* samples/s %.2E" % (args.chunks * data.shape[2] / duration))

    if args.poa:

        print("* doing poa")
        t0 = time.perf_counter()
        # group each sequence prediction per model together
        poas = [list(seq) for seq in zip(*poas)]
        consensuses = poa(poas)
        duration = time.perf_counter() - t0

        # fixed: was `starmap(accuracy_with_coverage_filter, zip(references, ...))`
        # — both names undefined (NameError); compare consensus against the refs
        accuracies = list(starmap(accuracy_with_cov, zip(refs, consensuses)))

        print("* mean %.2f%%" % np.mean(accuracies))
        print("* median %.2f%%" % np.median(accuracies))
        print("* time %.2f" % duration)
def argparser():
    """
    Build the argument parser for `bonito evaluate`.
    """
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument("model_directory")
    options = [
        ("--directory", dict(type=Path)),
        ("--device", dict(default="cuda")),
        ("--seed", dict(default=9, type=int)),
        ("--weights", dict(default="0", type=str)),
        ("--chunks", dict(default=1000, type=int)),
        ("--batchsize", dict(default=96, type=int)),
        ("--beamsize", dict(default=5, type=int)),
        ("--poa", dict(action="store_true", default=False)),
        ("--min-coverage", dict(default=0.5, type=float)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser
| 3,647 | 31.283186 | 102 | py |
bonito | bonito-master/bonito/cli/train.py | #!/usr/bin/env python3
"""
Bonito training.
"""
import os
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
from pathlib import Path
from importlib import import_module
from bonito.data import load_numpy, load_script
from bonito.util import __models__, default_config, default_data
from bonito.util import load_model, load_symbol, init, half_supported
from bonito.training import load_state, Trainer
import toml
import torch
import numpy as np
from torch.utils.data import DataLoader
def main(args):
    """
    Train a model: load (or resume from) a model and config, build the
    train/validation loaders, persist the effective config into the work
    directory and run the Trainer.
    """
    workdir = os.path.expanduser(args.training_directory)

    if os.path.exists(workdir) and not args.force:
        print("[error] %s exists, use -f to force continue training." % workdir)
        exit(1)

    init(args.seed, args.device, (not args.nondeterministic))
    device = torch.device(args.device)

    if not args.pretrained:
        config = toml.load(args.config)
    else:
        dirname = args.pretrained
        # a bare name may refer to a packaged model under __models__
        if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
            dirname = os.path.join(__models__, dirname)
        pretrain_file = os.path.join(dirname, 'config.toml')
        config = toml.load(pretrain_file)
        if 'lr_scheduler' in config:
            print(f"[ignoring 'lr_scheduler' in --pretrained config]")
            del config['lr_scheduler']

    argsdict = dict(training=vars(args))

    print("[loading model]")
    if args.pretrained:
        print("[using pretrained model {}]".format(args.pretrained))
        model = load_model(args.pretrained, device, half=False)
    else:
        model = load_symbol(config, 'Model')(config)

    print("[loading data]")
    try:
        train_loader_kwargs, valid_loader_kwargs = load_numpy(
            args.chunks, args.directory, valid_chunks = args.valid_chunks
        )
    except FileNotFoundError:
        # no pre-chunked numpy data; fall back to a dataset script
        train_loader_kwargs, valid_loader_kwargs = load_script(
            args.directory,
            seed=args.seed,
            chunks=args.chunks,
            valid_chunks=args.valid_chunks,
            n_pre_context_bases=getattr(model, "n_pre_context_bases", 0),
            n_post_context_bases=getattr(model, "n_post_context_bases", 0),
        )

    loader_kwargs = {
        "batch_size": args.batch, "num_workers": 4, "pin_memory": True
    }
    train_loader = DataLoader(**loader_kwargs, **train_loader_kwargs)
    valid_loader = DataLoader(**loader_kwargs, **valid_loader_kwargs)

    os.makedirs(workdir, exist_ok=True)
    # persist the effective config (plus CLI args) alongside the checkpoints
    toml.dump({**config, **argsdict}, open(os.path.join(workdir, 'config.toml'), 'w'))

    if config.get("lr_scheduler"):
        # the scheduler factory is resolved dynamically from its config entry
        sched_config = config["lr_scheduler"]
        lr_scheduler_fn = getattr(
            import_module(sched_config["package"]), sched_config["symbol"]
        )(**sched_config)
    else:
        lr_scheduler_fn = None

    trainer = Trainer(
        model, device, train_loader, valid_loader,
        use_amp=half_supported() and not args.no_amp,
        lr_scheduler_fn=lr_scheduler_fn,
        restore_optim=args.restore_optim,
        save_optim_every=args.save_optim_every,
        grad_accum_split=args.grad_accum_split,
        quantile_grad_clip=args.quantile_grad_clip
    )

    if (',' in args.lr):
        # a comma separated list of learning rates — presumably one per
        # parameter group; handled by Trainer.fit
        lr = [float(x) for x in args.lr.split(',')]
    else:
        lr = float(args.lr)
    trainer.fit(workdir, args.epochs, lr)
def argparser():
    """
    Build the argument parser for `bonito train`.
    """
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument("training_directory")

    # a run starts either from a config file or a pretrained model, not both
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--config', default=default_config)
    group.add_argument('--pretrained', default="")

    options = [
        ("--directory", dict(type=Path)),
        ("--device", dict(default="cuda")),
        ("--lr", dict(default='2e-3')),
        ("--seed", dict(default=25, type=int)),
        ("--epochs", dict(default=5, type=int)),
        ("--batch", dict(default=64, type=int)),
        ("--chunks", dict(default=0, type=int)),
        ("--valid-chunks", dict(default=None, type=int)),
        ("--no-amp", dict(action="store_true", default=False)),
        ("--restore-optim", dict(action="store_true", default=False)),
        ("--nondeterministic", dict(action="store_true", default=False)),
        ("--save-optim-every", dict(default=10, type=int)),
        ("--grad-accum-split", dict(default=1, type=int)),
        ("--quantile-grad-clip", dict(action="store_true", default=False)),
    ]
    for index, (flag, kwargs) in enumerate(options):
        parser.add_argument(flag, **kwargs)
        if flag == "--no-amp":
            # short alias only for force
            parser.add_argument("-f", "--force", action="store_true", default=False)
    return parser
| 4,704 | 35.472868 | 91 | py |
bonito | bonito-master/bonito/crf/basecall.py | """
Bonito CRF basecalling
"""
import torch
import numpy as np
from koi.decode import beam_search, to_str
from bonito.multiprocessing import thread_iter
from bonito.util import chunk, stitch, batchify, unbatchify, half_supported
def stitch_results(results, length, size, overlap, stride, reverse=False):
    """
    Stitch chunked results back together for a read of `length` samples.

    `results` may be a dict of arrays, in which case each entry is stitched
    separately. Reads shorter than one chunk are simply truncated to their
    own (downsampled) length.
    """
    if isinstance(results, dict):
        return {
            key: stitch_results(value, length, size, overlap, stride, reverse=reverse)
            for key, value in results.items()
        }
    if length < size:
        return results[0, :int(np.floor(length / stride))]
    return stitch(results, size, overlap, length, stride, reverse=reverse)
def compute_scores(model, batch, beam_width=32, beam_cut=100.0, scale=1.0, offset=0.0, blank_score=2.0, reverse=False):
    """
    Compute scores for model.

    Runs the network forward over one batch and decodes the output with
    koi's GPU beam search. When `reverse` is set the score matrix is
    reverse-complemented before decoding.
    """
    with torch.inference_mode():
        device = next(model.parameters()).device
        dtype = torch.float16 if half_supported() else torch.float32
        scores = model(batch.to(dtype).to(device))
        if reverse:
            scores = model.seqdist.reverse_complement(scores)

        # beam_search launches CUDA kernels; pin the current device to the
        # one holding the scores
        with torch.cuda.device(scores.device):
            sequence, qstring, moves = beam_search(
                scores, beam_width=beam_width, beam_cut=beam_cut,
                scale=scale, offset=offset, blank_score=blank_score
            )

        return {
            'moves': moves,
            'qstring': qstring,
            'sequence': sequence,
        }
def fmt(stride, attrs, rna=False):
    """
    Format a decoded result dict; RNA reads get sequence/qstring reversed.
    """
    flip = (lambda s: s[::-1]) if rna else (lambda s: s)
    return {
        'stride': stride,
        'moves': attrs['moves'].numpy(),
        'qstring': flip(to_str(attrs['qstring'])),
        'sequence': flip(to_str(attrs['sequence'])),
    }
def basecall(model, reads, chunksize=4000, overlap=100, batchsize=32,
             reverse=False, rna=False):
    """
    Basecalls a set of reads.

    Builds a lazy, threaded pipeline: chunk the signal, batch chunks across
    reads, score/decode on the GPU, stitch per-read results back together
    and format them. Yields (read, result-dict) pairs.
    """
    chunks = thread_iter(
        ((read, 0, read.signal.shape[-1]), chunk(torch.from_numpy(read.signal), chunksize, overlap))
        for read in reads
    )

    batches = thread_iter(batchify(chunks, batchsize=batchsize))

    scores = thread_iter(
        (read, compute_scores(model, batch, reverse=reverse)) for read, batch in batches
    )

    results = thread_iter(
        (read, stitch_results(scores, end - start, chunksize, overlap, model.stride, reverse))
        for ((read, start, end), scores) in unbatchify(scores)
    )

    return thread_iter(
        (read, fmt(model.stride, attrs, rna))
        for read, attrs in results
    )
| 2,632 | 30.345238 | 119 | py |
bonito | bonito-master/bonito/crf/model.py | """
Bonito CTC-CRF Model.
"""
import torch
import numpy as np
import koi
from koi.ctc import SequenceDist, Max, Log, semiring
from koi.ctc import logZ_cu, viterbi_alignments, logZ_cu_sparse, bwd_scores_cu_sparse, fwd_scores_cu_sparse
from bonito.nn import Module, Convolution, LinearCRFEncoder, Serial, Permute, layers, from_dict
def get_stride(m):
    """
    Return the total stride of module `m`: the product of the strides of
    all leaf submodules (1 for leaves without a stride attribute).
    """
    children = list(m.children())
    if not children:
        if hasattr(m, "stride"):
            stride = m.stride
            return stride if isinstance(stride, int) else np.prod(stride)
        return 1
    return np.prod([get_stride(child) for child in children])
class CTC_CRF(SequenceDist):
    def __init__(self, state_len, alphabet, n_pre_context_bases=0, n_post_context_bases=0):
        """
        CTC-CRF sequence distribution over a fixed-order state space.

        :param state_len: CRF order — number of bases of context per state.
        :param alphabet: output alphabet; position 0 is the blank symbol.
        """
        super().__init__()
        self.alphabet = alphabet
        self.state_len = state_len
        self.n_pre_context_bases = n_pre_context_bases
        self.n_post_context_bases = n_post_context_bases
        self.n_base = len(alphabet[1:])
        # sparse transition index used by the *_cu_sparse kernels: first
        # column is each state itself, then n_base related states
        # (presumably the single-base-shift predecessors — confirm vs koi)
        self.idx = torch.cat([
            torch.arange(self.n_base**(self.state_len))[:, None],
            torch.arange(
                self.n_base**(self.state_len)
            ).repeat_interleave(self.n_base).reshape(self.n_base, -1).T
        ], dim=1).to(torch.int32)
def n_score(self):
return len(self.alphabet) * self.n_base**(self.state_len)
    def logZ(self, scores, S:semiring=Log):
        """
        Partition function over all paths in semiring `S`, per batch item.
        """
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, len(self.alphabet))
        # uniform (semiring-one) initial and final state weights
        alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return logZ_cu_sparse(Ms, self.idx, alpha_0, beta_T, S)
def normalise(self, scores):
return (scores - self.logZ(scores)[:, None] / len(scores))
    def forward_scores(self, scores, S: semiring=Log):
        """Forward (alpha) scores for every time step and state."""
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, self.n_base + 1)
        alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return fwd_scores_cu_sparse(Ms, self.idx, alpha_0, S, K=1)
    def backward_scores(self, scores, S: semiring=Log):
        """Backward (beta) scores for every time step and state."""
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, self.n_base + 1)
        beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return bwd_scores_cu_sparse(Ms, self.idx, beta_T, S, K=1)
    def compute_transition_probs(self, scores, betas):
        """
        Turn raw transition scores plus backward scores into normalised
        per-step transition probabilities and initial state probabilities.
        """
        T, N, C = scores.shape
        # add bwd scores to edge scores
        log_trans_probs = (scores.reshape(T, N, -1, self.n_base + 1) + betas[1:, :, :, None])
        # transpose from (new_state, dropped_base) to (old_state, emitted_base) layout
        log_trans_probs = torch.cat([
            log_trans_probs[:, :, :, [0]],
            log_trans_probs[:, :, :, 1:].transpose(3, 2).reshape(T, N, -1, self.n_base)
        ], dim=-1)
        # convert from log probs to probs by exponentiating and normalising
        trans_probs = torch.softmax(log_trans_probs, dim=-1)
        # convert first bwd score to initial state probabilities
        init_state_probs = torch.softmax(betas[0], dim=-1)
        return trans_probs, init_state_probs
def reverse_complement(self, scores):
T, N, C = scores.shape
expand_dims = T, N, *(self.n_base for _ in range(self.state_len)), self.n_base + 1
scores = scores.reshape(*expand_dims)
blanks = torch.flip(scores[..., 0].permute(
0, 1, *range(self.state_len + 1, 1, -1)).reshape(T, N, -1, 1), [0, 2]
)
emissions = torch.flip(scores[..., 1:].permute(
0, 1, *range(self.state_len, 1, -1),
self.state_len +2,
self.state_len + 1).reshape(T, N, -1, self.n_base), [0, 2, 3]
)
return torch.cat([blanks, emissions], dim=-1).reshape(T, N, -1)
def viterbi(self, scores):
traceback = self.posteriors(scores, Max)
a_traceback = traceback.argmax(2)
moves = (a_traceback % len(self.alphabet)) != 0
paths = 1 + (torch.div(a_traceback, len(self.alphabet), rounding_mode="floor") % self.n_base)
return torch.where(moves, paths, 0)
def path_to_str(self, path):
alphabet = np.frombuffer(''.join(self.alphabet).encode(), dtype='u1')
seq = alphabet[path[path != 0]]
return seq.tobytes().decode()
def prepare_ctc_scores(self, scores, targets):
# convert from CTC targets (with blank=0) to zero indexed
targets = torch.clamp(targets - 1, 0)
T, N, C = scores.shape
scores = scores.to(torch.float32)
n = targets.size(1) - (self.state_len - 1)
stay_indices = sum(
targets[:, i:n + i] * self.n_base ** (self.state_len - i - 1)
for i in range(self.state_len)
) * len(self.alphabet)
move_indices = stay_indices[:, 1:] + targets[:, :n - 1] + 1
stay_scores = scores.gather(2, stay_indices.expand(T, -1, -1))
move_scores = scores.gather(2, move_indices.expand(T, -1, -1))
return stay_scores, move_scores
def ctc_loss(self, scores, targets, target_lengths, loss_clip=None, reduction='mean', normalise_scores=True):
if normalise_scores:
scores = self.normalise(scores)
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
logz = logZ_cu(stay_scores, move_scores, target_lengths + 1 - self.state_len)
loss = - (logz / target_lengths)
if loss_clip:
loss = torch.clamp(loss, 0.0, loss_clip)
if reduction == 'mean':
return loss.mean()
elif reduction in ('none', None):
return loss
else:
raise ValueError('Unknown reduction type {}'.format(reduction))
def ctc_viterbi_alignments(self, scores, targets, target_lengths):
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
return viterbi_alignments(stay_scores, move_scores, target_lengths + 1 - self.state_len)
def conv(c_in, c_out, ks, stride=1, bias=False, activation=None, norm=None):
    """Build a Convolution layer with 'same'-style padding of ks // 2."""
    same_pad = ks // 2
    return Convolution(
        c_in, c_out, ks,
        stride=stride, padding=same_pad, bias=bias,
        activation=activation, norm=norm,
    )
def rnn_encoder(n_base, state_len, insize=1, stride=5, winlen=19, activation='swish', rnn_type='lstm', features=768, scale=5.0, blank_score=None, expand_blanks=True, num_layers=5, norm=None):
    """
    Build the classic bonito encoder: three convolutions (the last one
    downsampling time by `stride`), a stack of alternating-direction RNN
    layers, and a LinearCRFEncoder head producing CRF transition scores.
    """
    rnn = layers[rnn_type]
    return Serial([
        conv(insize, 4, ks=5, bias=True, activation=activation, norm=norm),
        conv(4, 16, ks=5, bias=True, activation=activation, norm=norm),
        conv(16, features, ks=winlen, stride=stride, bias=True, activation=activation, norm=norm),
        # (N, C, T) -> (T, N, C) for the recurrent layers
        Permute([2, 0, 1]),
        # alternate direction per layer; the last layer runs reversed
        *(rnn(features, features, reverse=(num_layers - i) % 2) for i in range(num_layers)),
        LinearCRFEncoder(
            features, n_base, state_len, activation='tanh', scale=scale,
            blank_score=blank_score, expand_blanks=expand_blanks
        )
    ])
class SeqdistModel(Module):
    """
    Base basecalling model: an encoder producing CRF scores plus a
    sequence distribution (`seqdist`) used for decoding and loss.
    """
    def __init__(self, encoder, seqdist, n_pre_post_context_bases=None):
        super().__init__()
        self.seqdist = seqdist
        self.encoder = encoder
        self.stride = get_stride(encoder)
        self.alphabet = seqdist.alphabet
        if n_pre_post_context_bases is None:
            # default context trimming derived from the CRF state length
            self.n_pre_context_bases = self.seqdist.state_len - 1
            self.n_post_context_bases = 1
        else:
            self.n_pre_context_bases, self.n_post_context_bases = n_pre_post_context_bases
    def forward(self, x, *args):
        return self.encoder(x)
    def decode_batch(self, x):
        # small epsilon keeps log() finite for zero posteriors
        scores = self.seqdist.posteriors(x.to(torch.float32)) + 1e-8
        tracebacks = self.seqdist.viterbi(scores.log()).to(torch.int16).T
        return [self.seqdist.path_to_str(x) for x in tracebacks.cpu().numpy()]
    def decode(self, x):
        # single-read convenience wrapper around decode_batch
        return self.decode_batch(x.unsqueeze(1))[0]
    def loss(self, scores, targets, target_lengths, **kwargs):
        return self.seqdist.ctc_loss(scores.to(torch.float32), targets, target_lengths, **kwargs)
    def use_koi(self, **kwargs):
        """Swap the encoder for a koi-accelerated LSTM graph (inference path)."""
        self.encoder = koi.lstm.update_graph(
            self.encoder,
            batchsize=kwargs["batchsize"],
            chunksize=kwargs["chunksize"] // self.stride,
            quantize=kwargs["quantize"],
        )
class Model(SeqdistModel):
    """Concrete CTC-CRF model built from a bonito config dict."""
    def __init__(self, config):
        seqdist = CTC_CRF(
            state_len=config['global_norm']['state_len'],
            alphabet=config['labels']['labels']
        )
        if 'type' in config['encoder']: #new-style config
            encoder = from_dict(config['encoder'])
        else: #old-style
            encoder = rnn_encoder(seqdist.n_base, seqdist.state_len, insize=config['input']['features'], **config['encoder'])
        super().__init__(encoder, seqdist, n_pre_post_context_bases=config['input'].get('n_pre_post_context_bases'))
        # keep the raw config around for checkpointing / introspection
        self.config = config
| 8,951 | 40.831776 | 191 | py |
ReAgent | ReAgent-master/reagent/ope/utils.py | #!/usr/bin/env python3
import math
from collections import OrderedDict
from typing import Sequence, Union
import numpy as np
import torch
DEFAULT_MIN = float("-inf")
DEFAULT_MAX = float("inf")
def convert_to_one_hots(a, num_classes: int, dtype=torch.int, device=None):
    """
    Convert a class-index array of shape (num_sample,) into a one-hot
    matrix of shape (num_sample, num_classes).

    Args:
        a: index array (list, ndarray, or tensor of ints)
        num_classes: number of classes
        dtype: data type of the returned tensor
        device: optional torch device for the returned tensor

    Returns:
        one-hot tensor of shape (len(a), num_classes)
    """
    n = len(a)
    result = torch.zeros((n, num_classes), dtype=dtype, device=device)
    rows = torch.arange(n)
    # set exactly one entry per row, at the class index
    result[rows, a] = 1
    return result
class LRUCache(OrderedDict):
    """
    OrderedDict-backed cache that evicts the least-recently-used entry
    once it holds more than `maxsize` items. Reads refresh recency.
    """
    def __init__(self, maxsize=2 ** 10, *args, **kwds):
        self.maxsize = maxsize
        super().__init__(*args, **kwds)

    def __getitem__(self, key):
        hit = super().__getitem__(key)
        # a successful read makes this entry most-recently-used
        self.move_to_end(key)
        return hit

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        if len(self) > self.maxsize:
            # drop the oldest (least recently used) entry
            oldest = next(iter(self))
            del self[oldest]
class RunningAverage:
    """
    Incremental mean of a stream of values. NaN and infinite values are
    silently skipped. Starts empty unless a finite `init_val` is given.
    """
    def __init__(self, init_val: float = float("nan")):
        self._mean = init_val
        self._n = 0 if math.isnan(init_val) else 1

    def add(self, value) -> "RunningAverage":
        """Fold one value into the mean; returns self for chaining."""
        if not math.isnan(value) and not math.isinf(value):
            if self._n == 0:
                self._mean = 0.0
            self._n += 1
            # incremental-mean update: m += (x - m) / n
            self._mean += (float(value) - self._mean) / self._n
        return self

    @property
    def average(self):
        return self._mean

    @property
    def count(self):
        return self._n

    @property
    def total(self):
        # reconstructed sum of all accepted values
        return self._mean * self._n

    def __float__(self):
        return self._mean
class Clamper:
    """
    Clamp scalars, sequences, numpy arrays, or torch tensors into
    [min_v, max_v], returning the same kind of container it was given
    (sequences come back as lists of floats).
    """
    def __init__(self, min_v: float = DEFAULT_MIN, max_v: float = DEFAULT_MAX):
        self._min = min_v
        self._max = max_v
        if self._min >= self._max:
            # fixed: previously interpolated the builtins `min`/`max`
            # (printing "<built-in function min>") instead of the arguments
            raise ValueError(f"min[{min_v}] not less than max[{max_v}]")

    def __call__(
        self, v: Union[float, Sequence[float], torch.Tensor, np.ndarray]
    ) -> Union[float, Sequence[float], torch.Tensor, np.ndarray]:
        if isinstance(v, torch.Tensor):
            return v.clamp(self._min, self._max)
        elif isinstance(v, np.ndarray):
            return v.clip(self._min, self._max)
        elif isinstance(v, Sequence):
            return [max(self._min, min(self._max, float(i))) for i in v]
        else:
            return max(self._min, min(self._max, float(v)))

    def __repr__(self):
        return f"Clamper({self._min},{self._max})"
| 2,743 | 26.717172 | 88 | py |
ReAgent | ReAgent-master/reagent/ope/estimators/slate_estimators.py | #!/usr/bin/env python3
import logging
import math
import random
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import (
Iterable,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import numpy as np
import torch
from reagent.ope.estimators.estimator import (
Estimator,
EstimatorResult,
EstimatorSampleResult,
)
from reagent.ope.estimators.types import (
Action,
Items,
Objects,
Probability,
Reward,
Trainer,
TrainingData,
TypeWrapper,
Values,
ValueType,
)
from reagent.ope.utils import Clamper, RunningAverage
from torch import Tensor
# Main algorithms are from two papers:
# 1. Offline Evaluation of Ranking Policies with Click Models
# https://arxiv.org/abs/1804.10488
# 2. Off-policy evaluation for slate recommendation
# https://arxiv.org/abs/1605.04812
# Types for slates
# A slate slot (position); the wrapped value may be an index, a feature
# vector, or a tensor depending on the representation in use.
SlateSlotType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, Tensor]
SlateSlot = TypeWrapper[SlateSlotType]
logger = logging.getLogger(__name__)
class SlateSlotValues(Values[SlateSlot]):
    """
    Map from a slot to a value
    """
    def _to_key(self, k: int) -> SlateSlot:
        # integer indices are wrapped as SlateSlot keys
        return SlateSlot(k)
class SlateSlots(Items[SlateSlot]):
    """
    Ordered collection of slate slots.
    """
    def _new_item(self, i: int) -> SlateSlot:
        return SlateSlot(i)
    # pyre-fixme[15]: `fill` overrides method defined in `Items` inconsistently.
    def fill(
        self,
        values: Union[Mapping[SlateSlot, float], Sequence[float], np.ndarray, Tensor],
    ) -> SlateSlotValues:
        """
        Map slots to given values
        Args:
            values: given values
        Returns:
            Map from slots to given values
        """
        return SlateSlotValues(super().fill(values))
class SlateSlotObjects(Objects[SlateSlot, ValueType]):
    """Non-empty map from slate slots to arbitrary objects."""
    def __init__(
        self,
        values: Union[MutableMapping[SlateSlot, ValueType], MutableSequence[ValueType]],
    ):
        assert (len(values)) > 0
        super().__init__(values)
    def _to_key(self, k: int) -> SlateSlot:
        return SlateSlot(k)
    @property
    def slots(self) -> SlateSlots:
        """The slots this object covers, in order."""
        if self.is_sequence:
            # pyre-fixme[16]: `SlateSlotObjects` has no attribute `_values`.
            return SlateSlots(len(self._values))
        else:
            return SlateSlots(list(self._key_to_index.keys()))
    @property
    def objects(self) -> Sequence[ValueType]:
        return super().values
    def fill(
        self, values: Sequence[ValueType]
    ) -> Union[Mapping[SlateSlot, ValueType], Sequence[ValueType]]:
        """Pair this object's slots with the first len(self) of `values`."""
        # pyre-fixme[16]: `SlateSlotObjects` has no attribute `_values`.
        assert len(values) >= len(self._values)
        if self._key_to_index is None:
            return values[: len(self._values)]
        else:
            return {s: v for s, v in zip(self.slots, values[: len(self._values)])}
# type of valid slate candidates, e.g., doc id; an item is just an Action
SlateItem = Action
class SlateItems(Items[SlateItem]):
    """Ordered collection of slate candidate items."""
    def _new_item(self, i: int) -> SlateItem:
        return SlateItem(i)
class SlateItemValues(Values[SlateItem]):
    """Map from candidate items to scalar values (e.g. rewards)."""
    def _to_key(self, k: int) -> SlateItem:
        return SlateItem(k)
    @property
    def items(self) -> SlateItems:
        """Items covered by this map, as a SlateItems collection."""
        if self.is_sequence:
            return SlateItems(len(self))
        else:
            return SlateItems(super().keys)
class SlateItemFeatures(Objects[SlateItem, Tensor]):
    """Map from candidate items to feature tensors (stored as float64)."""
    def __init__(
        self,
        values: Union[Mapping[SlateItem, Tensor], Sequence[Tensor], Tensor, np.ndarray],
    ):
        # pyre-fixme[6]: Expected
        #  `Union[Mapping[Variable[reagent.ope.estimators.types.KeyType],
        #  Variable[ValueType]], Sequence[Variable[ValueType]]]` for 1st param but got
        #  `Union[Mapping[TypeWrapper[Union[Tuple[float], Tuple[int], Tensor, float,
        #  int, np.ndarray]], Tensor], Sequence[Tensor], Tensor, np.ndarray]`.
        super().__init__(values)
    def _init_values(
        self,
        values: Union[Mapping[SlateItem, Tensor], Sequence[Tensor], Tensor, np.ndarray],
    ):
        # normalize every accepted input form into a single double tensor
        if isinstance(values, Tensor):
            # pyre-fixme[16]: `SlateItemFeatures` has no attribute `_values`.
            self._values = values.to(dtype=torch.double)
        elif isinstance(values, np.ndarray):
            self._values = torch.as_tensor(values, dtype=torch.double)
        elif isinstance(values, Sequence):
            # pyre-fixme[6]: Expected `Union[typing.List[Tensor],
            #  typing.Tuple[Tensor, ...]]` for 1st param but got `Sequence[Tensor]`.
            self._values = torch.stack(values).to(dtype=torch.double)
        elif isinstance(values, Mapping):
            # mappings additionally record key <-> row-index lookups
            self._key_to_index = dict(zip(values.keys(), range(len(values))))
            self._index_to_key = list(values.keys())
            self._values = torch.stack(list(values.values())).to(dtype=torch.double)
        else:
            raise TypeError(f"Unsupported values type {type(values)}")
    def _to_key(self, k: int) -> SlateItem:
        return SlateItem(k)
    @property
    def items(self) -> SlateItems:
        """Items covered by this map, as a SlateItems collection."""
        if self.is_sequence:
            return SlateItems(len(self))
        else:
            return SlateItems(super().keys)
# SlateSlotFeatures = SlateSlotObjects[Tensor]
class SlateSlotFeatures(SlateSlotObjects[Tensor]):
    """Map from slots to feature tensors."""
    @property
    def features(self) -> Tensor:
        """All slot features stacked into one (num_slots, ...) tensor."""
        # pyre-fixme[16]: `SlateSlotFeatures` has no attribute `_values`.
        return torch.stack(self._values)
class Slate(SlateSlotObjects[SlateItem]):
    """
    Class represents a slate: map from slots to items/docs
    """
    def one_hots(self, items: SlateItems, device=None) -> Tensor:
        """One-hot encode each slotted item against the full item set."""
        oh = torch.zeros((len(self), len(items)), dtype=torch.double, device=device)
        # pyre-fixme[16]: `Slate` has no attribute `_values`.
        for t, i in zip(oh, self._values):
            t[items.index_of(i)] = 1.0
        return oh
    @property
    def items(self) -> Sequence[SlateItem]:
        return super().values
    def slot_values(self, item_values: SlateItemValues) -> SlateSlotValues:
        """
        Map items in the slate to given values
        Args:
            item_values: Map from all items to some values
        Returns:
            List of values in the slate
        """
        if self._key_to_index is None:
            # pyre-fixme[16]: `Slate` has no attribute `_values`.
            return SlateSlotValues([item_values[i] for i in self._values])
        else:
            # NOTE(review): iterating `self._key_to_index` yields keys only;
            # unpacking into (k, i) looks suspect -- confirm against the
            # Objects base-class internals (`.items()` may be intended).
            return SlateSlotValues({k: item_values[i] for k, i in self._key_to_index})
    def slot_features(self, item_features: SlateItemFeatures) -> SlateSlotFeatures:
        """
        Map items in the slate to given feature tensors
        Args:
            item_features: Map from all items to feature tensors
        Returns:
            Features of the items in the slate, keyed by slot
        """
        if self._key_to_index is None:
            return SlateSlotFeatures(
                # pyre-fixme[16]: `Slate` has no attribute `_values`.
                [item_features[i].detach().clone() for i in self._values]
            )
        else:
            # NOTE(review): same (k, i) unpacking concern as slot_values above
            return SlateSlotFeatures(
                {k: item_features[i].detach().clone() for k, i in self._key_to_index}
            )
    def __repr__(self):
        return f"{self.__class__.__name__}{{value[{self._values}]}}"
def make_slate(slots: SlateSlots, items: Sequence[SlateItem]) -> Slate:
    """
    Assign the first len(slots) items to the given slots to form a slate.
    """
    size = len(slots)
    assert len(items) >= size
    chosen = items[:size]
    if slots.is_sequence:
        return Slate(list(chosen))
    return Slate(dict(zip(slots, chosen)))
class SlateSlotItemValues(SlateSlotObjects[SlateItemValues]):
    """Per-slot item-value maps; all slots must cover the same item count."""
    def __init__(
        self,
        values: Union[
            MutableMapping[SlateSlot, SlateItemValues], MutableSequence[SlateItemValues]
        ],
    ):
        super().__init__(values)
        # every slot's value map must have the same number of items
        # pyre-fixme[16]: `SlateSlotItemValues` has no attribute `_values`.
        self._item_size = len(self._values[0])
        for v in self._values[1:]:
            assert self._item_size == len(v)
    def values_tensor(self, device=None) -> Tensor:
        """Stack per-slot values into a (num_slots, num_items) tensor."""
        # pyre-fixme[16]: `SlateSlotItemValues` has no attribute `_values`.
        dist = [v.values for v in self._values]
        return torch.stack(dist).to(device=device)
class SlateSlotItemExpectations(SlateSlotItemValues):
    """Per-slot distributions over items (expected placement probabilities)."""
    def expected_rewards(
        self, item_rewards: SlateItemValues, device=None
    ) -> SlateSlotValues:
        """
        Calculate expected relevances of each slot, given each item's
        relevances, under this distribution
        Args:
            item_rewards:
            device:
        Returns:
            Map of slots to their expected relevance
        """
        dist = self.values_tensor(device)
        rewards = item_rewards.values.to(device=device)
        # (num_slots, num_items) @ (num_items, 1) -> expected reward per slot
        rewards = torch.mm(dist, rewards.unsqueeze(0).t()).squeeze()
        if self.is_sequence:
            return SlateSlotValues(rewards)
        else:
            return SlateSlotValues(dict(zip(self.slots, rewards.tolist())))
    @property
    def expectations(self) -> Sequence[SlateItemValues]:
        return super().values
def make_slot_item_distributions(
    slots: SlateSlots, dists: Sequence[SlateItemValues]
) -> SlateSlotItemExpectations:
    """Pair the first len(slots) item distributions with the given slots."""
    size = len(slots)
    assert len(dists) >= size
    head = dists[:size]
    if slots.is_sequence:
        return SlateSlotItemExpectations(list(head))
    return SlateSlotItemExpectations(dict(zip(slots, head)))
def is_to_calculate_expectation(slate_size: int, item_size: int) -> bool:
    """
    Switch between calculating and sampling expectations, balanced by
    execution time and accuracy.

    Return:
        True to calculate exactly
        False to sample
    """
    if slate_size < 4:
        return True
    # per slate size, the largest item count (exclusive) for which the
    # exact brute-force calculation is still fast enough
    limits = {4: 182, 5: 47, 6: 22, 7: 15}
    limit = limits.get(slate_size)
    return limit is not None and item_size < limit
def _calculate_slot_expectation(
    d_out: Tensor,
    probs: Sequence[float],
    buffer: Iterable[Tuple[Set[int], float, float, float]],
) -> Iterable[Tuple[Set[int], float, float, float]]:
    """
    A helper function to calculate items' expectations for a slot.

    Extends each partial (ordered, without-replacement) item combination in
    `buffer` by one more item, writes into `d_out[i]` the probability that
    item i lands in this slot, and returns the extended buffer for the next
    slot. `probs` are the base item selection probabilities.
    """
    assert d_out.shape[0] == len(probs)
    next_buffer = []
    for b0, b1, b2, _ in buffer:
        # memory buffer for all ordered combinations so far, list of tuples of
        # b0: all the items in this ordered combination
        # b1: cumulative probability of b0
        # b2: sum of the probabilities of b0
        # b3: = b1 / (1.0 - b2) cached value for faster computation
        for i, i_prob in enumerate(probs):
            # only add i if it's not already in
            if i in b0:
                continue
            # nb* are next buffer values
            nb2 = b2 + i_prob
            # due to precision errors, sometimes nb2 becomes 1, in this
            # case, discard the combination
            if nb2 < 1.0:
                # renormalize by the mass remaining after removing b0's items
                nb1 = b1 * i_prob / (1.0 - b2)
                next_buffer.append(({*b0, i}, nb1, nb2, nb1 / (1.0 - nb2)))
    for i, i_prob in enumerate(probs):
        # P(item i at this slot) = sum over combos excluding i of the
        # cached b3 factor, times i's own probability
        p = 0.0
        for b0, _, _, b3 in next_buffer:
            if i in b0:
                continue
            p += b3
        d_out[i] = p * i_prob
    return next_buffer
class SlateItemProbabilities(SlateItemValues):
    """
    Probabilities of each item being selected into the slate.

    With greedy=True the top items are chosen deterministically; otherwise
    slates are drawn by sampling items without replacement.
    """
    def __init__(
        self,
        values: Union[Mapping[SlateItem, float], Sequence[float], np.ndarray, Tensor],
        greedy: bool = False,
    ):
        super().__init__(values)
        self._greedy = greedy
        # lazily computed cache of per-slot item expectations
        self._slot_item_expectations = None
    def _to_key(self, k: int) -> SlateItem:
        return SlateItem(k)
    def _reset(self):
        super()._reset()
        # cached expectations are stale once the underlying values change
        self._slot_item_expectations = None
    def slate_probability(self, slate: Slate) -> Probability:
        """
        Calculate probability of a slate under this distribution
        Args:
            slate:
        Returns:
            probability
        """
        if self._greedy:
            # deterministic policy: probability is 1 iff the slate equals
            # the greedy top-k ordering, else 0
            items = super().greedy(len(slate))
            for i1, i2 in zip(items, slate.items):
                if i1 != i2:
                    return 0.0
            return 1.0
        else:
            # sampling without replacement: product over positions of
            # p(item) / (mass still available at that position)
            # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_values`.
            clamped = torch.clamp(self._values, 0.0)
            indices = [self.index_of(item) for _, item in slate]
            probs = clamped[indices]
            sums = clamped[indices]
            clamped[indices] = 0.0
            sums = sums.flip(0).cumsum(0).flip(0) + clamped.sum()
            return Probability((probs / sums).prod().item())
    def slot_item_expectations(self, slots: SlateSlots) -> SlateSlotItemExpectations:
        """Per-slot distributions over items, computed and cached on demand."""
        slate_size = len(slots)
        if (
            self._slot_item_expectations is not None
            and len(self._slot_item_expectations) >= slate_size
        ):
            return self._slot_item_expectations
        item_size = len(self)
        assert item_size >= slate_size
        if self._greedy:
            # greedy: each slot's distribution is a point mass on the
            # correspondingly-ranked item
            self._slot_item_expectations = make_slot_item_distributions(
                slots,
                # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param
                #  but got `List[Values[typing.Any]]`.
                [
                    self.replace(torch.zeros(item_size, dtype=torch.double))
                    for _ in range(len(self))
                ],
            )
            sorted_items, _ = self.sort()
            for item, ds in zip(
                sorted_items, self._slot_item_expectations.expectations
            ):
                ds[item] = 1.0
        else:
            self._normalize()
            # exact calculation when cheap enough, else Monte Carlo
            if is_to_calculate_expectation(len(slots), len(self)):
                self._calculate_expectations(slots)
            else:
                self._sample_expectations(slots, 20000)
        return self._slot_item_expectations
    def _sample_expectations(self, slots: SlateSlots, num_samples: int):
        """Monte Carlo estimate of slot/item expectations."""
        slate_size = len(slots)
        item_size = len(self)
        dm = torch.zeros((slate_size, item_size), dtype=torch.double)
        ri = torch.arange(slate_size)
        # NOTE(review): relies on `self._probabilities` being populated by
        # the preceding `_normalize()` call -- confirm in the Values base
        # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_probabilities`.
        ws = self._probabilities.repeat((num_samples, 1))
        for _ in range(item_size):
            samples = torch.multinomial(ws, slate_size)
            for sample in samples:
                dm[ri, sample] += 1
        dm /= num_samples * item_size
        self._slot_item_expectations = make_slot_item_distributions(
            slots,
            # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param but
            #  got `List[Values[typing.Any]]`.
            [self.replace(vs) for vs in dm],
        )
    def _calculate_expectations(self, slots: SlateSlots):
        """
        A brute-force way to calculate each item's expectations at each slot by
        going through all l-choose-m (l!/(l-m)!) possible slates.
        """
        slate_size = len(slots)
        item_size = len(self)
        dm = torch.zeros((slate_size, item_size), dtype=torch.double)
        # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_probabilities`.
        dm[0] = self._probabilities
        # buffer holds (items used, cumulative prob, prob mass used, cache)
        buffer = [(set(), 1.0, 0.0, 1.0)]
        probs = self._probabilities.tolist()
        for d in dm[1:]:
            buffer = _calculate_slot_expectation(d, probs, buffer)
        self._slot_item_expectations = make_slot_item_distributions(
            slots,
            # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param but
            #  got `List[Values[typing.Any]]`.
            [self.replace(vs) for vs in dm],
        )
    def sample_slate(self, slots: SlateSlots) -> Slate:
        """Draw one slate: greedy top-k or sampling without replacement."""
        slate_size = len(slots)
        if self._greedy:
            items = super().greedy(slate_size)
        else:
            items = super().sample(slate_size)
        return make_slate(slots, items)
    @property
    def is_deterministic(self) -> bool:
        return self._greedy
    def slate_space(
        self, slots: SlateSlots, max_size: int = -1
    ) -> Iterable[Tuple[Sequence[SlateItem], float]]:
        """Return all possible slates and their probabilities
        The algorithm is similar to :func:`~_calculate_expectations`, but has
        less value to cache thus save both space and computation
        Args:
            slots: slots to be filled
            max_size: max number of samples to be returned
                      <= 0 return all samples
        """
        slate_size = len(slots)
        item_size = len(self)
        assert item_size >= slate_size
        if self._greedy:
            items = super().greedy(slate_size)
            return [(items, 1.0)]
        else:
            buffer = [([], 1.0, 0.0)]
            # pyre-fixme[16]: `SlateItemProbabilities` has no attribute
            #  `_probabilities`.
            probs = self._probabilities.tolist()
            for _ in range(slate_size):
                next_buffer = []
                for b0, b1, b2 in buffer:
                    # memory buffer for all ordered combinations so far, list of tuples of
                    # b0: all the items in this ordered combination
                    # b1: cumulative probability of b0
                    # b2: sum of the probabilities of b0
                    for i, i_prob in enumerate(probs):
                        if i in b0:
                            continue
                        nb2 = b2 + i_prob
                        if nb2 < 1.0:
                            nb1 = b1 * i_prob / (1.0 - b2)
                            next_buffer.append(([*b0, i], nb1, nb2))
                if max_size <= 0 or max_size > len(next_buffer):
                    buffer = next_buffer
                else:
                    # cap the growth of the enumeration by random subsampling
                    buffer = random.sample(next_buffer, max_size)
            return [([SlateItem(i) for i in b[0]], b[1]) for b in buffer]
class SlateSlotItemProbabilities(SlateSlotItemValues):
    """
    Per-slot item distributions: each slot has its own probability map,
    and sampling proceeds slot by slot without replacement.
    """
    def __init__(
        self,
        values: Union[
            MutableMapping[SlateSlot, SlateItemValues], MutableSequence[SlateItemValues]
        ],
        greedy: bool = False,
    ):
        super().__init__(values)
        self._greedy = greedy
        self._slot_item_distributions = None
        # lazily computed cache of per-slot item expectations
        self._slot_item_expectations = None
    def slate_probability(self, slate: Slate) -> Probability:
        """
        Calculate probability of a slate under this distribution
        Args:
            slate:
        Returns:
            probability
        """
        assert len(slate) <= len(self)
        if self._greedy:
            # deterministic: each slot must hold its own top-ranked item
            for slot, item in slate:
                probs = self[slot]
                its, _ = probs.sort()
                if its[0] != item:
                    return 0.0
            return 1.0
        else:
            # sequential sampling without replacement: renormalize each
            # slot's probability by the mass left after earlier picks
            p = 1.0
            last_items = []
            for slot, item in slate:
                item_probs = self[slot]
                w = 1.0
                for last_item in last_items:
                    w -= item_probs.probability(last_item)
                if math.fabs(w - 0.0) < 1.0e-10:
                    return 0.0
                p *= item_probs.probability(item) / w
                last_items.append(item)
            return p
    def slot_item_expectations(self, samples: int = 20000) -> SlateSlotItemExpectations:
        """Per-slot distributions over items, computed and cached on demand."""
        slate_size = len(self.slots)
        if (
            self._slot_item_expectations is not None
            and len(self._slot_item_expectations) >= slate_size
        ):
            return self._slot_item_expectations
        # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`.
        item_size = len(self._values[0])
        assert item_size >= slate_size
        ps = self.values_tensor()
        if self._greedy:
            # greedy: point mass on each slot's argmax, zeroing that item
            # out of all later slots' score rows
            dists = []
            for i, value in zip(range(slate_size), self._values):
                item = ps[i].argmax().item()
                dist = torch.zeros(item_size, dtype=torch.double)
                dist[item] = 1.0
                dists.append(value.replace(dist))
                ps[torch.arange(i + 1, slate_size), item] = 0.0
            self._slot_item_expectations = make_slot_item_distributions(
                self.slots, dists
            )
        else:
            # exact calculation when cheap enough, else Monte Carlo
            if is_to_calculate_expectation(slate_size, item_size):
                self._calculate_expectations()
            else:
                self._sample_expectations(samples * item_size)
        return self._slot_item_expectations
    def _sample_expectations(self, num_samples: int):
        """Monte Carlo estimate of slot/item expectations."""
        slate_size = len(self.slots)
        # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`.
        item_size = len(self._values[0])
        dm = torch.zeros((slate_size, item_size), dtype=torch.double)
        ri = torch.arange(slate_size)
        for _ in range(num_samples):
            # fresh copy each draw; picked items are zeroed for later slots
            ps = self.values_tensor()
            sample = []
            for i in range(slate_size):
                item = ps[i].multinomial(1)
                sample.append(item)
                ps[torch.arange(i + 1, slate_size), item] = 0.0
            dm[ri, sample] += 1
        dm /= num_samples
        self._slot_item_expectations = make_slot_item_distributions(
            self.slots, [ivs.replace(vs) for ivs, vs in zip(self._values, dm)]
        )
    def _calculate_expectations(self):
        """Exact slot/item expectations via ordered-combination enumeration."""
        slate_size = len(self.slots)
        item_size = len(self._values[0])
        dm = torch.zeros((slate_size, item_size), dtype=torch.double)
        prob_list = []
        for v in self._values:
            v._normalize()
            prob_list.append(v._probabilities.detach().clone())
        dm[0] = prob_list[0]
        # NOTE: the initial b0 is an empty dict; `i in {}` is False, so
        # membership tests behave the same as with an empty set
        buffer = [({}, 1.0, 0.0, 1.0)]
        for d, probs in zip(dm[1:], prob_list[1:]):
            buffer = _calculate_slot_expectation(d, probs.tolist(), buffer)
        self._slot_item_expectations = make_slot_item_distributions(
            self.slots, [its.replace(vs) for its, vs in zip(self._values, dm)]
        )
    def sample_slate(self, slots: SlateSlots) -> Slate:
        """Draw one slate, slot by slot, without replacement."""
        slate_size = len(slots)
        ps = self.values_tensor()
        items = []
        if self._greedy:
            # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`.
            for i, value in zip(range(slate_size), self._values):
                item = ps[i].argmax().item()
                items.append(value.items[item])
                ps[torch.arange(i + 1, slate_size), item] = 0.0
        else:
            for i, value in zip(range(slate_size), self._values):
                item = ps[i].multinomial(1).item()
                items.append(value.items[item])
                ps[torch.arange(i + 1, slate_size), item] = 0.0
        return make_slate(slots, items)
class RewardDistribution(ABC):
    """
    Return customized probability distribution according to rewards
    """
    def __init__(self, deterministic: bool = False):
        # deterministic=True makes downstream policies greedy
        self._deterministic = deterministic
    @abstractmethod
    def distribution(self, rewards: Tensor) -> Tensor:
        """Map a reward tensor to (unnormalized) selection weights."""
        pass
    def __call__(self, rewards: SlateItemValues) -> SlateItemProbabilities:
        dist = self.distribution(rewards.values)
        return SlateItemProbabilities(rewards.items.fill(dist), self._deterministic)
    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier used to label experiment results."""
        pass
class PassThruDistribution(RewardDistribution):
    """Identity distribution: selection weight equals the raw reward."""

    def distribution(self, rewards: Tensor) -> Tensor:
        # hand back an independent, detached copy so callers cannot
        # mutate the original reward tensor through the result
        detached = rewards.detach()
        return detached.clone()

    @property
    def name(self) -> str:
        return f"{self._deterministic}"

    def __repr__(self):
        return f"PassThruDistribution[deterministic={self._deterministic}]"
class RankingDistribution(RewardDistribution):
    """
    Ranking distribution according to https://arxiv.org/abs/1605.04812
    """
    def __init__(self, alpha: float = -1.0, deterministic: bool = False):
        super().__init__(deterministic)
        # alpha < 0 disables re-weighting (pass-through of rewards)
        self._alpha = alpha
    def distribution(self, rewards: Tensor) -> Tensor:
        dist = rewards.detach().clone()
        if self._alpha >= 0:
            # weight = 2 ** -floor(alpha * log2(rank)), rank by reward desc
            _, ids = torch.sort(rewards, descending=True)
            rank = torch.arange(1, ids.shape[0] + 1, dtype=torch.double)
            dist[ids] = torch.pow(
                2.0,
                # pyre-fixme[16]: `float` has no attribute `floor_`.
                (-1.0 * (self._alpha * torch.log2(rank)).floor_()),
            )
        return dist
    @property
    def name(self) -> str:
        return f"ranking_{self._alpha}_{self._deterministic}"
    def __repr__(self):
        return (
            f"RankingDistribution[alpha={self._alpha}"
            f",deterministic={self._deterministic}]"
        )
class FrechetDistribution(RewardDistribution):
    """
    Frechet distribution: selection weight is reward ** shape, so larger
    shape values concentrate probability on the highest-reward items.
    """
    def __init__(self, shape: float, deterministic: bool = False):
        super().__init__(deterministic)
        self._shape = shape

    def distribution(self, rewards: Tensor) -> Tensor:
        return torch.pow(rewards, self._shape)

    @property
    def name(self) -> str:
        return f"frechet_{self._shape}_{self._deterministic}"

    def __repr__(self):
        # fixed: the first fragment previously ended with a stray "]",
        # producing the malformed "FrechetDistribution[shape=X],deterministic=Y]"
        return (
            f"FrechetDistribution[shape={self._shape}"
            f",deterministic={self._deterministic}]"
        )
# Identifier for a slate request (e.g. a query id or feature vector)
SlateQueryType = Union[Tuple[int], Tuple[float], np.ndarray, Tensor, Tuple[int, int]]
SlateQuery = TypeWrapper[SlateQueryType]
@dataclass(frozen=True)
class SlateContext:
    """Immutable context for one slate request: the query and its slots."""
    query: SlateQuery
    slots: SlateSlots
    # free-form extra parameters; opaque to the estimators
    params: object = None
class SlatePolicy(ABC):
    """
    Policy interface
    """
    def __init__(self, device=None):
        self.device = device
    @abstractmethod
    def _query(self, context: SlateContext) -> SlateItemProbabilities:
        """Return item selection probabilities for the given context."""
        pass
    def __call__(self, context: SlateContext) -> SlateItemProbabilities:
        return self._query(context)
class SlateMetric:
    """
    Metric calculator for a slate: weights (dot) rewards
    Base class is just sum of the all item rewards
    """
    def __init__(self, device=None):
        self._device = device
    def calculate_reward(
        self,
        slots: SlateSlots,
        rewards: Optional[SlateSlotValues] = None,
        slot_values: Optional[SlateSlotValues] = None,
        slot_weights: Optional[SlateSlotValues] = None,
    ) -> float:
        """Weighted sum of slot values; derives values/weights if omitted."""
        if slot_values is None:
            assert rewards is not None
            slot_values = self.slot_values(rewards)
        values = slot_values.values.to(device=self._device)
        if slot_weights is None:
            slot_weights = self.slot_weights(slots)
        weights = slot_weights.values.to(device=self._device)
        # dot product of per-slot values and per-slot weights
        return torch.tensordot(values, weights, dims=([0], [0])).item()
    def __call__(self, slots: SlateSlots, rewards: SlateSlotValues) -> float:
        return self.calculate_reward(slots, rewards)
    def slot_weights(self, slots: SlateSlots) -> SlateSlotValues:
        # base metric: uniform weight of 1 per slot
        return slots.fill([1.0] * len(slots))
    def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues:
        # base metric: rewards used as-is
        return rewards
class DCGSlateMetric(SlateMetric):
    """DCG: slot weight 1/log2(rank+1), slot value 2**reward - 1."""
    # class-level cache of discount weights, shared across instances
    _weights: Optional[Tensor] = None
    def _get_discount(self, slate_size: int) -> Tensor:
        """Return (and cache) the first `slate_size` DCG discount factors."""
        weights = DCGSlateMetric._weights
        if (
            weights is None
            or weights.shape[0] < slate_size
            or weights.device != self._device
        ):
            # 1 / log2(rank + 1) for ranks 1..slate_size
            DCGSlateMetric._weights = torch.reciprocal(
                torch.log2(
                    torch.arange(
                        2, slate_size + 2, dtype=torch.double, device=self._device
                    )
                )
            )
        weights = DCGSlateMetric._weights
        assert weights is not None
        return weights[:slate_size]
    def slot_weights(self, slots: SlateSlots) -> SlateSlotValues:
        return slots.fill(self._get_discount(len(slots)))
    def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues:
        # gain = 2**reward - 1
        # pyre-fixme[7]: Expected `SlateSlotValues` but got `Values[typing.Any]`.
        return rewards.replace(torch.pow(2.0, rewards.values) - 1.0)
class NDCGSlateMetric(DCGSlateMetric):
    """NDCG: DCG normalized by the ideal DCG for the given item rewards."""
    def __init__(self, item_rewards: SlateItemValues, device=None):
        super().__init__(device)
        # items sorted by reward define the ideal ranking
        self._sorted_items, _ = item_rewards.sort()
        self._item_rewards = item_rewards
        # cache of ideal DCG per slate size
        self._idcg = {}
    def slot_weights(self, slots: SlateSlots) -> SlateSlotValues:
        slate_size = len(slots)
        assert len(self._sorted_items) >= slate_size
        if slate_size not in self._idcg:
            # ideal DCG: top items placed in order
            i_slate = make_slate(slots, self._sorted_items[:slate_size])
            idcg = super().calculate_reward(
                slots,
                i_slate.slot_values(self._item_rewards),
                None,
                super().slot_weights(slots),
            )
            self._idcg[slate_size] = idcg
        else:
            idcg = self._idcg[slate_size]
        return slots.fill(
            # idcg == 0 would divide by zero; weight everything 0 instead
            torch.zeros(slate_size, dtype=torch.double)
            if idcg == 0
            else self._get_discount(slate_size) / idcg
        )
class ERRSlateMetric(SlateMetric):
    """Expected Reciprocal Rank style metric with 1/(rank) slot weights."""
    def __init__(self, max_reward: float, device=None):
        super().__init__(device)
        self._max_reward = max_reward
    def slot_weights(self, slots: SlateSlots) -> SlateSlotValues:
        # reciprocal-rank weights: 1, 1/2, 1/3, ...
        return slots.fill([1.0 / (r + 1) for r in range(len(slots))])
    def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues:
        # NOTE(review): normalizer d is max_reward ** 2, whereas the ERR
        # paper uses 2 ** max_reward -- confirm this is intended
        d = torch.tensor(self._max_reward, device=self._device).pow(2.0)
        r = (torch.pow(2.0, rewards.values.clamp(0.0, self._max_reward)) - 1.0) / d
        # cascade model: p carries the probability the user reaches slot i
        p = 1.0
        err = torch.zeros(len(rewards), dtype=torch.double, device=self._device)
        for i in range(len(rewards)):
            ri = r[i]
            err[i] = p * ri
            p = p * (1.0 - ri.item())
        # pyre-fixme[7]: Expected `SlateSlotValues` but got `Values[typing.Any]`.
        return rewards.replace(err)
class SlateModel(ABC):
    """
    Model providing item relevance/reward, slot examination (click) distribution
    """
    @abstractmethod
    def item_rewards(self, context: SlateContext) -> SlateItemValues:
        """
        Returns each item's relevance under the context
        Args:
            context:
        Returns:
            Item relevances
        """
        pass
    def slot_probabilities(self, context: SlateContext) -> SlateSlotValues:
        """
        Returns each slot/positions's probability independent of showing item,
        used in PBM estimator
        Args:
            context:
        Returns:
            Examination probability per slot (default: 1.0 everywhere)
        """
        return context.slots.fill(torch.ones(len(context.slots), dtype=torch.double))
@dataclass(frozen=True)
class LogSample:
    """One logged slate impression plus the behavior (log) and target policy
    information needed to evaluate it.

    For each policy the slate distribution may be supplied in one of three
    forms, consulted in this order by the accessors below: an explicit slate
    probability, per-slot item distributions, or a single item distribution
    expanded over slots.
    """

    context: SlateContext
    metric: SlateMetric
    log_slate: Slate
    log_reward: Reward
    # probability of log_slate under the behavior policy (NaN = not provided)
    _log_slate_probability: Probability = float("nan")
    # probability for each item being placed at each slot
    _log_slot_item_probabilities: Optional[SlateSlotItemProbabilities] = None
    # item probability distribution from behavior policy
    _log_item_probabilities: Optional[SlateItemProbabilities] = None
    # probability of log_slate under the target policy (NaN = not provided)
    _tgt_slate_probability: Probability = float("nan")
    _tgt_slot_item_probabilities: Optional[SlateSlotItemProbabilities] = None
    # item probability distribution from target policy
    _tgt_item_probabilities: Optional[SlateItemProbabilities] = None
    # gt_item_rewards: Optional[SlateItemValues] = None
    # pre-calculated ground truth for target policy
    ground_truth_reward: Reward = float("nan")
    # context dependent slot weights (e.g. DCG or ERR weights), used by PBM
    slot_weights: Optional[SlateSlotValues] = None
    # item/action independent examination probabilities of each slot, used by PBM
    slot_probabilities: Optional[SlateSlotValues] = None
    # features associated with the slate, to train direct model
    item_features: Optional[SlateItemFeatures] = None

    def validate(self):
        """Assert internal consistency: all sizes match the context's
        slots/items, and provided probabilities are <= 1 (NaN means the
        corresponding field was not supplied)."""
        slate_size = len(self.context.slots)
        item_size = len(self.items)
        assert len(self.log_slate) == slate_size
        assert (
            math.isnan(self._log_slate_probability)
            or self._log_slate_probability <= 1.0
        )
        assert (
            math.isnan(self._tgt_slate_probability)
            or self._tgt_slate_probability <= 1.0
        )
        assert (
            self._log_slot_item_probabilities is None
            or len(self._log_slot_item_probabilities) == slate_size
        )
        assert (
            self._log_item_probabilities is None
            or len(self._log_item_probabilities) == item_size
        )
        assert (
            self._tgt_slot_item_probabilities is None
            or len(self._tgt_slot_item_probabilities) == slate_size
        )
        assert (
            self._tgt_item_probabilities is None
            or len(self._tgt_item_probabilities) == item_size
        )
        assert self.slot_weights is None or len(self.slot_weights) == slate_size
        assert (
            self.slot_probabilities is None
            or len(self.slot_probabilities) == slate_size
        )

    def log_slot_item_expectations(
        self, slots: SlateSlots
    ) -> Optional[SlateSlotItemExpectations]:
        """Per-slot item distribution of the behavior policy, or None if
        neither representation is available."""
        if self._log_slot_item_probabilities is not None:
            return self._log_slot_item_probabilities.slot_item_expectations()
        if self._log_item_probabilities is not None:
            return self._log_item_probabilities.slot_item_expectations(slots)
        return None

    def log_slate_probability(self, slate: Optional[Slate] = None) -> float:
        """Probability of ``slate`` (default: the logged slate) under the
        behavior policy; 0.0 when no distribution is available."""
        if not math.isnan(self._log_slate_probability):
            return self._log_slate_probability
        if slate is None:
            slate = self.log_slate
        if self._log_slot_item_probabilities is not None:
            return self._log_slot_item_probabilities.slate_probability(slate)
        if self._log_item_probabilities is not None:
            return self._log_item_probabilities.slate_probability(slate)
        return 0.0

    def tgt_slot_expectations(
        self, slots: SlateSlots
    ) -> Optional[SlateSlotItemExpectations]:
        """Per-slot item distribution of the target policy, or None if
        neither representation is available."""
        if self._tgt_slot_item_probabilities is not None:
            return self._tgt_slot_item_probabilities.slot_item_expectations()
        if self._tgt_item_probabilities is not None:
            return self._tgt_item_probabilities.slot_item_expectations(slots)
        return None

    def tgt_slate_probability(self) -> float:
        """Probability of the logged slate under the target policy; 0.0 when
        no distribution is available."""
        if not math.isnan(self._tgt_slate_probability):
            return self._tgt_slate_probability
        if self._tgt_slot_item_probabilities is not None:
            return self._tgt_slot_item_probabilities.slate_probability(self.log_slate)
        if self._tgt_item_probabilities is not None:
            return self._tgt_item_probabilities.slate_probability(self.log_slate)
        return 0.0

    def tgt_slate_space(
        self, slots: SlateSlots
    ) -> Iterable[Tuple[Sequence[SlateItem], float]]:
        """Enumerate (items, probability) pairs over the target policy's
        slate space; empty when only slot-level probabilities are known."""
        if self._tgt_item_probabilities is not None:
            return self._tgt_item_probabilities.slate_space(slots)
        return []

    @property
    def items(self) -> SlateItems:
        """Candidate item space, inferred from whichever behavior-policy
        distribution is present; empty when neither is."""
        if self._log_slot_item_probabilities is not None:
            # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`.
            return self._log_slot_item_probabilities._values[0].items
        if self._log_item_probabilities is not None:
            return self._log_item_probabilities.items
        return SlateItems(0)
@dataclass(frozen=True)
class SlateEstimatorInput:
    """Batch of logged slate samples fed to a slate estimator."""

    samples: Sequence[LogSample]

    def validate(self):
        """Sanity-check every sample (raises AssertionError on bad input)."""
        for sample in self.samples:
            sample.validate()
class SlateEstimator(Estimator):
    """Base class for slate OPE estimators: subclasses score one logged
    sample at a time via _evaluate_sample()."""

    @abstractmethod
    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        """Return the per-sample estimate, or None when the sample cannot be
        scored (callers skip None results)."""
        pass
class DMEstimator(SlateEstimator):
    """
    Direct Method estimator: fits a regression model from logged slate
    features to reward, then scores the target policy by averaging the
    model's predictions over each sample's target slate distribution.
    """

    def __init__(self, trainer: Trainer, training_sample_ratio: float, device=None):
        """
        Args:
            trainer: regression trainer for the reward model.
            training_sample_ratio: fraction of samples reserved for training;
                values outside (0, 1) mean "train on everything".
            device: torch device used for prediction.
        """
        super().__init__(device)
        self._trainer = trainer
        self._training_sample_ratio = training_sample_ratio

    def _train_model(
        self, samples: Sequence[LogSample]
    ) -> Optional[Iterable[LogSample]]:
        """Train the reward model; returns the samples to evaluate on, or
        None when training is impossible (no trainer / no slate features)."""
        if self._trainer is None:
            logger.error("Target model trainer is none, DM is not available")
            return None
        self._trainer.reset()
        logger.info("  training direct model...")
        st = time.perf_counter()
        sample_size = len(samples)
        # A valid ratio reserves a prefix of the samples for training;
        # otherwise train on everything.
        if self._training_sample_ratio > 0.0 and self._training_sample_ratio < 1.0:
            training_samples = range(int(sample_size * self._training_sample_ratio))
        else:
            training_samples = range(sample_size)
        train_x = []
        train_y = []
        vali_mask = [True] * len(samples)
        for i in training_samples:
            sample = samples[i]
            if sample.item_features is None:
                continue
            slate_features = sample.log_slate.slot_features(sample.item_features)
            train_x.append(slate_features.features.flatten())
            train_y.append(sample.log_reward)
            vali_mask[i] = False
        if len(train_x) == 0:
            logger.error("Slate features not provided, DM is not available")
            return None
        train_x = torch.stack(train_x)
        train_y = torch.tensor(train_y, dtype=torch.double, device=train_x.device)
        # Remaining samples form the validation/evaluation set; fall back to
        # the training set if nothing is left.
        vali_x = []
        vali_y = []
        evaluate_samples = []
        for mask, sample in zip(vali_mask, samples):
            if not mask or sample.item_features is None:
                continue
            slate_features = sample.log_slate.slot_features(sample.item_features)
            vali_x.append(slate_features.features.flatten())
            vali_y.append(sample.log_reward)
            evaluate_samples.append(sample)
        if len(vali_x) == 0:
            vali_x = train_x.detach().clone()
            vali_y = train_y.detach().clone()
            evaluate_samples = samples
        else:
            vali_x = torch.stack(vali_x)
            vali_y = torch.tensor(vali_y, dtype=torch.double, device=vali_x.device)
        training_data = TrainingData(train_x, train_y, None, vali_x, vali_y, None)
        self._trainer.train(training_data)
        logger.info(f"  training direct model done: {time.perf_counter() - st}s")
        return evaluate_samples

    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        """Average the model's predicted reward over the target slate space."""
        slots = sample.context.slots
        tgt_slate_space = sample.tgt_slate_space(slots)
        features = []
        probs = []
        for items, prob in tgt_slate_space:
            slate = make_slate(slots, items)
            assert sample.item_features is not None
            slate_features = slate.slot_features(sample.item_features)
            features.append(slate_features.features.flatten())
            probs.append(prob)
        if len(features) == 0:
            # Fix: the target slate space can be empty (e.g. only slot-level
            # target probabilities are available); torch.stack() on an empty
            # list raises, so skip the sample instead (evaluate() drops None).
            return None
        preds = self._trainer.predict(torch.stack(features), device=self._device)
        tgt_reward = torch.dot(
            preds.scores, torch.tensor(probs, dtype=torch.double, device=self._device)
        )
        return EstimatorSampleResult(
            sample.log_reward,
            tgt_reward.item(),
            sample.ground_truth_reward,
            float("nan"),
        )

    # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently.
    def evaluate(
        self, input: SlateEstimatorInput, *kwargs
    ) -> Optional[EstimatorResult]:
        """Train the model, then average per-sample results."""
        input.validate()
        samples = self._train_model(input.samples)
        if samples is None:
            return None
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        gt_avg = RunningAverage()
        for sample in samples:
            result = self._evaluate_sample(sample)
            if result is None:
                continue
            log_avg.add(result.log_reward)
            tgt_avg.add(result.target_reward)
            gt_avg.add(result.ground_truth_reward)
        return EstimatorResult(
            log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count
        )

    def __repr__(self):
        return (
            f"DMEstimator(trainer({self._trainer.name})"
            f",ratio({self._training_sample_ratio}),device({self._device}))"
        )
class IPSEstimator(SlateEstimator):
    """
    Inverse Propensity Scoring estimator for slates: each logged reward is
    reweighted by P_target(slate) / P_behavior(slate), optionally clamped
    and self-normalized.
    """

    def __init__(
        self,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = True,
        device=None,
    ):
        """
        Args:
            weight_clamper: clamps the importance weight; identity by default.
            weighted: if True, normalize by the accumulated weights
                (self-normalized IPS) instead of the sample count.
            device: torch device (unused directly here, kept for parity).
        """
        super().__init__(device)
        self._weight_clamper = (
            weight_clamper if weight_clamper is not None else Clamper()
        )
        self._weighted = weighted

    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        tgt_prob = sample.tgt_slate_probability()
        log_prob = sample.log_slate_probability(sample.log_slate)
        if tgt_prob == log_prob:
            weight = 1.0
        elif tgt_prob <= 0.0:
            weight = 0.0
        elif log_prob <= 0.0:
            # Logged slate impossible under the behavior policy: unusable.
            return None
        else:
            weight = self._weight_clamper(tgt_prob / log_prob)
        return EstimatorSampleResult(
            sample.log_reward,
            sample.log_reward * weight,
            sample.ground_truth_reward,
            weight,
        )

    # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently.
    def evaluate(
        self, input: SlateEstimatorInput, *kwargs
    ) -> Optional[EstimatorResult]:
        input.validate()
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        acc_weight = RunningAverage()
        gt_avg = RunningAverage()
        zw = 0  # counts unusable and zero-weight samples
        for sample in input.samples:
            result = self._evaluate_sample(sample)
            if result is None:
                zw += 1
                continue
            log_avg.add(result.log_reward)
            tgt_avg.add(result.target_reward)
            gt_avg.add(result.ground_truth_reward)
            acc_weight.add(result.weight)
            if result.weight == 0.0:
                zw += 1
        # Fix: use the module logger (consistent with the rest of this file)
        # instead of the root logging module.
        logger.info(
            f"IPSEstimator invalid sample pct: {zw * 100 / len(input.samples)}%"
        )
        if tgt_avg.count == 0:
            return None
        if self._weighted:
            # Self-normalized IPS.
            estimated = tgt_avg.total / acc_weight.total
            return EstimatorResult(
                log_avg.average, estimated, gt_avg.average, acc_weight.average
            )
        else:
            return EstimatorResult(
                log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count
            )

    def __repr__(self):
        return (
            f"IPSEstimator(weight_clamper({self._weight_clamper})"
            f",weighted({self._weighted}),device({self._device}))"
        )
class DoublyRobustEstimator(DMEstimator):
    """Doubly Robust estimator for slates: combines the direct model (DM)
    estimate with an importance-weighted correction of the residual
    (log_reward - DM prediction on the logged slate). Falls back to pure
    IPS behavior when the direct model could not be trained."""

    def __init__(
        self,
        trainer: Trainer,
        training_sample_ratio: float,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = False,
        device=None,
    ):
        """
        Args:
            trainer: regression trainer for the DM reward model.
            training_sample_ratio: fraction of samples used to train DM.
            weight_clamper: clamps the importance weight; identity by default.
            weighted: if True, normalize by accumulated weights.
            device: torch device used for prediction.
        """
        super().__init__(trainer, training_sample_ratio, device)
        self._weight_clamper = (
            weight_clamper if weight_clamper is not None else Clamper()
        )
        self._weighted = weighted

    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        slots = sample.context.slots
        if self._trainer.is_trained:
            # DM term: expected model reward under the target slate space.
            tgt_slate_space = sample.tgt_slate_space(slots)
            features = []
            probs = []
            for items, prob in tgt_slate_space:
                slate = make_slate(slots, items)
                assert sample.item_features is not None
                slate_features = slate.slot_features(sample.item_features)
                features.append(slate_features.features.flatten())
                probs.append(prob)
            preds = self._trainer.predict(torch.stack(features), device=self._device)
            dm_reward = torch.dot(
                preds.scores,
                torch.tensor(probs, dtype=torch.double, device=self._device),
            ).item()
            # Baseline: model prediction for the slate actually logged.
            assert sample.item_features is not None
            log_slate_feature = sample.log_slate.slot_features(sample.item_features)
            pred = self._trainer.predict(
                torch.unsqueeze(log_slate_feature.features.flatten(), dim=0),
                device=self._device,
            )
            log_dm_reward = pred.scores[0].item()
        else:
            # Untrained model: DR degenerates to plain IPS.
            dm_reward = 0.0
            log_dm_reward = 0.0
        # Importance weight of the logged slate (target / behavior).
        tgt_prob = sample.tgt_slate_probability()
        log_prob = sample.log_slate_probability(sample.log_slate)
        if tgt_prob == log_prob:
            weight = 1.0
        elif tgt_prob <= 0.0:
            weight = 0.0
        elif log_prob <= 0.0:
            # Logged slate impossible under the behavior policy: unusable.
            return None
        else:
            weight = self._weight_clamper(tgt_prob / log_prob)
        # DR combination: DM estimate plus weighted residual correction.
        target_reward = (sample.log_reward - log_dm_reward) * weight + dm_reward
        return EstimatorSampleResult(
            sample.log_reward, target_reward, sample.ground_truth_reward, weight
        )

    def evaluate(
        self, input: SlateEstimatorInput, *kwargs
    ) -> Optional[EstimatorResult]:
        input.validate()
        samples = self._train_model(input.samples)
        if samples is None:
            # Training failed: evaluate all samples (DM term will be zero).
            samples = input.samples
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        acc_weight = RunningAverage()
        gt_avg = RunningAverage()
        for sample in samples:
            result = self._evaluate_sample(sample)
            if result is None:
                continue
            log_avg.add(result.log_reward)
            tgt_avg.add(result.target_reward)
            acc_weight.add(result.weight)
            gt_avg.add(result.ground_truth_reward)
        # NOTE(review): unlike IPSEstimator.evaluate there is no
        # tgt_avg.count == 0 guard here -- confirm intended behavior when no
        # sample is usable.
        if self._weighted:
            estimated = tgt_avg.total / acc_weight.total
            return EstimatorResult(
                log_avg.average, estimated, gt_avg.average, acc_weight.average
            )
        else:
            return EstimatorResult(
                log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count
            )

    def __repr__(self):
        return (
            f"DoublyRobustEstimator(trainer({self._trainer.name})"
            f",ratio({self._training_sample_ratio})"
            f",weight_clamper({self._weight_clamper})"
            f",weighted({self._weighted}),device({self._device}))"
        )
class PseudoInverseEstimator(SlateEstimator):
    """
    Estimator from reference 2 (pseudo-inverse estimator): assumes the slate
    reward decomposes additively over slot-item memberships, so the
    importance weight can be computed in the slot-item indicator space via a
    pseudo-inverse of the behavior policy's second-moment matrix.
    """

    def __init__(
        self,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = True,
        device=None,
    ):
        """
        Args:
            weight_clamper: clamps the importance weight; identity by default.
            weighted: if True, normalize by accumulated weights.
            device: torch device for tensor math.
        """
        super().__init__(device)
        self._weight_clamper = (
            weight_clamper if weight_clamper is not None else Clamper()
        )
        self._weighted = weighted

    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        log_slot_expects = sample.log_slot_item_expectations(sample.context.slots)
        if log_slot_expects is None:
            logger.warning("Log slot distribution not available")
            return None
        tgt_slot_expects = sample.tgt_slot_expectations(sample.context.slots)
        if tgt_slot_expects is None:
            logger.warning("Target slot distribution not available")
            return None
        log_indicator = log_slot_expects.values_tensor(self._device)
        tgt_indicator = tgt_slot_expects.values_tensor(self._device)
        # Flattened slot-item indicator dimension.
        lm = len(sample.context.slots) * len(sample.items)
        # NOTE(review): .numpy() requires CPU tensors -- confirm _device.
        gamma = torch.as_tensor(
            np.linalg.pinv(
                torch.mm(
                    log_indicator.view((lm, 1)), log_indicator.view((1, lm))
                ).numpy()
            )
        )
        # torch.pinverse is not very stable
        # gamma = torch.pinverse(
        #     torch.mm(log_indicator.view((lm, 1)), log_indicator.view((1, lm)))
        # )
        ones = sample.log_slate.one_hots(sample.items, self._device)
        # weight = q_tgt^T * Gamma * one_hot(logged slate)
        weight = self._weight_clamper(
            torch.mm(tgt_indicator.view((1, lm)), torch.mm(gamma, ones.view((lm, 1))))
        ).item()
        return EstimatorSampleResult(
            sample.log_reward,
            sample.log_reward * weight,
            sample.ground_truth_reward,
            weight,
        )

    # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently.
    def evaluate(
        self, input: SlateEstimatorInput, *kwargs
    ) -> Optional[EstimatorResult]:
        input.validate()
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        acc_weight = RunningAverage()
        gt_avg = RunningAverage()
        zw = 0  # counts unusable and zero-weight samples
        for sample in input.samples:
            result = self._evaluate_sample(sample)
            if result is None:
                zw += 1
                continue
            log_avg.add(result.log_reward)
            tgt_avg.add(result.target_reward)
            gt_avg.add(result.ground_truth_reward)
            acc_weight.add(result.weight)
            if result.weight == 0.0:
                zw += 1
            if tgt_avg.count % 1000 == 0:
                logger.info(f"  PseudoInverseEstimator: processed {tgt_avg.count}")
        # Fix: use the module logger (consistent with the rest of this file)
        # instead of the root logging module.
        logger.info(
            f"PseudoInverseEstimator invalid sample pct: {zw * 100 / len(input.samples)}%"
        )
        if tgt_avg.count == 0:
            return None
        if self._weighted:
            estimated = tgt_avg.total / acc_weight.total
            return EstimatorResult(
                log_avg.average, estimated, gt_avg.average, acc_weight.average
            )
        else:
            return EstimatorResult(
                log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count
            )

    def __repr__(self):
        return (
            f"PseudoInverseEstimator(weight_clamper({self._weight_clamper})"
            f",weighted({self._weighted}),device({self._device}))"
        )
class PBMEstimator(SlateEstimator):
    """
    Estimator from reference 1: Position-Based Click Model. Weights each
    sample by the ratio of (slot-weighted) expected examination of the
    logged items under the target vs. behavior policy.
    """

    def __init__(
        self,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = True,
        device=None,
    ):
        """
        Args:
            weight_clamper: clamps the importance weight; identity by default.
            weighted: if True, normalize by accumulated weights.
            device: torch device for tensor math.
        """
        super().__init__(device)
        self._weight_clamper = (
            weight_clamper if weight_clamper is not None else Clamper()
        )
        self._weighted = weighted

    def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]:
        log_slot_expects = sample.log_slot_item_expectations(sample.context.slots)
        if log_slot_expects is None:
            logger.warning("  Log slot distribution not available")
            return None
        tgt_slot_expects = sample.tgt_slot_expectations(sample.context.slots)
        if tgt_slot_expects is None:
            logger.warning("  Target slot distribution not available")
            return None
        slate_size = len(sample.context.slots)
        slot_weights = sample.slot_weights
        if slot_weights is None:
            slot_weights = SlateSlotValues(torch.ones(slate_size, dtype=torch.double))
        weights = slot_weights.values.to(device=self._device)
        if sample.slot_probabilities is not None:
            # Fix: multiply out-of-place. Tensor.to() returns the *same*
            # tensor when it is already on the target device, so an in-place
            # ``*=`` here would silently corrupt sample.slot_weights shared
            # with the caller (and across repeated evaluations).
            weights = weights * sample.slot_probabilities.values
        # Per-slot marginal probability of the logged (slot, item) pairs
        # under the target (h) and behavior (p) policies.
        h = torch.zeros(slate_size, dtype=torch.double, device=self._device)
        p = torch.zeros(slate_size, dtype=torch.double, device=self._device)
        i = 0
        for slot, item in sample.log_slate:
            h[i] = tgt_slot_expects[slot][item]
            p[i] = log_slot_expects[slot][item]
            i += 1
        nu = torch.tensordot(h, weights, dims=([0], [0]))
        de = torch.tensordot(p, weights, dims=([0], [0]))
        if nu == de:
            weight = 1.0
        elif nu == 0:
            weight = 0.0
        elif de == 0:
            # Logged slate unexplainable under the behavior policy: unusable.
            return None
        else:
            weight = self._weight_clamper(nu / de)
        return EstimatorSampleResult(
            sample.log_reward,
            sample.log_reward * weight,
            sample.ground_truth_reward,
            weight,
        )

    # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently.
    def evaluate(
        self, input: SlateEstimatorInput, *kwargs
    ) -> Optional[EstimatorResult]:
        input.validate()
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        acc_weight = RunningAverage()
        gt_avg = RunningAverage()
        zw = 0  # counts unusable and zero-weight samples
        for sample in input.samples:
            result = self._evaluate_sample(sample)
            if result is None:
                zw += 1
                continue
            log_avg.add(result.log_reward)
            tgt_avg.add(result.target_reward)
            gt_avg.add(result.ground_truth_reward)
            acc_weight.add(result.weight)
            if result.weight == 0.0:
                zw += 1
            if tgt_avg.count % 1000 == 0:
                logger.info(f"  PBMEstimator: processed {tgt_avg.count}")
        # Fix: use the module logger (consistent with the rest of this file)
        # instead of the root logging module.
        logger.info(
            f"PBMEstimator invalid sample pct: {zw * 100 / len(input.samples)}%"
        )
        if tgt_avg.count == 0:
            return None
        if self._weighted:
            estimated = tgt_avg.total / acc_weight.total
            return EstimatorResult(
                log_avg.average, estimated, gt_avg.average, acc_weight.average
            )
        else:
            return EstimatorResult(
                log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count
            )

    def __repr__(self):
        return (
            f"PBMEstimator(weight_clamper({self._weight_clamper})"
            f",weighted({self._weighted}),device({self._device}))"
        )
| 54,387 | 34.225389 | 90 | py |
ReAgent | ReAgent-master/reagent/ope/estimators/sequential_estimators.py | #!/usr/bin/env python3
import copy
import logging
import random
import time
import typing
from abc import ABC, abstractmethod
from copy import deepcopy
from dataclasses import dataclass
from enum import Enum
from functools import reduce
from itertools import count, zip_longest
from typing import Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from reagent.ope.estimators.estimator import (
Estimator,
EstimatorResult,
EstimatorResults,
)
from reagent.ope.estimators.types import (
Action,
ActionDistribution,
ActionSpace,
Probability,
Reward,
TypeWrapper,
)
from reagent.ope.trainers.linear_trainers import LinearNet
from reagent.ope.utils import Clamper, RunningAverage
from torch import Tensor
# Raw state payload accepted by State: scalar, tuple, ndarray or tensor.
StateType = Union[float, Tuple[float], Tuple[int], np.ndarray, Tensor]


@dataclass(frozen=True)
class State(TypeWrapper[StateType]):
    """Environment state; ``is_terminal`` marks absorbing/final states."""

    # whether this state ends the episode
    is_terminal: bool = False

    def __repr__(self):
        # Splice is_terminal into the TypeWrapper repr (drop its closing '}').
        return super().__repr__()[:-1] + f",is_terminal[{self.is_terminal}]}}"
@dataclass(frozen=True)
class StateReward:
    """A (possibly absent) state paired with the reward received on entry."""

    state: Optional[State] = None
    reward: Reward = 0.0
@dataclass(frozen=True)
class RewardProbability:
    """Reward and probability associated with reaching one next state."""

    reward: Reward = 0.0
    prob: Probability = 0.0


# State distribution: State -> (reward, probability)
StateDistribution = Mapping[State, RewardProbability]
@dataclass(frozen=True)
class Transition:
    """One step of a logged episode: (last_state, action) -> (state, reward)."""

    class Status(Enum):
        NOOP = 0
        NORMAL = 1
        TERMINATED = 2

    last_state: Optional[State] = None  # from state
    action: Optional[Action] = None  # action
    action_prob: float = 0.0  # action probability under the behavior policy
    state: Optional[State] = None  # to state
    reward: float = 0.0
    status: Status = Status.NORMAL


# MDP sequence: a logged episode is a sequence of transitions
Mdp = Sequence[Transition]
class RLPolicy(ABC):
    """
    Policy interface: maps a state to a distribution over actions.
    """

    def __init__(self, action_space: ActionSpace, device=None):
        self._action_space = action_space
        self._device = device

    @abstractmethod
    def action_dist(self, state: State) -> ActionDistribution:
        """Action distribution for ``state``."""
        pass

    def __call__(self, state: State) -> ActionDistribution:
        # Convenience: policy(state) == policy.action_dist(state).
        return self.action_dist(state)

    @property
    def action_space(self):
        return self._action_space
class RandomRLPolicy(RLPolicy):
    """Uniform-random policy: every action has probability 1/|A|, for any state."""

    def __init__(self, action_space: ActionSpace, device=None):
        super().__init__(action_space, device)
        self._prob = 1.0 / len(action_space)

    def action_dist(self, state: State) -> ActionDistribution:
        # pyre-fixme[6]: distribution() is annotated for tensor-like inputs,
        # but a plain list of floats works at runtime.
        num_actions = len(self._action_space)
        uniform = [self._prob for _ in range(num_actions)]
        return self._action_space.distribution(uniform)
class EpsilonGreedyRLPolicy(RLPolicy):
    """
    A wrapper policy:
        Skewing the wrapped policy action distribution by epsilon
        Number of total actions must be given, and wrapped policy should
        calculate probabilities for all actions
    """

    def __init__(self, policy: RLPolicy, epsilon: float = 0.0):
        """
        Args:
            policy: the policy to wrap; must score every action.
            epsilon: exploration probability, in [0, 1).
        """
        assert policy is not None and 0.0 <= epsilon < 1.0
        # Fix: RLPolicy.__init__ takes (action_space, device); the previous
        # call passed only the device, leaving self._action_space set to the
        # device object and self._device to None.
        super().__init__(policy.action_space, policy._device)
        self._policy = policy
        self._exploitation_prob = 1.0 - epsilon
        # Uniform share of epsilon spread over all actions.
        self._exploration_prob = epsilon / len(policy.action_space)

    def action_dist(self, state) -> ActionDistribution:
        # Copy so the wrapped policy's distribution is never mutated.
        new_dist = deepcopy(self._policy(state))
        for a, p in new_dist:
            new_dist[a] = p * self._exploitation_prob + self._exploration_prob
        return new_dist
class Model(ABC):
    """
    Environment model interface: maps (state, action) to a distribution over
    next states with associated rewards.
    """

    @abstractmethod
    def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution:
        """Distribution over next states (with reward and probability)."""
        pass

    def __call__(self, state: State, action: Action) -> StateDistribution:
        # Convenience: model(s, a) == model.next_state_reward_dist(s, a).
        return self.next_state_reward_dist(state, action)
class ValueFunction(ABC):
    """
    Value function to calculate state and state-action values
    """

    @abstractmethod
    def state_action_value(self, state: State, action: Action) -> float:
        """Q(state, action)."""
        pass

    @abstractmethod
    def state_value(self, state: State) -> float:
        """V(state)."""
        pass

    @abstractmethod
    def reset(self):
        """Clear any cached or learned internal state."""
        pass

    def __call__(self, state: State, action: Optional[Action] = None) -> float:
        # Dispatch: Q(s, a) when an action is given, otherwise V(s).
        return (
            self.state_action_value(state, action)
            if action is not None
            else self.state_value(state)
        )
@dataclass(frozen=True)
class RLEstimatorInput:
    """Input bundle for sequential (MDP) off-policy estimators."""

    # per-step discount factor
    gamma: float
    # logged episodes from the behavior policy
    log: Sequence[Mdp]
    # policy being evaluated
    target_policy: RLPolicy
    # state(-action) value model; required by DM/DR-style estimators
    value_function: Optional[ValueFunction] = None
    # oracle value function, used only to report ground truth
    ground_truth: Optional[ValueFunction] = None
    # maximum episode length; -1 presumably means unbounded -- TODO confirm
    horizon: int = -1
    discrete_states: bool = True
class RLEstimator(Estimator):
    """Base class for sequential estimators; provides discounted-return helpers."""

    def _log_reward(self, gamma: float, mdps: Sequence[Mdp]) -> float:
        """Average discounted reward actually observed in the logged episodes."""
        avg = RunningAverage()
        for mdp in mdps:
            episode_return = 0.0
            step_discount = 1.0
            for transition in mdp:
                episode_return += step_discount * transition.reward
                step_discount *= gamma
            avg.add(episode_return)
        return avg.average

    def _estimate_value(
        self, gamma: float, mdps: Sequence[Mdp], value_function: ValueFunction
    ) -> float:
        """Average discounted state value along the logged episodes,
        truncated at the first transition with no ``last_state``."""
        avg = RunningAverage()
        for mdp in mdps:
            episode_value = 0.0
            step_discount = 1.0
            for transition in mdp:
                if transition.last_state is None:
                    break
                episode_value += step_discount * value_function(transition.last_state)
                step_discount *= gamma
            avg.add(episode_value)
        return avg.average
class DMEstimator(RLEstimator):
    """
    Direct Method estimator: scores the logged episodes purely with the
    provided value function (no importance weighting).
    """

    def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults:
        # kwargs is part of the function signature, so to satisfy pyre it must be included
        assert input.value_function is not None
        logging.info(f"{self}: start evaluating")
        stime = time.process_time()
        results = EstimatorResults()
        estimate = self._estimate_value(input.gamma, input.log, input.value_function)
        # Fix: compute the ground truth eagerly (or None), instead of
        # referencing a conditionally-bound local in the ternary below
        # (previously flagged by pyre as possibly uninitialized).
        gt = (
            None
            if input.ground_truth is None
            else self._estimate_value(input.gamma, input.log, input.ground_truth)
        )
        results.append(
            EstimatorResult(
                self._log_reward(input.gamma, input.log),
                estimate,
                gt,
            )
        )
        logging.info(
            f"{self}: finishing evaluating["
            f"process_time={time.process_time() - stime}]"
        )
        return results
class IPSEstimator(RLEstimator):
    """
    IPS estimator: sequential importance sampling. Each trajectory step is
    weighted by the cumulative product of target/behavior action-probability
    ratios up to that step, optionally self-normalized per time step.
    """

    def __init__(
        self,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = True,
        device=None,
    ):
        """
        Args:
            weight_clamper: clamps the importance weights; identity by default.
            weighted: if True, self-normalize the weights per time step.
            device: torch device for the weight tensors.
        """
        super().__init__(device)
        self._weight_clamper = (
            weight_clamper if weight_clamper is not None else Clamper()
        )
        self._weighted = weighted

    def _calc_weights(
        self,
        episodes: int,
        horizon: int,
        mdp_transitions: Iterable[Iterable[Transition]],
        policy: RLPolicy,
    ) -> torch.Tensor:
        """[episodes x horizon] normalized cumulative importance weights.

        ``mdp_transitions`` is iterated time-step first (e.g. zip_longest of
        the episodes); positions past an episode's end are masked out.
        """
        pi_e = torch.ones((episodes, horizon))  # target action probabilities
        pi_b = torch.ones((episodes, horizon))  # behavior action probabilities
        mask = torch.ones((episodes, horizon))  # 1 where a valid transition exists
        j = 0
        for ts in mdp_transitions:
            i = 0
            for t in ts:
                if t is not None and t.action is not None and t.action_prob > 0.0:
                    assert t.last_state is not None
                    pi_e[i, j] = policy(t.last_state)[t.action]
                    pi_b[i, j] = t.action_prob
                else:
                    mask[i, j] = 0.0
                i += 1
            j += 1
        pi_e = pi_e.to(device=self._device)
        pi_b = pi_b.to(device=self._device)
        mask = mask.to(device=self._device)
        # Cumulative product over time of the per-step ratios, masked.
        rho = pi_e.div_(pi_b).cumprod(1).mul_(mask)
        if self._weighted:
            # Self-normalized IS: divide by the per-step sum of weights.
            weight = rho.sum(0)
        else:
            # Ordinary IS: divide by the number of live episodes per step.
            weight = mask.sum(0)
        # Guard (near-)zero normalizers by substituting `episodes`.
        weight.add_(weight.lt(1.0e-15) * episodes)
        ws = rho / weight
        return self._weight_clamper(ws)

    def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults:
        # kwargs is part of the function signature, so to satisfy pyre it must be included
        logging.info(f"{self}: start evaluating")
        stime = time.process_time()
        results = EstimatorResults()
        n = len(input.log)
        # Horizon = length of the longest logged episode.
        horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log))
        weights = self._calc_weights(
            n, horizon, zip_longest(*input.log), input.target_policy
        )
        discount = torch.full((horizon,), input.gamma, device=self._device)
        discount[0] = 1.0
        discount = discount.cumprod(0)  # [1, gamma, gamma^2, ...]
        rewards = torch.zeros((n, horizon))
        j = 0
        for ts in zip_longest(*input.log):
            i = 0
            for t in ts:
                if t is not None:
                    rewards[i, j] = t.reward
                i += 1
            j += 1
        rewards = rewards.to(device=self._device)
        # Per-step weighted reward summed over episodes, then discounted.
        estimate = weights.mul(rewards).sum(0).mul(discount).sum().item()
        results.append(
            EstimatorResult(
                self._log_reward(input.gamma, input.log),
                estimate,
                None
                if input.ground_truth is None
                else self._estimate_value(input.gamma, input.log, input.ground_truth),
            )
        )
        logging.info(
            f"{self}: finishing evaluating["
            f"process_time={time.process_time() - stime}]"
        )
        return results

    def __repr__(self):
        return super().__repr__()[0:-1] + f",weighted[{self._weighted}]}}"
class DoublyRobustEstimator(IPSEstimator):
    """
    Doubly Robust estimator:
        estimate = sum_t gamma^t * sum_i [ w_{i,t} (r_{i,t} - q_{i,t})
                                           + w_{i,t-1} v_{i,t} ]
    where w are cumulative importance weights (from IPSEstimator) and
    q/v come from the provided value function.
    """

    def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults:
        # kwargs is part of the function signature, so to satisfy pyre it must be included
        logging.info(f"{self}: start evaluating")
        stime = time.process_time()
        results = EstimatorResults()
        n = len(input.log)
        horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log))
        ws = self._calc_weights(
            n, horizon, zip_longest(*input.log), input.target_policy
        )
        # Weights shifted one step right (w_{t-1}); first column is 1/n.
        last_ws = torch.zeros((n, horizon), device=self._device)
        last_ws[:, 0] = 1.0 / n
        last_ws[:, 1:] = ws[:, :-1]
        discount = torch.full((horizon,), input.gamma, device=self._device)
        discount[0] = 1.0
        discount = discount.cumprod(0)  # [1, gamma, gamma^2, ...]
        rs = torch.zeros((n, horizon))  # rewards
        vs = torch.zeros((n, horizon))  # state values
        qs = torch.zeros((n, horizon))  # state-action values
        for ts, j in zip(zip_longest(*input.log), count()):
            for t, i in zip(ts, count()):
                if t is not None and t.action is not None:
                    assert input.value_function is not None
                    qs[i, j] = input.value_function(t.last_state, t.action)
                    vs[i, j] = input.value_function(t.last_state)
                    rs[i, j] = t.reward
        vs = vs.to(device=self._device)
        qs = qs.to(device=self._device)
        rs = rs.to(device=self._device)
        estimate = ((ws * (rs - qs) + last_ws * vs).sum(0) * discount).sum().item()
        results.append(
            EstimatorResult(
                self._log_reward(input.gamma, input.log),
                estimate,
                None
                if input.ground_truth is None
                else self._estimate_value(input.gamma, input.log, input.ground_truth),
            )
        )
        logging.info(
            f"{self}: finishing evaluating["
            f"process_time={time.process_time() - stime}]"
        )
        return results
class MAGICEstimator(IPSEstimator):
    """
    Algorithm from https://arxiv.org/abs/1604.00923, appendix G.3:
    blends per-horizon weighted-doubly-robust (WDR) partial estimates using
    a softmax-weighted combination chosen (by SGD) to minimize a
    bias-plus-variance proxy.
    """

    def __init__(self, weight_clamper: Optional[Clamper] = None, device=None):
        # MAGIC always uses self-normalized (weighted) importance weights.
        super().__init__(weight_clamper, True, device)

    def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults:
        """Supported kwargs: ``num_resamples`` (bootstrap count),
        ``loss_threshold`` (SGD stopping tolerance), ``lr`` (SGD step)."""
        assert input.value_function is not None
        logging.info(f"{self}: start evaluating")
        stime = time.process_time()
        results = EstimatorResults()
        num_resamples = kwargs["num_resamples"] if "num_resamples" in kwargs else 200
        loss_threshold = (
            kwargs["loss_threshold"] if "loss_threshold" in kwargs else 0.00001
        )
        lr = kwargs["lr"] if "lr" in kwargs else 0.0001
        logging.info(
            f"  params: num_resamples[{num_resamples}], "
            f"loss_threshold[{loss_threshold}], "
            f"lr[{lr}]"
        )
        # Compute MAGIC estimate
        n = len(input.log)
        horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log))
        ws = self._calc_weights(
            n, horizon, zip_longest(*input.log), input.target_policy
        )
        # Weights shifted one step right (w_{t-1}); first column is 1/n.
        last_ws = torch.zeros((n, horizon), device=self._device)
        last_ws[:, 0] = 1.0 / n
        last_ws[:, 1:] = ws[:, :-1]
        discount = torch.full((horizon,), input.gamma, device=self._device)
        discount[0] = 1.0
        discount = discount.cumprod(0)  # [1, gamma, gamma^2, ...]
        rs = torch.zeros((n, horizon))  # rewards
        vs = torch.zeros((n, horizon))  # state values
        qs = torch.zeros((n, horizon))  # state-action values
        for ts, j in zip(zip_longest(*input.log), count()):
            for t, i in zip(ts, count()):
                if t is not None and t.action is not None:
                    qs[i, j] = input.value_function(t.last_state, t.action)
                    vs[i, j] = input.value_function(t.last_state)
                    rs[i, j] = t.reward
        vs = vs.to(device=self._device)
        qs = qs.to(device=self._device)
        rs = rs.to(device=self._device)
        # Partial WDR estimates, cumulative up to each horizon j.
        wdrs = ((ws * (rs - qs) + last_ws * vs) * discount).cumsum(1)
        wdr = wdrs[:, -1].sum(0)
        next_vs = torch.zeros((n, horizon), device=self._device)
        next_vs[:, :-1] = vs[:, 1:]
        gs = wdrs + ws * next_vs * discount
        gs_normal = gs.sub(torch.mean(gs, 0))
        assert n > 1
        # Sample covariance of the partial estimates (variance term).
        omiga = (n / (n - 1.0)) * torch.einsum("ij,ik->jk", gs_normal, gs_normal)
        # Bootstrap the full WDR to get a confidence interval for bias.
        resample_wdrs = torch.zeros((num_resamples,))
        for i in range(num_resamples):
            samples = random.choices(range(n), k=n)
            sws = ws[samples, :]
            last_sws = last_ws[samples, :]
            srs = rs[samples, :]
            svs = vs[samples, :]
            sqs = qs[samples, :]
            resample_wdrs[i] = (
                ((sws * (srs - sqs) + last_sws * svs).sum(0) * discount).sum().item()
            )
        resample_wdrs, _ = resample_wdrs.to(device=self._device).sort(0)
        lb = torch.min(wdr, resample_wdrs[int(round(0.05 * num_resamples))])
        ub = torch.max(wdr, resample_wdrs[int(round(0.95 * num_resamples)) - 1])
        # Distance of each partial estimate from the [lb, ub] interval (bias).
        b = torch.tensor(
            list(
                map(
                    lambda a: a - ub if a > ub else (a - lb if a < lb else 0.0),
                    gs.sum(0),
                )
            ),
            device=self._device,
        )
        b.unsqueeze_(0)
        bb = b * b.t()
        cov = omiga + bb
        # x = torch.rand((1, horizon), device=self.device, requires_grad=True)
        x = torch.zeros((1, horizon), device=self._device, requires_grad=True)
        # using SGD to find min x
        optimizer = torch.optim.SGD([x], lr=lr)
        last_y = 0.0
        for i in range(100):
            x = torch.nn.functional.softmax(x, dim=1)
            y = torch.mm(torch.mm(x, cov), x.t())
            if abs(y.item() - last_y) < loss_threshold:
                # Fix: report convergence through logging instead of a stray
                # print() in library code.
                logging.info(f"{i}: {last_y} -> {y.item()}")
                break
            last_y = y.item()
            optimizer.zero_grad()
            y.backward(retain_graph=True)
            optimizer.step()
        x = torch.nn.functional.softmax(x, dim=1)
        estimate = torch.mm(x, gs.sum(0, keepdim=True).t()).cpu().item()
        results.append(
            EstimatorResult(
                self._log_reward(input.gamma, input.log),
                estimate,
                None
                if input.ground_truth is None
                else self._estimate_value(input.gamma, input.log, input.ground_truth),
            )
        )
        logging.info(
            f"{self}: finishing evaluating["
            f"process_time={time.process_time() - stime}]"
        )
        return results
@dataclass
class NeuralDualDICE(RLEstimator):
    # See https://arxiv.org/pdf/1906.04733.pdf sections 4, 5, A
    # Google's implementation: https://github.com/google-research/google-research/tree/master/dual_dice
    """
    Neural DualDICE off-policy estimator: learns a value-like function v
    (nu in the paper) and a stationary-distribution correction zeta via a
    minimax objective, then estimates the target policy's value as the
    zeta-weighted, discounted sum of logged rewards.

    Args:
        state_dim: The dimensionality of the state vectors
        action_dim: The number of discrete actions
        deterministic_env: Whether or not the environment is determinstic.
            Can help with stability of training.
        average_next_v: Whether or not to average the next nu value over all
            possible actions. Can help with stability of training.
        polynomial_degree: The degree of the convex function f(x) = 1/p * |x|^p
        value_lr: The learning rate for nu
        zeta_lr: The learning rate for zeta
        hidden_dim: The dimensionality of the hidden layers for zeta and v
        hidden_layers: The number of hidden layers for zeta and v
        activation: The activation function for zeta and v
        training_samples: The number of batches to train zeta and v for
        batch_size: The number of samples in each batch
        loss_callback_fn: A function that will be called every
            reporting_frequency batches, giving the average zeta loss,
            average nu loss, and self
        reporting_frequency: The number of batches between outputting the
            state of the training
    """

    state_dim: int
    action_dim: int
    deterministic_env: bool
    average_next_v: bool = False
    polynomial_degree: float = 1.5
    value_lr: float = 0.01
    zeta_lr: float = 0.01
    hidden_dim: int = 64
    hidden_layers: int = 2
    activation = torch.nn.Tanh
    training_samples: int = 100000
    batch_size: int = 2048
    device: typing.Any = None
    loss_callback_fn: Optional[Callable[[float, float, RLEstimator], None]] = None
    reporting_frequency: int = 1000
    # These are initialized in __post_init__() and calms Pyre
    v: typing.Any = None
    zeta: typing.Any = None
    f: typing.Any = None
    fconjugate: typing.Any = None
    zeta_net: typing.Any = None
    v_net: typing.Any = None

    def __post_init__(self):
        # f and its Fenchel conjugate share the same polynomial form with
        # reciprocal exponents: 1/p + 1/q = 1
        conjugate_exponent = self.polynomial_degree / (self.polynomial_degree - 1)
        self.f = self._get_convex_f(self.polynomial_degree)
        self.fconjugate = self._get_convex_f(conjugate_exponent)
        self.reset()

    def _get_convex_f(self, degree):
        """Return the convex function f(x) = |x|^degree / degree."""
        return lambda x: (torch.abs(x) ** degree) / degree

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    #  its type `no_grad` is not callable.
    @torch.no_grad()
    def _mdps_value(self, mdps: Sequence[Mdp], gamma: float) -> float:
        """Average over MDPs of the discounted, zeta-weighted logged rewards."""
        self.zeta_net.eval()
        avg = RunningAverage()
        for mdp in mdps:
            discount = 1.0
            r = 0.0
            for t in mdp:
                assert t.last_state is not None, "Expected last_state, got None"
                assert t.action is not None, "Expected action, got None"
                zeta = self.zeta(
                    torch.tensor(t.last_state.value, dtype=torch.float)
                    .reshape(-1, self.state_dim)
                    .to(self.device),
                    torch.nn.functional.one_hot(
                        torch.tensor(t.action.value, dtype=torch.long), self.action_dim
                    )
                    .reshape(-1, self.action_dim)
                    .float()
                    .to(self.device),
                )
                r += discount * t.reward * zeta.cpu().item()
                discount *= gamma
            avg.add(r)
        self.zeta_net.train()
        return avg.average

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    #  its type `no_grad` is not callable.
    @torch.no_grad()
    def _compute_estimates(self, input: RLEstimatorInput) -> EstimatorResults:
        """Package the zeta-based value estimate into EstimatorResults."""
        results = EstimatorResults()
        estimate = self._mdps_value(input.log, input.gamma)
        results.append(
            EstimatorResult(
                self._log_reward(input.gamma, input.log),
                estimate,
                None
                if input.ground_truth is None
                else self._estimate_value(input.gamma, input.log, input.ground_truth),
            )
        )
        return results

    def _compute_average_v(self, transition):
        """Expectation of v over the target policy's action distribution."""
        next_vs = [
            transition["tgt_action_props"][:, a].reshape(-1, 1)
            * self.v(
                transition["state"],
                torch.nn.functional.one_hot(
                    torch.tensor(a, dtype=torch.long), self.action_dim
                )
                .reshape(1, -1)
                .float()
                .to(self.device)
                .repeat(transition["state"].shape[0], 1),
            )
            for a in range(self.action_dim)
        ]
        return sum(next_vs)

    def _compute_loss(
        self, gamma: float, transition: Dict, compute_determ_v_loss: bool
    ):
        """DualDICE minimax objective for one batch.

        When compute_determ_v_loss is True, use the closed-form f(delta_v)
        (valid for deterministic environments); otherwise use the conjugate
        form with zeta. The loss is discounted by gamma^timestep and
        normalized by the sum of those discounts.
        """
        if self.average_next_v:
            next_v = self._compute_average_v(transition)
        else:
            next_v = self.v(transition["state"], transition["next_action"])
        delta_v = (
            self.v(transition["last_state"], transition["log_action"]) - gamma * next_v
        )
        init_v = self.v(transition["init_state"], transition["init_action"])
        if compute_determ_v_loss:
            unweighted_loss = self.f(delta_v) - (1 - gamma) * init_v
        else:
            zeta = self.zeta(transition["last_state"], transition["log_action"])
            unweighted_loss = (
                delta_v * zeta - self.fconjugate(zeta) - (1 - gamma) * init_v
            )
        weights = torch.full(
            (unweighted_loss.shape[0], 1), gamma, dtype=torch.float
        ).to(device=self.device) ** transition["timestep"].reshape((-1, 1))
        return torch.sum(weights * unweighted_loss) / torch.sum(weights)

    def reset(self):
        """(Re)build the v and zeta networks with identical architectures."""
        self.v_net = LinearNet(
            self.state_dim + self.action_dim,
            self.hidden_dim,
            1,
            self.hidden_layers,
            self.activation,
        )
        self.zeta_net = copy.deepcopy(self.v_net)
        self.v_net.to(self.device)
        self.zeta_net.to(self.device)
        self.v = self._build_function(self.v_net)
        self.zeta = self._build_function(self.zeta_net)

    def _build_function(self, net: torch.nn.Module):
        """Wrap `net` as a callable over concatenated (state, action) input."""
        return lambda s, a: net(torch.cat((s, a), dim=1))

    def _collect_data(self, input: RLEstimatorInput):
        """Flatten the logged MDPs into tensors of transitions on device."""
        samples = {
            "init_state": [],
            "init_action": [],
            "last_state": [],
            "state": [],
            "log_action": [],
            "next_action": [],
            "tgt_action_props": [],
            "timestep": [],
            "reward": [],
        }
        for mdp in input.log:
            state = mdp[0].last_state
            assert state is not None, "Expected initial state, got None"
            tgt_init_action = input.target_policy.action_dist(state).sample()[0]
            for i, t in enumerate(mdp):
                assert (
                    t.state is not None
                    and t.last_state is not None
                    and t.action is not None
                ), "Expected all fields to be present"
                tgt_dist = input.target_policy.action_dist(t.state)
                tgt_action = tgt_dist.sample()[0]
                samples["init_state"].append(state.value)
                samples["init_action"].append(
                    torch.nn.functional.one_hot(
                        torch.tensor(tgt_init_action.value, dtype=torch.long),
                        self.action_dim,
                    ).float()
                )
                samples["last_state"].append(t.last_state.value)
                samples["state"].append(t.state.value)
                samples["log_action"].append(
                    torch.nn.functional.one_hot(
                        torch.tensor(t.action.value, dtype=torch.long), self.action_dim
                    ).float()
                )
                samples["next_action"].append(
                    torch.nn.functional.one_hot(
                        torch.tensor(tgt_action.value, dtype=torch.long),
                        self.action_dim,
                    ).float()
                )
                samples["tgt_action_props"].append(tgt_dist.values)
                samples["timestep"].append(i)
                samples["reward"].append(t.reward)
        return {
            k: torch.stack(v).to(self.device)
            if "action" in k
            else torch.tensor(v, dtype=torch.float).to(self.device)
            for k, v in samples.items()
        }

    def _sample_batch(self, dataset):
        """Sample a random batch (with replacement) from the full dataset."""
        idxs = np.random.choice(dataset["init_state"].shape[0], self.batch_size)
        return {k: v[idxs] for k, v in dataset.items()}

    def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults:
        """Alternate gradient steps on zeta (ascent) and v (descent), then
        evaluate the learned zeta on the logged data."""
        stime = time.process_time()
        dataset = self._collect_data(input)
        logging.info(f"Data loading time: {time.process_time() - stime}")
        zeta_optim = torch.optim.Adam(self.zeta_net.parameters(), lr=self.zeta_lr)
        v_optim = torch.optim.Adam(self.v_net.parameters(), lr=self.value_lr)
        avg_zeta_loss = RunningAverage()
        avg_v_loss = RunningAverage()
        sample_time = time.process_time()
        for sampled in range(self.training_samples):
            sample = self._sample_batch(dataset)
            # zeta maximizes the objective, hence the negated loss
            zeta_loss = -(self._compute_loss(input.gamma, sample, False))
            # Populate zeta gradients and optimize
            zeta_optim.zero_grad()
            zeta_loss.backward()
            zeta_optim.step()
            if self.deterministic_env:
                v_loss = self._compute_loss(input.gamma, sample, True)
            else:
                # BUGFIX: the original called self._compute_loss(*sample),
                # which unpacks the batch dict's *keys* as nine positional
                # arguments into a three-parameter method and raises a
                # TypeError. Pass the batch explicitly, mirroring the
                # deterministic branch but with the conjugate-form loss.
                v_loss = self._compute_loss(input.gamma, sample, False)
            # Populate value gradients and optimize
            v_optim.zero_grad()
            v_loss.backward()
            v_optim.step()
            avg_zeta_loss.add(zeta_loss.cpu().item())
            avg_v_loss.add(v_loss.cpu().item())
            if sampled % self.reporting_frequency == 0:
                report_time = time.process_time() - sample_time
                callback_time = None
                if self.loss_callback_fn is not None:
                    # Pyre gets angry if we don't make callback local
                    callback = self.loss_callback_fn
                    assert callback is not None
                    stime = time.process_time()
                    callback(avg_zeta_loss.average, avg_v_loss.average, self)
                    callback_time = abs(time.process_time() - stime)
                logging.info(
                    f"Samples {sampled}, "
                    f"Avg Zeta Loss {avg_zeta_loss.average}, "
                    f"Avg Value Loss {avg_v_loss.average},\n"
                    f"Time per {self.reporting_frequency} samples: {report_time}"
                    + (
                        ""
                        if callback_time is None
                        else f", Time for callback: {callback_time}"
                    )
                )
                avg_zeta_loss = RunningAverage()
                avg_v_loss = RunningAverage()
                sample_time = time.process_time()
        return self._compute_estimates(input)
| 28,267 | 34.964377 | 103 | py |
ReAgent | ReAgent-master/reagent/ope/estimators/types.py | #!/usr/bin/env python3
import logging
import pickle
from abc import ABC, abstractmethod
from copy import deepcopy
from dataclasses import dataclass
from typing import Generic, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
import torch
from torch import Tensor
def is_array(obj):
    """Return True iff `obj` is a torch Tensor or a numpy ndarray."""
    return isinstance(obj, (Tensor, np.ndarray))
# Generic type placeholders shared by the container classes below:
# KeyType indexes Objects/Values maps, ValueType is the stored element type.
Type = TypeVar("Type")
KeyType = TypeVar("KeyType")
ValueType = TypeVar("ValueType")
@dataclass(frozen=True)
class TypeWrapper(Generic[ValueType]):
    """Immutable wrapper that makes heterogeneous values (ints, floats,
    tuples, lists, numpy arrays, torch tensors) uniformly hashable and
    comparable, so they can serve as dict keys / set members."""

    # the wrapped raw value
    value: ValueType
    def __index__(self):
        # allow use as a sequence/tensor index when the value is int-convertible
        try:
            return int(self.value)
        except Exception:
            raise ValueError(f"{self} cannot be used as index")
    def __int__(self):
        try:
            return int(self.value)
        except Exception:
            raise ValueError(f"{self} cannot be converted to int")
    def __hash__(self):
        # hash by content: array-like values are flattened into tuples;
        # unsupported value types raise TypeError
        if (
            isinstance(self.value, int)
            or isinstance(self.value, float)
            or isinstance(self.value, tuple)
        ):
            return hash(self.value)
        elif isinstance(self.value, Tensor):
            return hash(tuple(self.value.numpy().flatten()))
        elif isinstance(self.value, np.ndarray):
            return hash(tuple(self.value.flatten()))
        elif isinstance(self.value, list):
            return hash(tuple(self.value))
        else:
            raise TypeError
    def __eq__(self, other):
        # content equality; a tensor-backed wrapper may only be compared
        # against another tensor-backed wrapper
        if not isinstance(other, TypeWrapper):
            return False
        if isinstance(self.value, Tensor):
            if isinstance(other.value, Tensor):
                return torch.equal(self.value, other.value)
            else:
                raise TypeError(f"{self} cannot be compared with non-tensor")
        elif isinstance(self.value, np.ndarray):
            return np.array_equal(self.value, other.value)
        else:
            return self.value == other.value
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        # for array-like values this is element-wise "all less than"
        # (product of the element-wise comparison results)
        if not isinstance(other, TypeWrapper):
            return False
        if isinstance(self.value, Tensor) and isinstance(other.value, Tensor):
            return torch.lt(self.value, other.value).prod().item()
        elif isinstance(self.value, np.ndarray) and isinstance(other.value, np.ndarray):
            return np.less(self.value, other.value).prod()
        else:
            return self.value < other.value
    def __repr__(self):
        return f"{self.__class__.__name__}{{value[{self.value}]}}"
class Objects(Generic[KeyType, ValueType], ABC):
    """
    Generic class for a map from item to its value.
    It supports [] indexing, and iterator protocol

    Backed either by a plain sequence (keys are implicit integer indices)
    or by a mapping (keys are explicit; _key_to_index/_index_to_key
    translate between keys and positions in _values).

    Attributes:
        items: list of items
        values: list of their values
    """
    def __init__(self, values: Union[Mapping[KeyType, ValueType], Sequence[ValueType]]):
        # key maps stay None for sequence-backed objects
        self._key_to_index = None
        self._index_to_key = None
        self._init_values(values)
        self._reset()
    def _init_values(
        self, values: Union[Mapping[KeyType, ValueType], Sequence[ValueType]]
    ):
        """Copy `values` into internal storage; build key maps for mappings."""
        if isinstance(values, Sequence):
            # pyre-fixme[16]: `Objects` has no attribute `_values`.
            self._values = list(values)
        elif isinstance(values, Mapping):
            self._key_to_index = dict(zip(values.keys(), range(len(values))))
            self._index_to_key = list(values.keys())
            self._values = list(values.values())
        else:
            raise TypeError(f"Unsupported values type {type(values)}")
    def _reset(self):
        # invalidate caches derived from _values
        self._unzipped = None
        self._keys = None
    def __getitem__(self, key: KeyType) -> ValueType:
        if self._key_to_index is not None:
            # pyre-fixme[16]: `Objects` has no attribute `_values`.
            return self._values[self._key_to_index[key]]
        else:
            return self._values[key]
    def __setitem__(self, key: KeyType, value: ValueType):
        if self._key_to_index is not None:
            # pyre-fixme[16]: `Objects` has no attribute `_values`.
            self._values[self._key_to_index[key]] = value
        else:
            self._values[key] = value
        self._reset()
    @abstractmethod
    def _to_key(self, k: int) -> KeyType:
        """Convert an integer index into this container's key type."""
        pass
    def _to_value(self, v) -> ValueType:
        # hook for subclasses that store values in a different form
        # (e.g. Values stores a tensor and converts elements to float)
        return v
    def __iter__(self):
        # iterates over (key, value) pairs
        if self._key_to_index is not None:
            return (
                (k, self._to_value(self._values[i]))
                for k, i in self._key_to_index.items()
            )
        else:
            return (
                (self._to_key(i), self._to_value(v)) for i, v in enumerate(self._values)
            )
    def __len__(self) -> int:
        # pyre-fixme[16]: `Objects` has no attribute `_values`.
        return len(self._values)
    @property
    def is_sequence(self):
        # True when keys are implicit integer indices
        return self._key_to_index is None
    @property
    def _values_copy(self) -> Sequence[ValueType]:
        # pyre-fixme[16]: `Objects` has no attribute `_values`.
        return list(self._values)
    def index_of(self, key: KeyType) -> int:
        """Return the position of `key` in `_values`; raise ValueError if invalid."""
        if self._key_to_index is None:
            try:
                # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytes, str,
                # typing.SupportsInt]` for 1st param but got `KeyType`.
                index = int(key)
                if 0 <= index < len(self):
                    return index
                else:
                    raise ValueError(f"{key} is not valid")
            except Exception:
                raise ValueError(f"{key} is not valid")
        elif self._key_to_index is not None:
            try:
                return self._key_to_index[key]
            except Exception:
                raise ValueError(f"{key} is not valid")
        else:
            # unreachable: the two branches above are exhaustive
            raise ValueError(f"{key} is not valid")
    @property
    def keys(self) -> Sequence[KeyType]:
        # lazily built and cached; invalidated by _reset()
        if self._keys is None:
            if self._key_to_index is not None:
                self._keys = list(self._key_to_index.keys())
            else:
                self._keys = [self._to_key(i) for i in range(len(self))]
        return self._keys
    @property
    def values(self):
        # returns a copy so callers cannot mutate internal storage
        return self._values_copy
    def __repr__(self):
        return f"{self.__class__.__name__}{{values[{self._values}]}}"
class Values(Objects[KeyType, float]):
    """
    Generic class for a map from item to its value.
    It supports [] indexing, and iterator protocol

    Values are stored as a 1-D double tensor; on top of Objects this class
    adds sorting, lazy normalization into a probability distribution,
    multinomial sampling and greedy selection.

    Attributes:
        items: list of items
        values: list of their values
    """
    def __init__(
        self,
        values: Union[Mapping[KeyType, float], Sequence[float], np.ndarray, Tensor],
    ):
        # pyre-fixme[6]: Expected `Union[Mapping[Variable[KeyType],
        #  Variable[ValueType]], Sequence[Variable[ValueType]]]` for 1st param but got
        #  `Union[Mapping[Variable[KeyType], float], Sequence[float], Tensor,
        #  np.ndarray]`.
        super().__init__(values)
    def _init_values(
        self,
        values: Union[Mapping[KeyType, float], Sequence[float], np.ndarray, Tensor],
    ):
        """Store values as a double tensor; build key maps for mappings."""
        if isinstance(values, Tensor):
            # pyre-fixme[16]: `Values` has no attribute `_values`.
            self._values = values.to(dtype=torch.double)
        elif isinstance(values, np.ndarray):
            self._values = torch.as_tensor(values, dtype=torch.double)
        elif isinstance(values, Sequence):
            self._values = torch.tensor(values, dtype=torch.double)
        elif isinstance(values, Mapping):
            self._key_to_index = dict(zip(values.keys(), range(len(values))))
            self._index_to_key = list(values.keys())
            self._values = torch.tensor(list(values.values()), dtype=torch.double)
        else:
            raise TypeError(f"Unsupported values type {type(values)}")
    def _reset(self):
        super()._reset()
        # invalidate the caches derived from _values
        self._probabilities = None
        self._is_normalized = False
        self._sorted = None
    def __getitem__(self, key: KeyType) -> float:
        return super().__getitem__(key).item()
    def _to_value(self, v: Tensor) -> float:
        return v.item()
    def __len__(self) -> int:
        # pyre-fixme[16]: `Values` has no attribute `_values`.
        return self._values.shape[0]
    def sort(self, descending: bool = True) -> Tuple[Sequence[KeyType], Tensor]:
        """
        Sort based on values
        Args:
            descending: sorting order
        Returns:
            Tuple of sorted indices and values
        """
        # pyre-fixme[16]: `Values` has no attribute `_sorted`.
        if self._sorted is None:
            # pyre-fixme[16]: `Values` has no attribute `_values`.
            rs, ids = torch.sort(self._values, descending=descending)
            if self._index_to_key is not None:
                self._sorted = (
                    [self._index_to_key[i.item()] for i in ids],
                    rs.detach(),
                )
            else:
                self._sorted = ([self._to_key(i.item()) for i in ids], rs.detach())
        return self._sorted
    @property
    def _values_copy(self) -> Tensor:
        # pyre-fixme[16]: `Values` has no attribute `_values`.
        return self._values.clone().detach()
    def replace(
        self,
        values: Union[Mapping[KeyType, float], Sequence[float], Tensor, np.ndarray],
    ) -> "Values":
        """
        Replace current values with new values, and returns the new copy.
        Current Values object is not changed
        Args:
            values: new value
        Returns:
            Values object with new values
        """
        copy = deepcopy(self)
        if isinstance(values, Tensor):
            # pyre-fixme[16]: `Values` has no attribute `_values`.
            assert values.shape[0] == copy._values.shape[0]
            copy._values = values.to(dtype=torch.double)
        elif isinstance(values, np.ndarray):
            assert values.shape[0] == copy._values.shape[0]
            copy._values = torch.as_tensor(values, dtype=torch.double)
        elif isinstance(values, Sequence):
            assert len(values) == copy._values.shape[0]
            copy._values = torch.tensor(values, dtype=torch.double)
        elif isinstance(values, Mapping):
            if copy._key_to_index is None:
                for k, v in values.items():
                    copy._values[k] = v
            else:
                for k, v in values.items():
                    copy._values[copy._key_to_index[k]] = v
        else:
            raise TypeError(f"Unsupported values type {type(values)}")
        return copy
    def _normalize(self):
        """Lazily turn the (non-negative-clamped) values into probabilities.

        BUGFIX: the original wrapped the tensor division in a
        ZeroDivisionError handler, but dividing a tensor by a zero-sum
        tensor never raises -- it silently produces NaNs. Guard the
        degenerate total explicitly so `_probabilities` stays None for an
        all-zero/negative distribution (and a repeated call raises
        ValueError, as the original handler intended).
        """
        if self._is_normalized:
            if self._probabilities is None:
                raise ValueError(f"Invalid distribution {type(self._values)}")
            return
        self._is_normalized = True
        self._probabilities = None
        dist = self._values.detach().clamp(min=0.0)
        total = dist.sum()
        if total.item() > 0.0:
            self._probabilities = dist / total
    def probability(self, key: KeyType) -> float:
        """Normalized probability of `key`; 0.0 for a degenerate distribution."""
        self._normalize()
        # pyre-fixme[16]: `Values` has no attribute `_probabilities`.
        if self._probabilities is not None:
            if self._key_to_index is not None:
                return self._probabilities[self._key_to_index[key]].item()
            else:
                return self._probabilities[key].item()
        else:
            return 0.0
    def sample(self, size=1) -> Sequence[KeyType]:
        """Sample `size` keys from the normalized value distribution."""
        self._normalize()
        if self._index_to_key is not None:
            l = [
                self._index_to_key[k.item()]
                # pyre-fixme[16]: `Values` has no attribute `_probabilities`.
                for k in torch.multinomial(self._probabilities, size)
            ]
        else:
            l = [
                self._to_key(k.item())
                for k in torch.multinomial(self._probabilities, size)
            ]
        return l
    def greedy(self, size=1) -> Sequence[KeyType]:
        """Return the `size` keys with the highest values."""
        sorted_keys, _ = self.sort()
        return sorted_keys[:size]
class Items(Generic[ValueType], ABC):
    """
    List of items

    Constructed either from an explicit sequence of items or from an int n,
    in which case items 0..n-1 are generated via _new_item().
    """
    def __init__(self, items: Union[Sequence[ValueType], int]):
        if isinstance(items, int):
            assert items > 0
            self._items = [self._new_item(i) for i in range(items)]
            # integer-generated space: item i lives at index i, no map needed
            self._reverse_lookup = None
        else:
            self._items = items
            self._reverse_lookup = {v: i for i, v in enumerate(items)}
    def __getitem__(self, i) -> ValueType:
        return self._items[i]
    def __len__(self):
        return len(self._items)
    def __iter__(self):
        return iter(self._items)
    def __int__(self):
        # NOTE(review): returns the item count for integer-generated spaces
        # but 0 for explicitly provided items -- presumably a sentinel for
        # "not an index-style space"; confirm against callers.
        if self._reverse_lookup is None:
            return len(self._items)
        else:
            return 0
    @abstractmethod
    def _new_item(self, i: int) -> ValueType:
        """Create the item for integer index `i`."""
        pass
    @property
    def is_sequence(self):
        # True when items were generated from an int (keys are 0..n-1)
        return self._reverse_lookup is None
    def index_of(self, item: ValueType) -> int:
        """Position of `item` in the space; raises ValueError for non-members."""
        if self._reverse_lookup is None:
            # pyre-fixme[16]: `ValueType` has no attribute `value`.
            int_val = int(item.value)
            if 0 <= int_val < len(self._items):
                return int_val
            else:
                raise ValueError(f"{item} is not valid")
        elif self._reverse_lookup is not None:
            try:
                return self._reverse_lookup[item]
            except Exception:
                raise ValueError(f"{item} is not valid")
        else:
            # unreachable: the two branches above are exhaustive
            raise ValueError(f"{item} is not valid")
    def fill(
        self,
        values: Union[Mapping[ValueType, float], Sequence[float], np.ndarray, Tensor],
    ) -> Union[Sequence[float], Mapping[ValueType, float]]:
        """Expand `values` over every item, defaulting missing entries to 0.0.

        Returns a list for integer-generated spaces, otherwise a dict keyed
        by item.
        """
        if self._reverse_lookup is None:
            if isinstance(values, Mapping):
                ds = []
                for a in self._items:
                    if a in values:
                        ds.append(values[a])
                    else:
                        ds.append(0.0)
                return ds
            else:
                # sequence input: copy the overlapping prefix, pad with 0.0
                ds = [0.0] * len(self._items)
                l = min(len(self._items), len(values))
                ds[:l] = values[:l]
                return ds
        elif isinstance(values, Mapping):
            ds = {}
            for a in self._items:
                if a in values:
                    ds[a] = values[a]
                else:
                    ds[a] = 0.0
            return ds
        else:
            # sequence input indexed through the reverse lookup
            ds = {}
            for a in self._items:
                try:
                    ds[a] = values[self._reverse_lookup[a]]
                except Exception:
                    ds[a] = 0.0
            return ds
# action type
ActionType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, Tensor]
# An action is any of the above, wrapped so it is hashable/comparable
Action = TypeWrapper[ActionType]
# Scalar reward observed for an action
Reward = float
# A probability value
Probability = float
# Action distribution: Action -> probability
# if action can be indexed, the type is either sequence of float or 1-D tensor,
# with the indices being the action
class ActionDistribution(Values[Action]):
    """Distribution over actions: keys are Action wrappers, values are
    (unnormalized) probabilities; inherits sampling/greedy from Values."""
    def _to_key(self, k: int) -> Action:
        # integer indices map directly to discrete Action ids
        return Action(k)
class ActionSpace(Items[Action]):
    """The set of discrete actions available to a policy."""
    def _new_item(self, i: int) -> Action:
        return Action(i)
    @property
    def space(self) -> Sequence[Action]:
        # the underlying list of Action items
        return self._items
    def distribution(
        self, dist: Union[Mapping[Action, float], Sequence[float], np.ndarray, Tensor]
    ) -> ActionDistribution:
        """Expand `dist` over the full action space (missing actions get 0.0)."""
        return ActionDistribution(super().fill(dist))
class Policy(ABC):
    """
    Policy interface

    A policy is a callable mapping a task-specific context to a chosen
    Action and the full ActionDistribution it was drawn from.
    """
    def __init__(self, action_space: ActionSpace, device=None):
        self._action_space = action_space
        self._device = device
    @abstractmethod
    def _query(self, context) -> Tuple[Action, ActionDistribution]:
        """Return the chosen action and the action distribution for `context`."""
        pass
    def __call__(self, context) -> Tuple[Action, ActionDistribution]:
        return self._query(context)
    @property
    def action_space(self):
        return self._action_space
@dataclass(frozen=True)
class TrainingData:
    """Train/validation split handed to a Trainer, with optional per-sample
    weights (None means uniform weighting)."""

    train_x: Tensor
    train_y: Tensor
    train_weight: Optional[Tensor]
    validation_x: Tensor
    validation_y: Tensor
    validation_weight: Optional[Tensor]
@dataclass(frozen=True)
class PredictResults:
    """Output of Trainer.predict for one batch of inputs."""

    predictions: Optional[Tensor]  # shape = [num_samples]
    scores: Tensor  # shape = [num_samples]
    # per-class probabilities when the underlying model provides them
    probabilities: Optional[Tensor] = None
class Trainer(ABC):
    """Abstract base for reward-model trainers: fit/predict/score plus
    pickle-based persistence of the underlying model."""
    def __init__(self):
        self._model = None
    @staticmethod
    def _sample(
        x: Tensor,
        y: Tensor,
        weight: Optional[Tensor] = None,
        num_samples: int = 0,
        fortran_order: bool = False,
    ) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
        """Convert tensors to numpy arrays, optionally subsampling rows.

        Subsampling (without replacement) happens only when
        0 < num_samples < len(x). Returns (x, y, weight-or-None).
        """
        assert x.shape[0] == y.shape[0]
        x_na = x.numpy()
        if fortran_order:
            # NOTE(review): reshaping to x's own shape with order="F"
            # re-reads the buffer column-major, permuting elements rather
            # than just relabelling the layout -- TODO confirm this is the
            # intent for Fortran-order consumers.
            x_na = x_na.reshape(x.shape, order="F")
        y_na = y.numpy()
        w_na = weight.numpy() if weight is not None else None
        if num_samples > 0 and num_samples < x.shape[0]:
            cs = np.random.choice(x.shape[0], num_samples, replace=False)
            x_na = x_na[cs, :]
            y_na = y_na[cs]
            w_na = w_na[cs] if w_na is not None else None
        return x_na, y_na, w_na
    def reset(self):
        # discard the trained model; is_trained becomes False
        self._model = None
    @property
    @abstractmethod
    def name(self) -> str:
        """Short human-readable trainer name."""
        pass
    @property
    def is_trained(self) -> bool:
        return self._model is not None
    @abstractmethod
    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        """Fit the underlying model on `data`."""
        pass
    @abstractmethod
    def predict(self, x: Tensor, device=None) -> PredictResults:
        """Score a batch of inputs with the trained model."""
        pass
    @abstractmethod
    def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float:
        """Return a (possibly weighted) goodness-of-fit score on (x, y)."""
        pass
    def save_model(self, file: str):
        """Pickle the trained model to `file`; errors are logged, not raised."""
        if self._model is None:
            logging.error(f"{self.__class__.__name__}.save_model: _model is None ")
            return
        try:
            with open(file, "wb") as f:
                pickle.dump(self._model, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            logging.error(f"{file} cannot be accessed.")
    def load_model(self, file: str):
        """Unpickle a model from `file`; errors are logged, not raised.

        NOTE: pickle.load can execute arbitrary code -- only load trusted
        model files.
        """
        try:
            logging.info(f"{self.__class__.__name__}.load_model: {file}")
            with open(file, "rb") as f:
                self._model = pickle.load(f)
        except Exception:
            logging.error(f"{file} cannot be read.")
| 18,646 | 30.712585 | 88 | py |
ReAgent | ReAgent-master/reagent/ope/estimators/contextual_bandits_estimators.py | #!/usr/bin/env python3
import logging
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
from reagent.ope.estimators.estimator import Estimator, EstimatorResult
from reagent.ope.estimators.types import (
Action,
ActionDistribution,
ActionSpace,
Reward,
Trainer,
TrainingData,
Values,
)
from reagent.ope.utils import Clamper, RunningAverage
from torch import Tensor
logger = logging.getLogger(__name__)
# A collection of actions: either a sequence of Action wrappers or a raw
# tensor/ndarray representation
Actions = Union[Sequence[Action], Tensor, np.ndarray]
# Logged propensities below this threshold are treated as zero so that
# importance weights never divide by (near-)zero probabilities
PROPENSITY_THRESHOLD = 1e-6
class ActionRewards(Values[Action]):
    """Map from Action to its (expected) reward."""
    def _to_key(self, k: int) -> Action:
        # integer indices map directly to discrete Action ids
        return Action(k)
class BanditsModel(ABC):
    """Reward-model interface for contextual bandits: maps a context to the
    reward of every action."""
    @abstractmethod
    def _action_rewards(self, context) -> ActionRewards:
        """
        Calculate reward for each action based on context
        Args:
            context: task specific context
        Returns: action -> reward map
        """
        pass
    def __call__(self, context) -> ActionRewards:
        # model instances are used as plain callables: model(context)
        return self._action_rewards(context)
@dataclass(frozen=True)
class ModelOutputs:
    """Pre-computed reward-model predictions attached to a logged sample."""

    # model-predicted reward for the action that was actually logged
    tgt_reward_from_log_action: Reward
    # model-predicted reward for every action in the action space
    tgt_rewards: Sequence[Reward]
@dataclass(frozen=True)
class LogSample:
    """One logged bandit interaction plus target-policy annotations."""

    # task specific context
    context: object
    # log
    log_action: Action
    log_reward: Reward
    log_action_probabilities: ActionDistribution
    # result from target policy
    tgt_action_probabilities: ActionDistribution
    tgt_action: Action
    # optional pre-computed reward-model outputs (skips model training)
    model_outputs: Optional[ModelOutputs] = None
    # NaN when the true reward is unknown
    ground_truth_reward: Reward = float("nan")
    # item features used to train/query the reward model
    item_feature: Optional[Tensor] = None
@dataclass(frozen=True)
class BanditsEstimatorInput:
    """Full input handed to a contextual-bandit estimator."""

    action_space: ActionSpace
    samples: Sequence[LogSample]
    # True when every sample carries model_outputs (no training needed)
    has_model_outputs: bool
class DMEstimator(Estimator):
    """
    Estimating using Direct Method (DM), assuming a reward model is trained

    A reward model (self._trainer) is fit on the logged samples' item
    features; the target policy's value is then the model-predicted reward
    expected under the target action distribution.

    Note: the original class placed this string *after*
    TRAINING_VALIDATION_SPLIT, where it was a no-op expression statement
    rather than the class docstring; it is now the actual docstring.
    """

    # Fraction of the samples used to fit the reward model; the remainder
    # is used for validation.
    TRAINING_VALIDATION_SPLIT = 0.8

    def __init__(self, trainer: Optional[Trainer] = None, device=None):
        super().__init__(device)
        self._trainer = trainer

    def _train_model(
        self, samples: Sequence[LogSample], force_train: bool = False
    ) -> bool:
        """Fit the reward model on `samples`.

        Returns False when no trainer is configured or no item features are
        available; True once a (possibly pre-existing) model is ready.
        """
        if self._trainer is None:
            logger.error("Target model trainer not set")
            return False
        trainer = self._trainer
        assert trainer is not None
        if trainer.is_trained and not force_train:
            return True
        logger.info(" training direct model...")
        st = time.perf_counter()
        sample_size = len(samples)
        training_size = int(sample_size * DMEstimator.TRAINING_VALIDATION_SPLIT)
        # feature vector = [flattened action value, flattened item feature];
        # label = logged reward
        train_x = []
        train_y = []
        for i in range(training_size):
            sample = samples[i]
            if sample.item_feature is None:
                continue
            train_x.append(
                torch.cat(
                    (
                        torch.tensor(
                            sample.log_action.value, dtype=torch.float
                        ).flatten(),
                        sample.item_feature.flatten(),
                    )
                )
            )
            train_y.append(sample.log_reward)
        if len(train_x) == 0:
            logger.error("Item features not provided, DM is not available")
            return False
        train_x = torch.stack(train_x)
        train_y = torch.tensor(train_y, dtype=torch.float, device=train_x.device)
        vali_x = []
        vali_y = []
        for i in range(training_size, sample_size):
            sample = samples[i]
            if sample.item_feature is None:
                continue
            vali_x.append(
                torch.cat(
                    (
                        torch.tensor(
                            sample.log_action.value, dtype=torch.float
                        ).flatten(),
                        sample.item_feature.flatten(),
                    )
                )
            )
            vali_y.append(sample.log_reward)
        if len(vali_x) == 0:
            # no validation split available: validate on the training data
            vali_x = train_x.detach().clone()
            vali_y = train_y.detach().clone()
        else:
            vali_x = torch.stack(vali_x)
            vali_y = torch.tensor(vali_y, dtype=torch.float, device=vali_x.device)
        training_data = TrainingData(train_x, train_y, None, vali_x, vali_y, None)
        trainer.train(training_data)
        logger.info(f" training direct model done: {time.perf_counter() - st}s")
        return True

    def _calc_dm_reward(
        self, action_space: ActionSpace, sample: LogSample
    ) -> Tuple[Optional[Reward], torch.Tensor, torch.Tensor]:
        """Return (model reward for the logged action, model rewards for all
        actions, target-policy probabilities for all actions)."""
        if sample.model_outputs is not None:
            # pre-computed model outputs: no model query needed
            return (
                sample.model_outputs.tgt_reward_from_log_action,
                torch.tensor(
                    sample.model_outputs.tgt_rewards,
                    dtype=torch.float,
                    device=self._device,
                ),
                torch.tensor(
                    # pyre-fixme[16]: `ActionDistribution` has no attribute `_values`.
                    sample.tgt_action_probabilities._values,
                    dtype=torch.float,
                    device=self._device,
                ),
            )
        trainer = self._trainer
        if trainer is None or not trainer.is_trained:
            # BUGFIX: the original returned torch.zeros() with no size,
            # which is at best a 0-dim tensor (and rejected by some torch
            # versions). Use explicit 1-element zero tensors so the
            # caller's reshape(-1)/dot reduces cleanly to 0.0.
            return 0.0, torch.zeros(1), torch.zeros(1)
        assert sample.item_feature is not None
        item_feature = sample.item_feature.flatten()
        features = []
        probs = []
        idx = -1
        for action in action_space:
            # remember which row corresponds to the logged action
            if idx < 0 and action == sample.log_action:
                idx = len(features)
            features.append(
                torch.cat(
                    (
                        torch.tensor(action.value, dtype=torch.float).flatten(),
                        item_feature,
                    )
                )
            )
            probs.append(sample.tgt_action_probabilities[action])
        preds = trainer.predict(torch.stack(features).float(), device=self._device)
        return (
            preds.scores[idx].item(),
            preds.scores,
            torch.tensor(probs, dtype=torch.float, device=self._device),
        )

    def _evaluate(
        self,
        input: BanditsEstimatorInput,
        train_samples: Sequence[LogSample],
        eval_samples: Sequence[LogSample],
        force_train: bool = False,
        **kwargs,
    ) -> Optional[EstimatorResult]:
        """Train on `train_samples`, then estimate the target policy reward
        on `eval_samples` as the model reward expected under the target
        action distribution."""
        logger.info("OPE DM Evaluating")
        if (
            not self._train_model(train_samples, force_train)
            and not input.has_model_outputs
        ):
            return None
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        tgt_vals = []
        gt_avg = RunningAverage()
        for sample in eval_samples:
            log_avg.add(sample.log_reward)
            _, tgt_scores, tgt_probs = self._calc_dm_reward(input.action_space, sample)
            # expected model reward under the target policy
            tgt_reward = torch.dot(tgt_scores.reshape(-1), tgt_probs.reshape(-1)).item()
            tgt_avg.add(tgt_reward)
            tgt_vals.append(tgt_reward)
            gt_avg.add(sample.ground_truth_reward)
        (
            tgt_score_normalized,
            tgt_std_err,
            tgt_std_err_normalized,
        ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average)
        return EstimatorResult(
            log_reward=log_avg.average,
            estimated_reward=tgt_avg.average,
            ground_truth_reward=gt_avg.average,
            estimated_weight=tgt_avg.count,
            estimated_reward_normalized=tgt_score_normalized,
            estimated_reward_std_error=tgt_std_err,
            estimated_reward_normalized_std_error=tgt_std_err_normalized,
        )

    @staticmethod
    def _calc_optional_avg(a: Optional[float], b: Optional[float]) -> Optional[float]:
        """Average of two optional floats; None if either is None."""
        # Annoying but Pyre would only take it like this
        return None if a is None else (None if b is None else (a + b) / 2)

    def evaluate(
        self, input: BanditsEstimatorInput, **kwargs
    ) -> Optional[EstimatorResult]:
        """Run DM; with model outputs present, evaluate directly, otherwise
        use 2-fold cross-fitting and average the two halves' estimates."""
        if input.has_model_outputs:
            return self._evaluate(
                input, input.samples, input.samples, force_train=True, **kwargs
            )
        log_avg = RunningAverage()
        gt_avg = RunningAverage()
        for sample in input.samples:
            log_avg.add(sample.log_reward)
            gt_avg.add(sample.ground_truth_reward)
        # 2-fold cross "validation" as used by https://arxiv.org/pdf/1612.01205.pdf
        shuffled = list(input.samples)
        np.random.shuffle(shuffled)
        lower_half = shuffled[: len(shuffled) // 2]
        upper_half = shuffled[len(shuffled) // 2 :]
        er_lower = self._evaluate(
            input, lower_half, upper_half, force_train=True, **kwargs
        )
        er_upper = self._evaluate(
            input, upper_half, lower_half, force_train=True, **kwargs
        )
        if er_lower is None or er_upper is None:
            return None
        return EstimatorResult(
            log_reward=log_avg.average,
            estimated_reward=(
                (er_lower.estimated_reward + er_upper.estimated_reward) / 2
            ),
            estimated_reward_normalized=(
                DMEstimator._calc_optional_avg(
                    er_lower.estimated_reward_normalized,
                    er_upper.estimated_reward_normalized,
                )
            ),
            estimated_reward_normalized_std_error=(
                DMEstimator._calc_optional_avg(
                    er_lower.estimated_reward_normalized_std_error,
                    er_upper.estimated_reward_normalized_std_error,
                )
            ),
            estimated_reward_std_error=(
                DMEstimator._calc_optional_avg(
                    er_lower.estimated_reward_std_error,
                    er_upper.estimated_reward_std_error,
                )
            ),
            ground_truth_reward=gt_avg.average,
        )

    def __repr__(self):
        # BUGFIX: the original format string was missing a closing paren
        # after the trainer name, producing an unbalanced repr.
        return f"DMEstimator(trainer({None if self._trainer is None else self._trainer.name}),device({self._device}))"
class IPSEstimator(Estimator):
    """
    Inverse Propensity Scoring (IPS) estimator
    """
    def __init__(
        self,
        weight_clamper: Optional[Clamper] = None,
        weighted: bool = False,
        device=None,
    ):
        # weight_clamper bounds the importance weights; weighted=True
        # selects the normalized (weighted) variant of IPS
        super().__init__(device)
        self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper
        self._weighted = weighted
    def evaluate(
        self, input: BanditsEstimatorInput, **kwargs
    ) -> Optional[EstimatorResult]:
        """Estimate the target policy reward by importance-weighting the
        logged rewards with clamped pi_tgt/pi_log ratios."""
        logger.info("OPE IPS Evaluating")
        log_avg = RunningAverage()
        logged_vals = []
        tgt_avg = RunningAverage()
        tgt_vals = []
        acc_weight = RunningAverage()
        gt_avg = RunningAverage()
        for sample in input.samples:
            log_avg.add(sample.log_reward)
            logged_vals.append(sample.log_reward)
            weight = 0.0
            tgt_result = 0.0
            if sample.log_action.value is not None:
                # importance weight = pi_tgt(a|x) / pi_log(a|x); treat
                # near-zero logged propensities as 0 to avoid blow-ups
                weight = (
                    0.0
                    if sample.log_action_probabilities[sample.log_action]
                    < PROPENSITY_THRESHOLD
                    else sample.tgt_action_probabilities[sample.log_action]
                    / sample.log_action_probabilities[sample.log_action]
                )
                weight = self._weight_clamper(weight)
                tgt_result = sample.log_reward * weight
            tgt_avg.add(tgt_result)
            tgt_vals.append(tgt_result)
            acc_weight.add(weight)
            gt_avg.add(sample.ground_truth_reward)
        (
            tgt_score_normalized,
            tgt_std_err,
            tgt_std_err_normalized,
        ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average)
        # NOTE(review): the self-normalized IPS estimator is
        # sum(w*r)/sum(w); dividing the per-sample *average* by the weight
        # *total* appears to introduce an extra 1/N factor -- verify
        # RunningAverage.total/average semantics and the intended
        # normalization before relying on weighted=True.
        return EstimatorResult(
            log_reward=log_avg.average,
            estimated_reward=tgt_avg.average
            if not self._weighted
            else tgt_avg.average / acc_weight.total,
            ground_truth_reward=gt_avg.average,
            estimated_weight=tgt_avg.count,
            estimated_reward_normalized=tgt_score_normalized,
            estimated_reward_std_error=tgt_std_err,
            estimated_reward_normalized_std_error=tgt_std_err_normalized,
        )
    def __repr__(self):
        return (
            f"IPSEstimator(weight_clamper({self._weight_clamper})"
            f",weighted({self._weighted}),device({self._device}))"
        )
class DoublyRobustEstimator(DMEstimator):
    """
    Doubly Robust (DR) estimator:
      reference: https://arxiv.org/abs/1103.4601 (deterministic reward model)
                 https://arxiv.org/abs/1612.01205 (distributed reward model)

    Combines the direct-method (DM) reward model with an importance-weighted
    correction on the logged reward; unbiased if either the reward model or
    the propensities are accurate.
    """

    def __init__(
        self,
        trainer: Optional[Trainer] = None,
        weight_clamper: Optional[Clamper] = None,
        device=None,
    ):
        """
        Args:
            trainer: reward-model trainer for the DM component
            weight_clamper: clamps importance weights (defaults to identity range)
            device: optional torch device
        """
        super().__init__(trainer, device)
        self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper

    def _evaluate(
        self,
        input: BanditsEstimatorInput,
        train_samples: Sequence[LogSample],
        eval_samples: Sequence[LogSample],
        force_train: bool = False,
        **kwargs,
    ) -> Optional[EstimatorResult]:
        """Train the reward model on train_samples, then compute the DR
        estimate over eval_samples."""
        logger.info("OPE DR Evaluating")
        self._train_model(train_samples, force_train)
        log_avg = RunningAverage()
        tgt_avg = RunningAverage()
        tgt_vals = []
        gt_avg = RunningAverage()
        for sample in eval_samples:
            log_avg.add(sample.log_reward)
            # DM predictions: reward of the logged action, per-action scores,
            # and target-policy action probabilities
            dm_action_reward, dm_scores, dm_probs = self._calc_dm_reward(
                input.action_space, sample
            )
            # Expected model reward under the target policy
            dm_reward = torch.dot(dm_scores.reshape(-1), dm_probs.reshape(-1)).item()
            tgt_result = 0.0
            weight = 0.0
            if sample.log_action.value is not None:
                # Importance weight; zeroed when the logging propensity is too
                # small to trust
                weight = (
                    0.0
                    if sample.log_action_probabilities[sample.log_action]
                    < PROPENSITY_THRESHOLD
                    else sample.tgt_action_probabilities[sample.log_action]
                    / sample.log_action_probabilities[sample.log_action]
                )
                weight = self._weight_clamper(weight)
                assert dm_action_reward is not None
                assert dm_reward is not None
                # DR estimate: DM baseline plus weighted residual of the logged
                # reward against the model's prediction for the logged action
                tgt_result += (
                    sample.log_reward - dm_action_reward
                ) * weight + dm_reward
            else:
                # No logged action: fall back to the pure DM estimate
                tgt_result = dm_reward
            tgt_avg.add(tgt_result)
            tgt_vals.append(tgt_result)
            gt_avg.add(sample.ground_truth_reward)
        (
            tgt_score_normalized,
            tgt_std_err,
            tgt_std_err_normalized,
        ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average)
        return EstimatorResult(
            log_reward=log_avg.average,
            estimated_reward=tgt_avg.average,
            ground_truth_reward=gt_avg.average,
            estimated_weight=tgt_avg.count,
            estimated_reward_normalized=tgt_score_normalized,
            estimated_reward_std_error=tgt_std_err,
            estimated_reward_normalized_std_error=tgt_std_err_normalized,
        )

    def __repr__(self):
        return (
            f"DoublyRobustEstimator(trainer({None if self._trainer is None else self._trainer.name})"
            f",weight_clamper({self._weight_clamper}),device({self._device}))"
        )
class SwitchEstimator(DMEstimator):
    """
    Switch estimator: uses the IPS term for samples whose importance weight is
    below a threshold tau, and the model-based (DM) term above it; tau is
    selected from a candidate grid by minimizing an estimated variance + bias^2.
    For details, visit https://arxiv.org/abs/1612.01205 sections 4, 5
    """

    # Number of candidate thresholds searched
    CANDIDATES = 21
    # Base of the exponential spacing of candidate thresholds
    EXP_BASE = 1.5
    # Offset below the minimum weight so a pure-DM threshold is reachable
    EPSILON = 1e-6

    def __init__(
        self,
        trainer: Optional[Trainer] = None,
        weight_clamper: Optional[Clamper] = None,
        rmax: Optional[Reward] = None,
        device=None,
    ):
        """
        rmax is an a priori upper bound on any possible reward.
        The tighter the bound, the better the estimator can estimate
        its bias. If not provided, the estimator will use the max
        reward seen in the sample data.
        """
        super().__init__(trainer, device)
        self._rmax = rmax
        self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper

    def _estimate_rmax(self, input: BanditsEstimatorInput) -> Reward:
        """Max of observed logged rewards and model-predicted rewards; used as
        the reward bound when rmax was not supplied."""
        rmax = float("-inf")
        for sample in input.samples:
            _, dm_scores, dm_probs = self._calc_dm_reward(input.action_space, sample)
            max_sample_r = max(sample.log_reward, torch.max(dm_scores).item())
            rmax = max(rmax, max_sample_r)
        return rmax

    def _calc_weight_reward_tensors(
        self, input: BanditsEstimatorInput, eval_samples: Sequence[LogSample]
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        RunningAverage,
        RunningAverage,
    ]:
        """Build the per-sample/per-action tensors consumed by the threshold
        search.

        Returns (in order): one-hot logged actions, clamped importance
        weights, logged rewards, DM per-action reward estimates, DM estimate
        for the logged action, target propensities, propensity * rmax, and
        running averages of logged and ground-truth rewards.
        """
        n = len(eval_samples)
        ws = torch.ones((n, len(input.action_space)))
        rs = torch.zeros((n, 1))
        r_est = torch.zeros((n, len(input.action_space)))
        r_est_for_logged_action = torch.zeros((n, 1))
        actions = torch.zeros((n, len(input.action_space)))
        expected_rmax = torch.zeros((n, len(input.action_space)))
        propensities = torch.zeros((n, len(input.action_space)))
        log_avg = RunningAverage()
        gt_avg = RunningAverage()
        priori_rmax = self._estimate_rmax(input) if self._rmax is None else self._rmax
        assert priori_rmax is not None
        for i, sample in enumerate(eval_samples):
            dm_score_for_logged_action, dm_scores, dm_probs = self._calc_dm_reward(
                input.action_space, sample
            )
            for a in input.action_space:
                # Importance weight per action; zeroed for tiny logging propensity
                weight = (
                    0.0
                    if sample.log_action_probabilities[a] < PROPENSITY_THRESHOLD
                    else sample.tgt_action_probabilities[a]
                    / sample.log_action_probabilities[a]
                )
                ws[i, a] = self._weight_clamper(weight)
                propensities[i, a] = sample.tgt_action_probabilities[a]
                expected_rmax[i, a] = sample.tgt_action_probabilities[a] * priori_rmax
                actions[i, a] = float(a == sample.log_action)
            rs[i, 0] = sample.log_reward
            r_est[i] = dm_scores.reshape(-1)
            r_est_for_logged_action[i] = dm_score_for_logged_action
            log_avg.add(sample.log_reward)
            gt_avg.add(sample.ground_truth_reward)
        return (
            actions,
            ws,
            rs,
            r_est,
            r_est_for_logged_action,
            propensities,
            expected_rmax,
            log_avg,
            gt_avg,
        )

    def _calc_estimated_values(
        self,
        logged_rewards: torch.Tensor,
        weights: torch.Tensor,
        actions: torch.Tensor,
        threshold: float,
        est_rewards: torch.Tensor,
        est_rewards_for_logged_action: torch.Tensor,
        tgt_props: torch.Tensor,
    ) -> torch.Tensor:
        """Per-sample estimate: IPS term when the logged-action weight is at or
        below the threshold, DM term for actions whose weight exceeds it
        (overridden by SwitchDREstimator)."""
        ips_scores = (weights * actions).sum(dim=1, keepdim=True)
        return logged_rewards * ips_scores * (ips_scores <= threshold).float() + (
            est_rewards * tgt_props * (weights > threshold).float()
        ).sum(dim=1, keepdim=True)

    def _evaluate(
        self,
        input: BanditsEstimatorInput,
        train_samples: Sequence[LogSample],
        eval_samples: Sequence[LogSample],
        force_train: bool = False,
        **kwargs,
    ) -> Optional[EstimatorResult]:
        """Train the reward model, search the candidate thresholds for the one
        minimizing estimated MSE (variance + bias^2), and report the estimate
        at that threshold.  kwargs may override "exp_base" and "candidates".
        """
        logger.info("OPE Switch Evaluating")
        self._train_model(train_samples, force_train)
        if "exp_base" in kwargs:
            exp_base = kwargs["exp_base"]
        else:
            exp_base = SwitchEstimator.EXP_BASE
        if "candidates" in kwargs:
            num_candidates = kwargs["candidates"]
        else:
            num_candidates = SwitchEstimator.CANDIDATES
        (
            actions,
            ws,
            rs,
            r_est,
            r_est_for_logged_action,
            propensities,
            expected_rmax,
            log_avg,
            gt_avg,
        ) = self._calc_weight_reward_tensors(input, eval_samples)
        min_w, max_w = float(torch.min(ws).item()), float(torch.max(ws).item())
        diff = max_w - min_w
        # The threshold lies in the range [min ips, max ips]
        # Picking a small threshold -> using mainly the model-based estimator
        # Picking a large threshold -> using mainly the ips-based estimator
        candidates = [
            min_w + ((exp_base ** x) / (exp_base ** (num_candidates - 1))) * diff
            for x in range(num_candidates)
        ]
        # This prevents the edge case where nearly all scores being min_w prevents
        # switch from trying a purely DM estimate
        tau = min_w - SwitchEstimator.EPSILON
        loss = float("inf")
        for candidate in candidates:
            estimated_values = self._calc_estimated_values(
                rs, ws, actions, candidate, r_est, r_est_for_logged_action, propensities
            )
            # Empirical variance of the mean estimate at this threshold
            var = (1.0 / (estimated_values.shape[0] ** 2)) * torch.sum(
                (estimated_values - torch.mean(estimated_values)) ** 2
            ).item()
            # Bias bound: expected rmax mass on actions above the threshold
            bias = torch.mean(
                torch.sum(expected_rmax * (ws > candidate).float(), dim=1, keepdim=True)
            ).item()
            cand_loss = var + bias * bias
            if cand_loss < loss:
                tau = candidate
                loss = cand_loss
        estimated_values = self._calc_estimated_values(
            rs, ws, actions, tau, r_est, r_est_for_logged_action, propensities
        )
        (
            tgt_score_normalized,
            tgt_std_err,
            tgt_std_err_normalized,
        ) = self._compute_metric_data(estimated_values.detach(), log_avg.average)
        return EstimatorResult(
            log_reward=log_avg.average,
            estimated_reward=torch.mean(estimated_values).item(),
            ground_truth_reward=gt_avg.average,
            estimated_weight=float(estimated_values.shape[0]),
            estimated_reward_normalized=tgt_score_normalized,
            estimated_reward_std_error=tgt_std_err,
            estimated_reward_normalized_std_error=tgt_std_err_normalized,
        )

    def __repr__(self):
        return (
            f"SwitchEstimator(trainer({None if self._trainer is None else self._trainer.name})"
            f",weight_clamper({self._weight_clamper}),device({self._device}))"
        )
class SwitchDREstimator(SwitchEstimator):
    """
    Switch-DR estimator: identical to SwitchEstimator except that the
    low-weight branch uses the doubly-robust correction instead of plain IPS.
    For details, visit https://arxiv.org/abs/1612.01205 sections 4, 5
    """

    def _calc_estimated_values(
        self,
        logged_rewards: torch.Tensor,
        weights: torch.Tensor,
        actions: torch.Tensor,
        threshold: float,
        est_rewards: torch.Tensor,
        est_rewards_for_logged_action: torch.Tensor,
        tgt_props: torch.Tensor,
    ) -> torch.Tensor:
        # Importance weight of each sample's logged action, shape (n, 1)
        ips_scores = (weights * actions).sum(dim=1, keepdim=True)
        # Model-based expected reward under the target policy, shape (n, 1)
        dm_values = (tgt_props * est_rewards).sum(dim=1, keepdim=True)
        # Doubly-robust estimate: DM baseline plus weighted residual
        dr_values = (
            ips_scores * (logged_rewards - est_rewards_for_logged_action) + dm_values
        )
        below_mask = (ips_scores <= threshold).float()
        # DM contribution from actions whose weight exceeds the threshold
        dm_overflow = (est_rewards * tgt_props * (weights > threshold).float()).sum(
            dim=1, keepdim=True
        )
        return dr_values * below_mask + dm_overflow

    def __repr__(self):
        return (
            f"SwitchDREstimator(trainer({None if self._trainer is None else self._trainer.name})"
            f",weight_clamper({self._weight_clamper}),device({self._device}))"
        )
| 23,956 | 34.971471 | 117 | py |
ReAgent | ReAgent-master/reagent/ope/estimators/estimator.py | #!/usr/bin/env python3
import logging
import math
import pickle
import tempfile
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from multiprocessing import Pool
from typing import Iterable, List, Mapping, Optional, Tuple, Union
import torch
from reagent.evaluation.cpe import bootstrapped_std_error_of_mean
from torch import Tensor
# Module-level logger for this estimator framework
logger = logging.getLogger(__name__)
# Logged scores below this threshold are treated as zero when normalizing metrics
SCORE_THRESHOLD = 1e-6
class ResultDiffs:
    """
    Lazily-computed summary statistics (RMSE, bias, variance) over a tensor of
    differences, e.g., estimates vs ground truth.
    """

    def __init__(self, diffs: Tensor):
        self._diffs = diffs
        # Caches; each statistic is computed at most once.
        self._rmse: Optional[Tensor] = None
        self._bias: Optional[Tensor] = None
        self._variance: Optional[Tensor] = None

    @property
    def rmse(self) -> Tensor:
        """Root-mean-squared difference (cached after first access)."""
        if self._rmse is None:
            self._rmse = self._diffs.pow(2.0).mean().sqrt()
        return self._rmse

    @property
    def bias(self) -> Tensor:
        """Mean difference (cached after first access)."""
        if self._bias is None:
            self._bias = self._diffs.mean()
        return self._bias

    @property
    def variance(self) -> Tensor:
        """Sample variance of the differences (cached after first access)."""
        if self._variance is None:
            self._variance = self._diffs.var()
        return self._variance

    def __repr__(self):
        return (
            f"samples={self._diffs.shape[0]}, rmse={self.rmse.item()}"
            f", bias={self.bias}, variance={self.variance}"
        )
@dataclass(frozen=True)
class EstimatorResult:
    """Aggregate result of one estimator run over a set of samples."""

    # Average reward observed in the logged data
    log_reward: float
    # Reward estimated for the target policy
    estimated_reward: float
    # Average ground-truth reward (defaults to 0.0 when unknown)
    ground_truth_reward: Optional[float] = 0.0
    # Effective sample count/weight behind the estimate
    estimated_weight: float = 1.0
    # Estimate normalized by the logged reward (None if not computed)
    estimated_reward_normalized: Optional[float] = None
    # Bootstrap standard error of the estimate (None if not computed)
    estimated_reward_std_error: Optional[float] = None
    # Standard error of the normalized estimate (None if not computed)
    estimated_reward_normalized_std_error: Optional[float] = None
@dataclass
class EstimatorResults:
    """
    Accumulator of EstimatorResult data points across experimental runs,
    with summary reporting of estimate-vs-log and estimate-vs-ground-truth
    differences.
    """

    results: List[EstimatorResult] = field(default_factory=list)
    # Optional torch device used for the tensors built in report()
    device = None

    def append(self, result: EstimatorResult):
        """Append a data point

        NaN/inf estimates are logged and dropped.  Note that the normalized
        score and std-error fields are not carried over into the stored copy.

        Args:
            result: result from an experimental run
        """
        er = result.estimated_reward
        if math.isnan(er) or math.isinf(er):
            logging.warning(f" Invalid estimate: {er}")
            return
        lr = result.log_reward
        gr = (
            result.ground_truth_reward
            if result.ground_truth_reward is not None
            else 0.0
        )
        logging.info(
            f" Append estimate [{len(self.results) + 1}]: "
            f"log={lr}, estimated={er}, ground_truth={gr}"
        )
        self.results.append(
            EstimatorResult(
                log_reward=result.log_reward,
                estimated_reward=result.estimated_reward,
                ground_truth_reward=gr,
                estimated_weight=result.estimated_weight,
            )
        )

    def report(self):
        """Summarize the accumulated results.

        Also caches the diff statistics on self._estimated_log_diff and
        self._estimated_ground_truth_diff.

        Returns:
            (mean log reward, mean estimated reward, mean ground-truth reward,
             diffs of estimate vs ground truth, diffs of estimate vs log,
             mean estimated weight)
        """
        ert = torch.tensor(
            [res.estimated_reward for res in self.results],
            dtype=torch.double,
            device=self.device,
        )
        lrt = torch.tensor(
            [res.log_reward for res in self.results],
            dtype=torch.double,
            device=self.device,
        )
        grt = torch.tensor(
            [
                res.ground_truth_reward if res.ground_truth_reward is not None else 0.0
                for res in self.results
            ],
            dtype=torch.double,
            device=self.device,
        )
        self._estimated_log_diff = ResultDiffs(ert - lrt)
        self._estimated_ground_truth_diff = ResultDiffs(ert - grt)
        return (
            lrt.mean().item(),
            ert.mean().item(),
            grt.mean().item(),
            ResultDiffs(ert - grt),
            ResultDiffs(ert - lrt),
            torch.tensor([float(res.estimated_weight) for res in self.results])
            .mean()
            .item(),
        )
@dataclass(frozen=True)
class EstimatorSampleResult:
    """Per-sample evaluation result: logged, estimated (target) and
    ground-truth rewards plus the sample's importance weight."""

    log_reward: float
    target_reward: float
    ground_truth_reward: float
    weight: float

    def __repr__(self):
        # Fixed: the original f-string omitted the closing parenthesis,
        # producing "EstimatorSampleResult(log=...,wgt=..." with no ")".
        return (
            f"EstimatorSampleResult(log={self.log_reward}"
            f",tgt={self.target_reward},gt={self.ground_truth_reward}"
            f",wgt={self.weight})"
        )
class Estimator(ABC):
    """
    Abstract base for all offline policy estimators.
    """

    def __init__(self, device=None):
        self._device = device

    def _compute_metric_data(
        self, tgt_rewards: Tensor, logged_score: float
    ) -> Tuple[float, float, float]:
        """
        Given a sequence of scores, normalizes the target score by the average
        logged score and computes the bootstrapped standard error.  Normalizing
        by the logged score can provide a better metric to compare models
        against.

        Returns:
            (normalized mean score, std error, normalized std error)
        """
        if tgt_rewards.dim() > 1:
            # Only single-column 2-D inputs are supported; flatten them.
            assert tgt_rewards.shape[1] == 1
            tgt_rewards = tgt_rewards.reshape(-1)
        # Guard against division by a (near-)zero logged score.
        normalizer = 0.0 if logged_score < SCORE_THRESHOLD else 1.0 / logged_score
        std_err = bootstrapped_std_error_of_mean(tgt_rewards)
        mean_score = torch.mean(tgt_rewards).item()
        return mean_score * normalizer, std_err, std_err * normalizer

    @abstractmethod
    def evaluate(
        self, input, **kwargs
    ) -> Optional[Union[EstimatorResult, EstimatorResults]]:
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}(device({self._device}))"
def run_evaluation(
    file_name: str,
) -> Optional[Mapping[str, Iterable[EstimatorResults]]]:
    """
    Multiprocessing worker entry point: unpickle an (estimators, inputs) task
    from file_name and evaluate every estimator on every input.

    Returns:
        Mapping from repr(estimator) to its list of results, or None when the
        task file cannot be read/unpickled.
    """
    logger.info(f"received filename {file_name}")
    try:
        with open(file_name, "rb") as fp:
            estimators, inputs = pickle.load(fp)
    except Exception as err:
        # Fixed: the original swallowed this error silently, making a failed
        # worker indistinguishable from an empty one in the logs.
        logger.error(f"failed to load task file {file_name}: {err}")
        return None
    results = {}
    for estimator in estimators:
        estimator_name = repr(estimator)
        estimator_results = []
        for input in inputs:
            try:
                estimator_results.append(estimator.evaluate(input))
            except Exception as err:
                logger.error(f"{estimator_name} error {err}")
        # Consistency: reuse estimator_name rather than recomputing repr()
        results[estimator_name] = estimator_results
    return results
class Evaluator:
    """
    Multiprocessing evaluator: runs (estimators, input) experiments either
    in-process or fanned out to worker processes via pickled task files.
    """

    def __init__(
        self,
        experiments: Iterable[Tuple[Iterable[Estimator], object]],
        max_num_workers: int,
    ):
        """
        Args:
            experiments: (estimators, input) pairs to evaluate
            max_num_workers: <= 0 no multiprocessing
                             otherwise create max_num_workers processes
        """
        self._experiments = experiments
        self._tasks = None
        if max_num_workers > 0:
            # Round-robin the experiments across the workers
            self._tasks = [[] for _ in range(max_num_workers)]
            for i, experiment in enumerate(experiments):
                self._tasks[i % max_num_workers].append(experiment)

    def evaluate(self) -> Mapping[str, EstimatorResults]:
        """Run all experiments and merge per-estimator results, keyed by
        repr(estimator)."""
        results = {}
        if self._tasks is None:
            # Sequential path: evaluate everything in this process
            for estimators, input in self._experiments:
                for estimator in estimators:
                    estimator_name = repr(estimator)
                    if estimator_name in results:
                        result = results[estimator_name]
                    else:
                        result = EstimatorResults()
                        results[estimator_name] = result
                    result.append(estimator.evaluate(input))
        else:
            # Parallel path: pickle each task list to a temp file and let
            # run_evaluation workers load them (avoids pickling over pipes)
            tmp_files = []
            tmp_file_names = []
            for task in self._tasks:
                fp = tempfile.NamedTemporaryFile()
                pickle.dump(task, fp, protocol=pickle.HIGHEST_PROTOCOL)
                fp.flush()
                tmp_files.append(fp)
                tmp_file_names.append(fp.name)
            with Pool(len(tmp_file_names)) as pool:
                evaluation_results = pool.map(run_evaluation, tmp_file_names)
            for tmp_file in tmp_files:
                tmp_file.close()
            # Merge per-worker result maps into one
            for evaluation_result in evaluation_results:
                if evaluation_result is None:
                    continue
                for estimator_name, estimator_results in evaluation_result.items():
                    if estimator_name in results:
                        result = results[estimator_name]
                    else:
                        result = EstimatorResults()
                        results[estimator_name] = result
                    for estimator_result in estimator_results:
                        result.append(estimator_result)
        return results

    @staticmethod
    def report_results(results: Mapping[str, EstimatorResults]):
        """Print a one-line summary per estimator."""
        for name, result in results.items():
            log_r, tgt_r, gt_r, tgt_gt, tgt_log, weight = result.report()
            print(
                f"{name} rewards: log_reward{log_r} tgt_reward[{tgt_r}] gt_reward[{gt_r}]"
                f", diffs: tgt-gt[{tgt_gt}] tgt-log[{tgt_log}]",
                flush=True,
            )
| 9,051 | 30.430556 | 92 | py |
ReAgent | ReAgent-master/reagent/ope/test/mslr_slate.py | #!/usr/bin/env python3
import argparse
import itertools
import json
import logging
import os
import pickle
import random
import sys
import time
from collections import OrderedDict
from typing import Iterable, List, Optional, Tuple
import numpy as np
import torch
import torch.multiprocessing as mp
from reagent.ope.estimators.estimator import Evaluator
from reagent.ope.estimators.slate_estimators import (
DCGSlateMetric,
DMEstimator,
DoublyRobustEstimator,
ERRSlateMetric,
IPSEstimator,
LogSample,
NDCGSlateMetric,
PassThruDistribution,
PBMEstimator,
PseudoInverseEstimator,
RankingDistribution,
RewardDistribution,
SlateContext,
SlateEstimator,
SlateEstimatorInput,
SlateItemFeatures,
SlateItemValues,
SlateModel,
SlateQuery,
SlateSlots,
)
from reagent.ope.estimators.types import Trainer, TrainingData
from reagent.ope.trainers.linear_trainers import DecisionTreeTrainer, LassoTrainer
from reagent.ope.utils import Clamper
from torch import Tensor
# Slate test using Microsoft Learning to Rank Datasets (MSLR-WEB30K):
# https://www.microsoft.com/en-us/research/project/mslr/
class MSLRDatasets:
    """
    Loader for Microsoft Learning-to-Rank (MSLR-WEB30K) dataset files.

    Parses LibSVM-style rows ("<rel> qid:<q> <i>:<v> ..."), groups documents
    by query, and exposes features/relevances/sample weights as tensors.
    Parsed data can be cached to a pickle file for faster reloads.
    """

    def __init__(
        self,
        params,
        num_columns: int,
        anchor_url_features: List[int],
        body_features: List[int],
        dataset_name: str = "",
        device=None,
    ):
        """
        Args:
            params: dict with "folder" (required), optional "source_file"
                and "cache_file" entries
            num_columns: tokens per row (label + qid + features)
            anchor_url_features: column indices of anchor/URL features
            body_features: column indices of body features
            dataset_name: label for this dataset split
            device: optional torch device for created tensors
        """
        if "folder" not in params:
            raise Exception('Please define "folder" in "dataset"')
        self._folder = params["folder"]
        # NOTE(review): the default here is a list, but load() passes
        # self._source_file straight to os.path.join, which requires a str —
        # confirm the intended type of "source_file" in the config.
        self._source_file = (
            params["source_file"] if "source_file" in params else ["train.txt"]
        )
        self._cache_file = params["cache_file"] if "cache_file" in params else ""
        self._num_columns = num_columns
        self._anchor_url_features = anchor_url_features
        self._body_features = body_features
        self._device = device
        # qid -> list of (relevance, feature tensor) rows, in file order
        self._dict = None
        # Lazily-built tensor caches
        self._queries = None
        self._features = None
        self._relevances = None
        self._sample_weights = None
        self._train_data = None
        self._validation_data = None
        self._test_data = None
        self._name = dataset_name

    @property
    def name(self) -> str:
        return self._name

    def _add(self, qid: Optional[int], feature_list: List[Tuple[float, Tensor]]):
        """Merge one query's accumulated (relevance, features) rows into _dict."""
        if qid is None or len(feature_list) == 0:
            return
        if qid in self._dict:
            self._dict[qid].extend(feature_list)
        else:
            self._dict[qid] = feature_list

    def load(self):
        """Load from the pickle cache if readable, otherwise parse the text file."""
        pickle_file = os.path.join(self._folder, self._cache_file)
        if len(self._cache_file) > 0 and os.access(pickle_file, os.R_OK):
            logging.info(f"loading {pickle_file}")
            with open(pickle_file, "rb") as f:
                self._queries, self._features, self._relevances = pickle.load(f)
            # Clear the cache name so save() becomes a no-op for cached data
            self._cache_file = ""
            del f
        else:
            self._dict = OrderedDict()
            text_file = os.path.join(self._folder, self._source_file)
            logging.info(f"loading {text_file}")
            if not os.access(text_file, os.R_OK):
                logging.warning(f"{text_file} cannot be accessed.")
                return
            with open(text_file, "r") as f:
                c = 0
                st = time.process_time()
                # feature index starts with 1, so leave features[0] as padding
                features = list(range(self._num_columns - 1))
                features_list = []
                prev_qid = None
                for line in f.readlines():
                    tokens = line.strip().split()
                    if len(tokens) != self._num_columns:
                        continue
                    rel = int(tokens[0])
                    qid = int(tokens[1].split(":")[1])
                    for i in range(2, self._num_columns):
                        feature = tokens[i].split(":")
                        features[i - 1] = float(feature[1])
                    f_tensor = torch.tensor(features, device=self._device)
                    if prev_qid is None:
                        prev_qid = qid
                        features_list.append((rel, f_tensor))
                    elif prev_qid != qid:
                        # Query boundary: flush the previous query's rows
                        self._add(prev_qid, features_list)
                        prev_qid = qid
                        features_list = []
                        features_list.append((rel, f_tensor))
                    else:
                        features_list.append((rel, f_tensor))
                    c += 1
                    if c % 100000 == 0:
                        print(f"{c} - {(time.process_time() - st) / c}")
                self._add(prev_qid, features_list)

    def save(self):
        """Pickle (queries, features, relevances) to the cache file, if configured."""
        if len(self._cache_file) == 0 or self._dict is None:
            return
        pickle_file = os.path.join(self._folder, self._cache_file)
        try:
            with open(pickle_file, "wb") as f:
                self._load_features()
                pickle.dump(
                    (self.queries, self._features, self.relevances),
                    f,
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
        except Exception:
            logging.error(f"{pickle_file} cannot be accessed.")

    @property
    def queries(self) -> Tensor:
        """Int tensor of [qid, start_row, num_rows] per query (cached)."""
        if self._queries is None:
            rows = []
            c = 0
            for i in self._dict.items():
                s = len(i[1])
                rows.append([i[0], c, s])
                c += s
            self._queries = torch.tensor(rows, dtype=torch.int, device=self._device)
        return self._queries

    def _load_features(self):
        # Stack all per-document feature tensors into one matrix (cached)
        if self._features is None:
            self._features = torch.stack([r[1] for v in self._dict.values() for r in v])

    @property
    def features(self) -> Tensor:
        """Feature matrix without the padding column 0."""
        self._load_features()
        return self._features[:, 1:]

    @property
    def all_features(self) -> Tensor:
        return self.features

    @property
    def anchor_url_features(self) -> Tensor:
        self._load_features()
        return (
            self._features[:, self._anchor_url_features]
            if self._anchor_url_features is not None
            else None
        )

    @property
    def body_features(self) -> Tensor:
        self._load_features()
        return (
            self._features[:, self._body_features]
            if self._body_features is not None
            else None
        )

    @property
    def relevances(self) -> Tensor:
        """1-D tensor of relevance labels, one per document (cached)."""
        if self._relevances is None:
            # Fixed: itertools.chain(self._dict.values()) yielded the per-query
            # lists themselves, so r[0] was a whole (rel, tensor) row instead of
            # a label; chain.from_iterable flattens to individual rows.
            self._relevances = torch.tensor(
                [r[0] for r in itertools.chain.from_iterable(self._dict.values())],
                device=self._device,
            )
        return self._relevances

    @property
    def sample_weights(self) -> Tensor:
        """Per-document weight 1/num_docs_in_query, aligned with feature rows."""
        if self._sample_weights is None:
            samples = self.queries[:, 2]
            self._sample_weights = torch.repeat_interleave(
                samples.to(dtype=torch.float).reciprocal(), samples.to(dtype=torch.long)
            )
        return self._sample_weights

    @property
    def folder(self) -> str:
        return self._folder

    @property
    def source_file(self) -> List[str]:
        return self._source_file

    @property
    def cache_file(self) -> str:
        return self._cache_file
def _train_on_view(
    trainer: Trainer,
    train_dataset: MSLRDatasets,
    vali_dataset: MSLRDatasets,
    prefix: str,
    attr: str,
):
    """Train, save and score `trainer` on one feature view; `attr` is the
    dataset attribute name ("all_features", "anchor_url_features" or
    "body_features")."""
    label = attr[: -len("_features")]
    logging.info(f"training {label} features...")
    st = time.process_time()
    trainer.train(
        TrainingData(
            getattr(train_dataset, attr),
            train_dataset.relevances,
            train_dataset.sample_weights,
            getattr(vali_dataset, attr),
            vali_dataset.relevances,
            vali_dataset.sample_weights,
        )
    )
    logging.info(f" training time: {time.process_time() - st}")
    trainer.save_model(
        os.path.join(train_dataset.folder, f"{trainer.name}_{prefix}_{attr}.pickle")
    )
    logging.info("scoring...")
    score = trainer.score(
        getattr(vali_dataset, attr),
        vali_dataset.relevances,
        vali_dataset.sample_weights,
    )
    logging.info(f" score: {score}")


def train(
    trainer: Trainer,
    train_dataset: MSLRDatasets,
    vali_dataset: MSLRDatasets,
    prefix: str = "",
):
    """
    Train `trainer` on each of the three feature views (all, anchor/URL and
    body), saving each model to the dataset folder and logging its validation
    score.

    (Refactored: the original repeated the identical train/save/score sequence
    three times inline.)
    """
    for attr in ("all_features", "anchor_url_features", "body_features"):
        _train_on_view(trainer, train_dataset, vali_dataset, prefix, attr)
def load_dataset(
    params, num_columns, anchor_url_features, body_features, dataset_name=""
) -> MSLRDatasets:
    """Construct an MSLRDatasets from `params`, load (or parse) its contents,
    write the cache, log the resulting tensor shapes and return it."""

    def _timed(label: str, fn) -> None:
        # Run fn() and log its CPU time under the given label.
        start = time.process_time()
        fn()
        logging.info(f" {label} time: {time.process_time() - start}")

    logging.info(f"loading {params['source_file']}")
    dataset = MSLRDatasets(
        params, num_columns, anchor_url_features, body_features, dataset_name
    )
    _timed("load", dataset.load)
    _timed("save", dataset.save)
    logging.info(
        f" queries: {dataset.queries.shape}"
        f", features: {dataset.features.shape}"
        f", sample_weights: {dataset.sample_weights.shape}"
        f", relevance: {dataset.relevances.shape}"
        f", anchor_url: {dataset.anchor_url_features.shape}"
        f", body: {dataset.body_features.shape}"
    )
    return dataset
def train_all(train_dataset, vali_dataset, prefix: str = ""):
    """Train each enabled trainer type on the given datasets."""
    # Classifier/logistic/SGD trainer variants were disabled upstream.
    for trainer in (DecisionTreeTrainer(), LassoTrainer()):
        train(trainer, train_dataset, vali_dataset, prefix)
def train_models(params):
    """
    Train behavior/target models on the configured dataset splits.

    NOTE(review): relies on the module-level globals num_columns,
    anchor_url_features and body_features defined under __main__, and expects
    "all_set"/"first_set"/"vali_set" config keys even though the entry point
    validates "train_set"/"vali_set"/"test_set" — confirm the config schema.
    """
    all_dataset = load_dataset(
        params["all_set"], num_columns, anchor_url_features, body_features
    )
    half_dataset = load_dataset(
        params["first_set"], num_columns, anchor_url_features, body_features
    )
    vali_dataset = load_dataset(
        params["vali_set"], num_columns, anchor_url_features, body_features
    )
    train_all(all_dataset, vali_dataset, "all")
    train_all(half_dataset, vali_dataset, "half")
class MSLRModel(SlateModel):
    """
    Ground-truth slate model backed by the dataset's relevance labels.
    The query value encodes (qid_range_start, start_row, num_rows).
    """

    def __init__(self, relevances: Tensor, device=None):
        self._relevances = relevances
        self._device = device

    def item_relevances(self, context: SlateContext) -> Tensor:
        qv = context.query.value
        # Slice this query's documents out of the flat relevance tensor.
        docs = self._relevances[qv[1] : (qv[1] + qv[2])]
        if context.params is not None:
            # Restrict to the item subset chosen for this context.
            docs = docs[context.params]
        return docs.detach().clone()

    def item_rewards(self, context: SlateContext) -> SlateItemValues:
        return SlateItemValues(self.item_relevances(context))
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    dataset: MSLRDatasets,
    slate_size: int,
    item_size: int,
    metric_func: str,
    log_trainer: Trainer,
    log_distribution: RewardDistribution,
    log_features: str,
    tgt_trainer: Trainer,
    tgt_distribution: RewardDistribution,
    tgt_features: str,
    dm_features: str,
    max_num_workers: int,
    device=None,
):
    """
    Generate simulated slate logs from the dataset using a logging policy
    (log_trainer scores + log_distribution over log_features) and evaluate
    the given slate estimators against a target policy (tgt_trainer +
    tgt_distribution over tgt_features).

    Args:
        experiments: (estimators, num_samples) pairs; one task per pair
        dataset: dataset split with pre-trained model files in its folder
        slate_size: slots per slate (must be < item_size)
        item_size: candidate documents kept per sampled query
        metric_func: "dcg", "err", or (default) "ndcg"
        dm_features: dataset attribute used as DM training features
        max_num_workers: worker processes for the Evaluator (<= 0 sequential)
    """
    assert slate_size < item_size
    print(
        f"Evaluate All:"
        f" slate_size={slate_size}, item_size={item_size}, metric={metric_func}"
        f", Log=[{log_trainer.name}, {log_distribution}, {log_features}]"
        f", Target=[{tgt_trainer.name}, {tgt_distribution}, {tgt_features}]"
        f", DM=[{dm_features}]"
        f", Workers={max_num_workers}, device={device}",
        flush=True,
    )
    logging.info("Preparing models and policies...")
    st = time.perf_counter()
    log_trainer.load_model(
        os.path.join(
            dataset.folder, log_trainer.name + "_all_" + log_features + ".pickle"
        )
    )
    # calculate behavior model scores
    log_pred = log_trainer.predict(getattr(dataset, log_features))
    tgt_trainer.load_model(
        os.path.join(
            dataset.folder, tgt_trainer.name + "_all_" + tgt_features + ".pickle"
        )
    )
    # calculate target model scores
    tgt_pred = tgt_trainer.predict(getattr(dataset, tgt_features))
    dm_train_features = getattr(dataset, dm_features)
    slots = SlateSlots(slate_size)
    dt = time.perf_counter() - st
    logging.info(f"Preparing models and policies done: {dt}s")
    total_samples = 0
    for _, num_samples in experiments:
        total_samples += num_samples
    logging.info(f"Generating log: total_samples={total_samples}")
    st = time.perf_counter()
    tasks = []
    samples_generated = 0
    total_queries = dataset.queries.shape[0]
    for estimators, num_samples in experiments:
        samples = []
        for _ in range(num_samples):
            # randomly sample a query
            q = dataset.queries[random.randrange(total_queries)]
            doc_size = int(q[2])
            if doc_size < item_size:
                # skip if number of docs is less than item_size
                continue
            si = int(q[1])
            ei = si + doc_size
            # using top item_size docs for logging
            log_scores, item_choices = log_pred.scores[si:ei].sort(
                dim=0, descending=True
            )
            log_scores = log_scores[:item_size]
            item_choices = item_choices[:item_size]
            log_item_probs = log_distribution(SlateItemValues(log_scores))
            # target scores for the same candidate items
            tgt_scores = tgt_pred.scores[si:ei][item_choices].detach().clone()
            tgt_item_probs = tgt_distribution(SlateItemValues(tgt_scores))
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(dataset.relevances[si:ei][item_choices])
            gt_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            query = SlateQuery((si, ei))
            context = SlateContext(query, slots, item_choices)
            slot_weights = metric.slot_weights(slots)
            # ground-truth reward of the target policy's expected slate
            gt_reward = metric.calculate_reward(slots, gt_rewards, None, slot_weights)
            if tgt_item_probs.is_deterministic:
                # deterministic target: the logged slate equals the target slate
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
                log_reward = gt_reward
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
                log_rewards = log_slate.slot_values(gt_item_rewards)
                log_reward = metric.calculate_reward(
                    slots, log_rewards, None, slot_weights
                )
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            item_features = SlateItemFeatures(dm_train_features[si:ei][item_choices])
            sample = LogSample(
                context,
                metric,
                log_slate,
                log_reward,
                log_slate_prob,
                None,
                log_item_probs,
                tgt_slate_prob,
                None,
                tgt_item_probs,
                gt_reward,
                slot_weights,
                None,
                item_features,
            )
            samples.append(sample)
            samples_generated += 1
            if samples_generated % 1000 == 0:
                logging.info(
                    f" samples generated: {samples_generated}, {100 * samples_generated / total_samples:.1f}%"
                )
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")
    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
if __name__ == "__main__":
    # "spawn" is required so worker processes re-import cleanly
    mp.set_start_method("spawn")
    logging.basicConfig(
        format="%(asctime)-15s_%(levelname)s: %(message)s", level=logging.INFO
    )
    logging.info(f"working dir - {os.getcwd()}")
    # Fix all RNGs for reproducibility
    random.seed(1234)
    np.random.seed(1234)
    torch.random.manual_seed(1234)
    parser = argparse.ArgumentParser(description="Read command line parameters.")
    parser.add_argument("-p", "--parameters", help="Path to config file.")
    args = parser.parse_args(sys.argv[1:])
    with open(args.parameters, "r") as f:
        params = json.load(f)
    if "train_set" not in params:
        logging.error('"train_set" not defined')
        exit(1)
    if "vali_set" not in params:
        logging.error('"vali_set" not defined')
        exit(1)
    if "test_set" not in params:
        logging.error('"test_set" not defined')
        exit(1)
    # device = torch.device("cuda") if torch.cuda.is_available() else None
    device = None
    num_columns = params["num_columns"] if "num_columns" in params else 138
    anchor_url_features = (
        params["anchor_url_features"] if "anchor_url_features" in params else None
    )
    body_features = params["body_features"] if "body_features" in params else None
    # uncomment to train behavior and target models
    # train_models(params)
    # NOTE(review): the checks above validate "train_set"/"vali_set"/"test_set"
    # but the evaluation loads "second_set" — confirm the expected config keys.
    test_dataset = load_dataset(
        params["second_set"],
        num_columns,
        anchor_url_features,
        body_features,
        "second_set",
    )
    # Negative importance weights are clamped to zero
    weight_clamper = Clamper(min_v=0.0)
    estimators = [
        DMEstimator(DecisionTreeTrainer(), 0.5, device=device),
        IPSEstimator(weight_clamper=weight_clamper, device=device),
        DoublyRobustEstimator(
            DecisionTreeTrainer(), 0.5, weight_clamper, False, device
        ),
        DoublyRobustEstimator(DecisionTreeTrainer(), 0.5, weight_clamper, True, device),
        PseudoInverseEstimator(weight_clamper=weight_clamper, device=device),
        PBMEstimator(weight_clamper=weight_clamper, device=device),
    ]
    metrics = ["ndcg", "err"]
    alphas = [0.0, 1.0, 2.0]
    trainers = [
        (DecisionTreeTrainer(), LassoTrainer()),
        (LassoTrainer(), DecisionTreeTrainer()),
    ]
    # NOTE(review): each tuple holds a single target trainer despite the
    # plural name tgt_trainers.
    for log_trainer, tgt_trainers in trainers:
        for metric in metrics:
            for alpha in alphas:
                evaluate(
                    [(estimators, 200)] * 4,
                    test_dataset,
                    5,
                    20,
                    metric,
                    log_trainer,
                    RankingDistribution(alpha),
                    "anchor_url_features",
                    tgt_trainers,
                    PassThruDistribution(),
                    "body_features",
                    "all_features",
                    4,
                )
| 20,558 | 32.648118 | 111 | py |
ReAgent | ReAgent-master/reagent/ope/test/cartpole.py | #!/usr/bin/env python3
import logging
import gym
import torch
from reagent.ope.estimators.sequential_estimators import (
Action,
ActionDistribution,
ActionSpace,
IPSEstimator,
Model,
NeuralDualDICE,
RandomRLPolicy,
RewardProbability,
RLEstimatorInput,
RLPolicy,
State,
StateDistribution,
Transition,
)
from reagent.ope.utils import RunningAverage
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Rollout configuration shared by log generation and Monte-Carlo evaluation.
NUM_EPISODES = 200
MAX_HORIZON = 250
GAMMA = 0.99
class ComboPolicy(RLPolicy):
    """Mixture policy: a weighted sum of the distributions of several policies."""

    def __init__(self, action_space: ActionSpace, weights, policies):
        assert len(weights) == len(policies)
        self._weights = weights
        self._policies = policies
        self._action_space = action_space

    def action_dist(self, state: State) -> ActionDistribution:
        # Accumulate w_i * dist_i(state) over the component policies in order.
        combined = None
        for weight, policy in zip(self._weights, self._policies):
            term = weight * policy(state).values
            combined = term if combined is None else combined + term
        return self._action_space.distribution(combined)
class PyTorchPolicy(RLPolicy):
    """Policy backed by a PyTorch model mapping a state vector to action scores."""

    def __init__(self, action_space: ActionSpace, model):
        self._action_space = action_space
        self._model = model
        self._softmax = torch.nn.Softmax(dim=0)

    def action_dist(self, state: State) -> ActionDistribution:
        self._model.eval()
        features = torch.tensor(state.value, dtype=torch.float).reshape(1, -1)
        scores = self._model(features)[0]
        return self._action_space.distribution(self._softmax(scores))
class EnvironmentModel(torch.nn.Module):
    """MLP dynamics model: (state, one-hot action) -> (next state, reward).

    Args:
        state_dim: dimensionality of the state vector.
        action_dim: number of discrete actions (one-hot encoded input).
        hidden_dim: width of each hidden layer.
        hidden_layers: number of hidden layers.
        activation: activation *class* (e.g. ``torch.nn.ReLU``), instantiated
            once per hidden layer.
    """

    def __init__(self, state_dim, action_dim, hidden_dim, hidden_layers, activation):
        # BUG FIX: the original called super(EnvironmentModel.Network, self)
        # .__init__(), but EnvironmentModel has no nested ``Network`` class,
        # so every construction raised AttributeError. Initialize the
        # torch.nn.Module base directly.
        super().__init__()
        self._state_dim = state_dim
        self._action_dim = action_dim
        self._hidden_dim = hidden_dim
        self._hidden_layers = hidden_layers
        self._activation = activation
        self.layers = []
        dim = self._state_dim + self._action_dim
        for _ in range(self._hidden_layers):
            self.layers.append(torch.nn.Linear(dim, self._hidden_dim))
            self.layers.append(self._activation())
            dim = self._hidden_dim
        # Output is the next state and its reward
        self.layers.append(torch.nn.Linear(dim, self._state_dim + 1))
        self.model = torch.nn.Sequential(*self.layers)

    def forward(self, state: torch.Tensor, action: torch.Tensor):
        """Concatenate state and action along the feature axis and predict."""
        x = torch.cat((state, action), dim=1)
        return self.model(x)
class ModelWrapper(Model):
    """Adapts an EnvironmentModel to the OPE ``Model`` interface.

    The wrapped network is deterministic, so the returned distribution has a
    single (next_state, reward) entry.
    """

    def __init__(self, model: EnvironmentModel, device=None):
        self._model = model
        self._device = device
        self._model.to(self._device)

    def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution:
        self._model.eval()
        # Run the model on a single (state, one-hot action) pair and flatten
        # the result to a 1-D CPU tensor of length state_dim + 1.
        state_reward_tensor = (
            self._model(
                torch.tensor(state.value, dtype=torch.float)
                .reshape(-1, self._model._state_dim)
                .to(self._device),
                torch.nn.functional.one_hot(
                    torch.tensor(action.value, dtype=torch.long),
                    self._model._action_dim,
                )
                .reshape(-1, self._model._action_dim)
                .float()
                .to(self._device),
            )
            .reshape(-1)
            .cpu()
        )
        # First state_dim entries form the predicted next state; the last
        # entry is the predicted reward.
        return {
            State(state_reward_tensor[: self._model._state_dim]): RewardProbability(
                state_reward_tensor[-1].item()
            )
        }

    def to(self, device):
        self._model.to(device)
def generate_logs(episodes: int, max_horizon: int, policy: RLPolicy):
    """
    Generate logged trajectories from CartPole-v0 under the given policy.

    Args:
        episodes: number of episodes to generate
        max_horizon: max horizon of each episode
        policy: RLPolicy which uses real-valued states

    Returns:
        A list of episodes, each a list of Transition (reward is 1.0/step).
    """
    log = []
    env = gym.make("CartPole-v0")
    for _ in range(episodes):
        cur_state = env.reset()
        mdp = []
        for _ in range(max_horizon):
            action_dist = policy(State(cur_state))
            action = action_dist.sample()[0].value
            action_prob = action_dist.probability(Action(action))
            next_state, _, done, _ = env.step(action)
            mdp.append(
                Transition(
                    last_state=State(cur_state),
                    action=Action(action),
                    action_prob=action_prob,
                    state=State(next_state),
                    reward=1.0,
                    status=Transition.Status.NORMAL,
                )
            )
            cur_state = next_state
            if done:
                break
        # BUG FIX: the original appended ``mdp`` both inside the ``done``
        # branch and after the loop, so terminated episodes were logged
        # twice; append each episode exactly once.
        log.append(mdp)
    return log
def zeta_nu_loss_callback(losses, estimated_values, input: RLEstimatorInput):
    """Build a training callback that records (zeta, nu) losses and the
    estimator's current value estimates on ``input``."""

    def callback_fn(zeta_loss, nu_loss, estimator):
        record = (zeta_loss, nu_loss)
        losses.append(record)
        estimate = estimator._compute_estimates(input)
        estimated_values.append(estimate)

    return callback_fn
def estimate_value(episodes: int, max_horizon: int, policy: RLPolicy, gamma: float):
    """Monte-Carlo estimate of the policy's discounted value on CartPole-v0.

    Runs ``episodes`` rollouts of at most ``max_horizon`` steps, accumulating
    a reward of 1.0 per step discounted by ``gamma``, and returns the average
    discounted return.
    """
    avg = RunningAverage()
    env = gym.make("CartPole-v0")
    for _ in range(episodes):
        init_state = env.reset()
        cur_state = init_state
        r = 0.0
        discount = 1.0
        for _ in range(max_horizon):
            action_dist = policy(State(cur_state))
            action = action_dist.sample()[0].value
            next_state, _, done, _ = env.step(action)
            reward = 1.0  # CartPole: +1 for every surviving step
            r += reward * discount
            discount *= gamma
            if done:
                break
            cur_state = next_state
        avg.add(r)
    return avg.average
def run_dualdice_test(model_path: str, alpha: float):
    """End-to-end NeuralDualDICE vs IPS comparison on CartPole-v0.

    Args:
        model_path: path to a TorchScript DQN checkpoint
            (loaded via ``torch.jit.load``).
        alpha: shifts the behavior policy's mixture weights away from
            55/45 model/random; larger alpha -> closer to pure model policy.

    Returns:
        Dict with the IPS and DualDICE estimates, the Monte-Carlo ground
        truth, and DualDICE per-epoch losses/estimates for diagnostics.
    """
    device = torch.device("cuda") if torch.cuda.is_available() else None
    logger.info(f"Device - {device}")
    model = torch.jit.load(model_path)
    model = model.dqn_with_preprocessor.model
    random_policy = RandomRLPolicy(ActionSpace(2))
    model_policy = PyTorchPolicy(ActionSpace(2), model)
    # Target: fixed 70/30 model/random mixture; behavior: mixture shifted by alpha.
    target_policy = ComboPolicy(
        ActionSpace(2), [0.7, 0.3], [model_policy, random_policy]
    )
    behavior_policy = ComboPolicy(
        ActionSpace(2),
        [0.55 + 0.15 * alpha, 0.45 - 0.15 * alpha],
        [model_policy, random_policy],
    )
    # Monte-Carlo ground-truth values for all three policies, for reference.
    ground_truth = estimate_value(NUM_EPISODES, MAX_HORIZON, target_policy, GAMMA)
    log_policy_value = estimate_value(NUM_EPISODES, MAX_HORIZON, behavior_policy, GAMMA)
    trained_policy_value = estimate_value(
        NUM_EPISODES, MAX_HORIZON, model_policy, GAMMA
    )
    logger.info(f"Target Policy Ground Truth value: {ground_truth}")
    logger.info(f"Behavior Policy Ground Truth value: {log_policy_value}")
    logger.info(f"Model Policy Ground Truth value: {trained_policy_value}")
    # Logged data comes from the behavior policy; estimators evaluate the target.
    log = generate_logs(NUM_EPISODES, MAX_HORIZON, behavior_policy)
    inp = RLEstimatorInput(
        gamma=GAMMA, log=log, target_policy=target_policy, discrete_states=False
    )
    ips = IPSEstimator()
    dualdice_losses = []
    dualdice_values = []
    dualdice = NeuralDualDICE(
        state_dim=4,
        action_dim=2,
        deterministic_env=True,
        average_next_v=False,
        value_lr=0.003,
        zeta_lr=0.003,
        batch_size=2048,
        reporting_frequency=1000,
        training_samples=100000,
        loss_callback_fn=zeta_nu_loss_callback(dualdice_losses, dualdice_values, inp),
        device=device,
    )
    ips_result = ips.evaluate(inp)
    dd_result = dualdice.evaluate(inp)
    return {
        "ips_estimate": ips_result,
        "dualdice_estimate": dd_result,
        "ground_truth": ground_truth,
        "dualdice_losses": dualdice_losses,
        "dualdice_estimates_per_epoch": dualdice_values,
    }
if __name__ == "__main__":
run_dualdice_test(
"/mnt/vol/gfsfblearner-nebraska/flow/data/2020-07-27/a56cd422-794b-4866-9b73-5de95fb65700/207851498_207851498_0.pt",
0.0,
)
| 8,083 | 31.079365 | 124 | py |
ReAgent | ReAgent-master/reagent/ope/test/gridworld.py | #!/usr/bin/env python3
import logging
import random
from typing import Iterable, Optional, Sequence, Tuple
import numpy as np
import torch
from reagent.ope.estimators.sequential_estimators import (
DMEstimator,
DoublyRobustEstimator,
EpsilonGreedyRLPolicy,
IPSEstimator,
MAGICEstimator,
NeuralDualDICE,
RandomRLPolicy,
RewardProbability,
RLEstimatorInput,
State,
StateDistribution,
StateReward,
ValueFunction,
)
from reagent.ope.estimators.types import Action, ActionSpace
from reagent.ope.test.envs import Environment, PolicyLogGenerator
from reagent.ope.trainers.rl_tabular_trainers import (
DPTrainer,
DPValueFunction,
EstimatedStateValueFunction,
TabularPolicy,
)
class GridWorld(Environment):
    """Deterministic grid world with a start cell, a goal cell and walls.

    States are (row, col) tuples; actions 0/1/2/3 attempt moves of
    (+1,0)/(0,+1)/(-1,0)/(0,-1) respectively.  Moving off-grid or into a
    wall leaves the agent in place.  Reaching the goal yields reward 1.0
    and terminates; other steps yield 0.0, or a taxicab-distance shaped
    reward when ``use_taxicab_reward`` is set.
    """

    def __init__(
        self,
        size: Tuple[int, int],
        start: Tuple[int, int],
        goal: Tuple[int, int],
        max_horizon: int = -1,
        walls: Iterable[Tuple[int, int]] = (),
        use_taxicab_reward: bool = False,
    ):
        super().__init__(max_horizon)
        self.size = size
        self.start = start
        self.goal = goal
        self.walls = set(walls)
        self.use_taxicab_reward = use_taxicab_reward
        self.reset()

    @classmethod
    def from_grid(
        cls,
        grid: Sequence[Sequence[str]],
        max_horizon: int = -1,
        use_taxicab_reward: bool = False,
    ):
        """Build a GridWorld from rows of characters: 's' start, 'g' goal,
        'w' wall (case-insensitive); any other character is open floor."""
        size = (len(grid), len(grid[0]))
        start = (0, 0)
        goal = (0, 0)
        walls = []
        for x, r in enumerate(grid):
            for y, c in enumerate(r):
                g = c.lower()
                if g == "s":
                    start = (x, y)
                elif g == "g":
                    goal = (x, y)
                elif g == "w":
                    walls += ((x, y),)
        return cls(size, start, goal, max_horizon, walls, use_taxicab_reward)

    @classmethod
    def random_grid(
        cls,
        length: int,
        max_horizon: int = -1,
        wall_prob: float = 0.1,
        use_taxicab_reward: bool = False,
    ):
        """
        Generates a random grid of size length x length with start = (0, 0) and
        goal = (length-1, length-1)
        """
        size = (length, length)
        start = (0, 0)
        goal = (length - 1, length - 1)
        walls = []
        for r in range(length):
            for c in range(length):
                if (r, c) == start or (r, c) == goal:
                    continue
                else:
                    # Each non-terminal cell independently becomes a wall.
                    if random.uniform(0, 1) < wall_prob:
                        walls.append((r, c))
        return cls(size, start, goal, max_horizon, walls, use_taxicab_reward)

    def reset(self, state: Optional[State] = None):
        # Default to the start cell when no explicit state is given.
        super().reset(state)
        if self._current_state is None:
            self._current_state = State(self.start)
        return self._current_state

    def close(self):
        pass

    def _validate(self, pos: Tuple[int, int]) -> bool:
        # A position is legal iff it is on the board and not a wall.
        return (
            0 <= pos[0] < self.size[0]
            and 0 <= pos[1] < self.size[1]
            and pos not in self.walls
        )

    def _transit(
        self, from_pos: Tuple[int, int], to_pos: Tuple[int, int]
    ) -> Tuple[Tuple[int, int], float, bool]:
        """Return (resulting position, reward, is_terminal) for one move."""
        if not self._validate(to_pos):
            return from_pos, 0.0, False
        elif to_pos == self.goal:
            return to_pos, 1.0, True
        else:
            # Optional reward shaping that decays with distance to the goal.
            return (
                to_pos,
                0.0
                if not self.use_taxicab_reward
                else np.exp(-2 * self._taxi_distance(to_pos, self.goal) / self.size[0]),
                False,
            )

    def _taxi_distance(
        self, from_pos: Tuple[int, int], to_pos: Tuple[int, int]
    ) -> float:
        # Manhattan (L1) distance between two cells.
        return abs(from_pos[0] - to_pos[0]) + abs(from_pos[1] - to_pos[1])

    def _next_state_reward(self, state: State, action: Action) -> StateReward:
        value = state.value
        assert isinstance(value, tuple), f"got type {type(value)} instead of tuple"
        # pyre-fixme[23]: Unable to unpack single value, 2 were expected.
        (x, y) = value
        assert isinstance(x, int) and isinstance(
            y, int
        ), "Gridworld expects states to be Tuple[int, int]"
        # Walls and the goal are absorbing: no further transitions or reward.
        if state.value in self.walls or state.value == self.goal:
            return StateReward(State((x, y), state.is_terminal), 0.0)
        if action.value == 0:
            to_pos, reward, is_end = self._transit((x, y), (x + 1, y))
        elif action.value == 1:
            to_pos, reward, is_end = self._transit((x, y), (x, y + 1))
        elif action.value == 2:
            to_pos, reward, is_end = self._transit((x, y), (x - 1, y))
        else:
            to_pos, reward, is_end = self._transit((x, y), (x, y - 1))
        return StateReward(State(to_pos, is_end), reward)

    def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution:
        # Deterministic dynamics: all probability mass on a single next state.
        sr = self._next_state_reward(state, action)
        assert sr.state is not None
        return {sr.state: RewardProbability(sr.reward, 1.0)}

    @property
    def observation_space(self):
        # 2-D observation with per-dimension (low, high) bounds.
        return (2,), ((0, self.size[0]), (0, self.size[1]))

    @property
    def states(self):
        # Yields all non-goal, non-wall cells.
        for x in range(self.size[0]):
            for y in range(self.size[1]):
                state = (x, y)
                if state != self.goal and state not in self.walls:
                    yield State((x, y))

    def __repr__(self):
        # Unicode map: circle = start, star = goal, black square = wall.
        dump = ""
        for x in range(self.size[0]):
            for y in range(self.size[1]):
                pos = (x, y)
                if pos == self.start:
                    dump += "\u2b55"
                elif pos == self.goal:
                    dump += "\u2b50"
                elif pos in self.walls:
                    dump += "\u2b1b"
                else:
                    dump += "\u2b1c"
            dump += "\n"
        return dump

    def dump_state_values(self, state_values) -> str:
        """Render a {State: value} mapping as a grid of numbers."""
        dump = ""
        for x in range(self.size[0]):
            for y in range(self.size[1]):
                pos = State((x, y))
                value = 0.0
                if pos in state_values:
                    value = state_values[pos]
                dump += "{:6.3}".format(value)
            dump += "\n"
        return dump

    def dump_value_func(self, valfunc: ValueFunction) -> str:
        """Render a ValueFunction as a grid of numbers."""
        dump = ""
        for x in range(self.size[0]):
            for y in range(self.size[1]):
                dump += "{:6.3}".format(valfunc(State((x, y))))
            dump += "\n"
        return dump

    def dump_policy(self, policy) -> str:
        """Render the policy's greedy action per cell as arrows."""
        dump = ""
        for x in range(self.size[0]):
            for y in range(self.size[1]):
                pos = (x, y)
                if pos == self.start:
                    dump += "\u2b28"
                elif pos == self.goal:
                    dump += "\u2b27"
                elif pos in self.walls:
                    dump += "\u2588"
                else:
                    action = policy(State(pos)).greedy()[0]
                    if action.value == 0:
                        dump += "\u21e9"
                    elif action.value == 1:
                        dump += "\u21e8"
                    elif action.value == 2:
                        dump += "\u21e7"
                    else:
                        dump += "\u21e6"
            dump += "\n"
        return dump
class ThomasGridWorld(GridWorld):
    """
    4x4 GridWorld from https://people.cs.umass.edu/~pthomas/papers/Thomas2015c.pdf
    with fixed penalty/bonus cells and a -1 per-step cost.
    """

    def __init__(self):
        super().__init__((4, 4), (0, 0), (3, 3), 100)

    def _transit(
        self, from_pos: Tuple[int, int], to_pos: Tuple[int, int]
    ) -> Tuple[Tuple[int, int], float, bool]:
        # Invalid moves keep the agent in place with zero reward.
        if not self._validate(to_pos):
            return from_pos, 0.0, False
        # Reaching the goal terminates with the big bonus.
        if to_pos == self.goal:
            return to_pos, 10.0, True
        # Two special cells override the default -1 step cost.
        special_rewards = {(1, 2): -10.0, (1, 3): 1.0}
        return to_pos, special_rewards.get(to_pos, -1.0), False
class NoiseGridWorldModel(Environment):
    """Noisy model of a GridWorld: the requested action is executed with
    probability 1 - epsilon, and each other action with probability
    epsilon / (num_actions - 1)."""

    def __init__(
        self,
        gridworld: GridWorld,
        action_space: ActionSpace,
        epsilon: float = 0.1,
        max_horizon: int = -1,
    ):
        super().__init__(max_horizon)
        self._gridworld = gridworld
        self.action_space = action_space
        self.epsilon = epsilon
        # Probability assigned to each of the non-selected actions.
        self.noise_prob = epsilon / (len(action_space) - 1)

    def reset(self, state: Optional[State] = None):
        self._gridworld.reset(state)
        self._steps_taken = 0
        return self._gridworld.current_state

    def close(self):
        pass

    def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution:
        probs = [self.noise_prob] * len(self.action_space)
        assert isinstance(
            action.value, int
        ), f"got type {type(action.value)} instead of int"
        # pyre-fixme[16]: `int` has no attribute `__setitem__`.
        probs[action.value] = 1 - self.epsilon
        states = {}
        # Merge the deterministic outcome of every action, weighted by probs.
        for a in self.action_space:
            sr = self._gridworld._next_state_reward(state, a)
            if sr.state in states:
                rp = states[sr.state]
                # NOTE(review): when two actions lead to the same state the
                # rewards are summed un-weighted while probabilities are
                # added — confirm this aggregation is intended.
                states[sr.state] = RewardProbability(
                    rp.reward + sr.reward,
                    # pyre-fixme[16]: `int` has no attribute `__getitem__`.
                    rp.prob + probs[a.value],
                )
            else:
                states[sr.state] = RewardProbability(sr.reward, probs[a.value])
        return states

    @property
    def observation_space(self):
        return self._gridworld.observation_space

    @property
    def states(self):
        return self._gridworld.states

    @property
    def current_state(self):
        return self._gridworld.current_state

    @current_state.setter
    def current_state(self, state: Optional[None]):
        self._gridworld._current_state = state
# Discount factor shared by training, evaluation and all estimators below.
GAMMA = 0.9
# Use exact dynamic-programming values as ground truth (vs Monte-Carlo).
USE_DP_VALUE_FUNC = True

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Fix all RNGs for reproducibility.
    random.seed(1234)
    np.random.seed(1234)
    torch.random.manual_seed(1234)
    device = torch.device("cuda") if torch.cuda.is_available() else None
    print(f"device - {device}")
    gridworld = GridWorld.random_grid(10, max_horizon=250, use_taxicab_reward=True)
    logging.info(f"GridWorld:\n{gridworld}")
    # Train an optimal tabular policy with dynamic programming.
    action_space = ActionSpace(4)
    opt_policy = TabularPolicy(action_space)
    trainer = DPTrainer(gridworld, opt_policy)
    value_func = trainer.train(gamma=GAMMA)
    logging.info(f"Opt Policy:\n{gridworld.dump_policy(opt_policy)}")
    logging.info(f"Opt state values:\n{gridworld.dump_value_func(value_func)}")
    # Behavior (logging) and target policies are epsilon-greedy variants of
    # the optimal policy.  ("behavivor" is a pre-existing local-name typo.)
    # behavivor_policy = RandomRLPolicy(action_space)
    behavivor_policy = EpsilonGreedyRLPolicy(opt_policy, 0.7)
    target_policy = EpsilonGreedyRLPolicy(opt_policy, 0.3)
    model = NoiseGridWorldModel(gridworld, action_space, epsilon=0.1, max_horizon=1000)
    value_func = DPValueFunction(target_policy, model, GAMMA)
    ground_truth: Optional[ValueFunction] = None
    if USE_DP_VALUE_FUNC:
        ground_truth = DPValueFunction(target_policy, gridworld, GAMMA)
    else:
        ground_truth = EstimatedStateValueFunction(target_policy, gridworld, GAMMA)
    logging.info(
        f"Target Policy ground truth values:\n"
        f"{gridworld.dump_value_func(ground_truth)}"
    )
    logging.info(
        f"Logging Policy values:\n"
        f"{gridworld.dump_value_func(DPValueFunction(behavivor_policy, model, GAMMA))}"
    )
    # Generate num_episodes logged trajectories from every start state.
    log = []
    log_generator = PolicyLogGenerator(gridworld, behavivor_policy)
    num_episodes = 50
    for state in gridworld.states:
        for _ in range(num_episodes):
            log.append(log_generator.generate_log(state))
        logging.info(f"Generated {num_episodes} logs for {state}")
    estimator_input = RLEstimatorInput(
        gamma=GAMMA,
        log=log,
        target_policy=target_policy,
        value_function=value_func,
        ground_truth=ground_truth,
    )
    # Run each OPE estimator on the same input for comparison.
    NeuralDualDICE(
        device=device,
        state_dim=2,
        action_dim=4,
        deterministic_env=True,
        average_next_v=False,
        value_lr=0.001,
        zeta_lr=0.0001,
        batch_size=512,
    ).evaluate(estimator_input)
    DMEstimator(device=device).evaluate(estimator_input)
    IPSEstimator(weight_clamper=None, weighted=False, device=device).evaluate(
        estimator_input
    )
    IPSEstimator(weight_clamper=None, weighted=True, device=device).evaluate(
        estimator_input
    )
    DoublyRobustEstimator(weight_clamper=None, weighted=False, device=device).evaluate(
        estimator_input
    )
    DoublyRobustEstimator(weight_clamper=None, weighted=True, device=device).evaluate(
        estimator_input
    )
    MAGICEstimator(device=device).evaluate(
        estimator_input, num_resamples=10, loss_threhold=0.0000001, lr=0.00001
    )
| 13,049 | 30.90709 | 88 | py |
ReAgent | ReAgent-master/reagent/ope/test/multiclass_bandits.py | #!/usr/bin/env python3
import argparse
import json
import logging
import os
import random
import sys
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable, Tuple
import numpy as np
import pandas as pd
import torch
from reagent.ope.estimators.contextual_bandits_estimators import (
Action,
ActionDistribution,
ActionRewards,
BanditsEstimatorInput,
BanditsModel,
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
LogSample,
)
from reagent.ope.estimators.estimator import Estimator, Evaluator
from reagent.ope.estimators.types import ActionSpace, Policy, Trainer, TrainingData
from reagent.ope.trainers.linear_trainers import (
DecisionTreeTrainer,
LogisticRegressionTrainer,
SGDClassifierTrainer,
)
from torch import Tensor
@dataclass(frozen=True)
class MultiClassDataRow(object):
    # One classification example: input features, integer class index,
    # and the one-hot encoding of that class.
    feature: torch.Tensor
    label: torch.Tensor
    one_hot: torch.Tensor
class UCIMultiClassDataset:
    """
    To load and hold UCI classification datasets:
    https://archive.ics.uci.edu/ml/datasets.php?task=cla&sort=nameUp&view=table
    Also to convert it to contextual bandits problems
    References: https://arxiv.org/abs/1103.4601
    """

    def __init__(self, params, device=None):
        # params: dict with "file" (CSV path) and "label_col" (may be
        # negative, counted from the file's end); optional "index_col", "sep".
        if "file" not in params:
            raise Exception('Please define "file" in "dataset"')
        if "label_col" not in params:
            raise Exception('Please define "label_col" in "dataset"')
        index_col = params["index_col"] if "index_col" in params else None
        label_col = params["label_col"]
        sep = params["sep"] if "sep" in params else ","
        self._config_file = params["file"]
        self._data_frame = pd.read_csv(
            self._config_file,
            sep=sep,
            header=None,
            index_col=index_col if index_col is not None else False,
        )
        # Translate label_col from file-column space to frame-column space,
        # accounting for a dropped index column and negative indices.
        file_col_len = self._data_frame.shape[1] + (1 if index_col is not None else 0)
        if label_col < 0:
            label_col = file_col_len + label_col
        frame_label_col = label_col
        if index_col is not None and label_col > index_col:
            label_col = label_col - 1
        # Feature matrix: every column except the label column.
        self._features = torch.as_tensor(
            self._data_frame.iloc[
                :, [i for i in range(self._data_frame.shape[1]) if i != label_col]
            ].values,
            dtype=torch.float32,
            device=device,
        )
        # Sorted unique labels; per row, its class index and one-hot vector.
        self._classes = self._data_frame[frame_label_col].unique()
        self._classes.sort()
        self._labels = self._data_frame[frame_label_col].values
        self._class_indices = torch.tensor(
            [np.where(self._classes == i)[0][0] for i in self._labels],
            dtype=torch.long,
            device=device,
        )
        self._one_hots = torch.zeros(
            (self._class_indices.shape[0], len(self._classes)),
            dtype=torch.int,
            device=device,
        )
        self._one_hots[
            torch.arange(self._one_hots.shape[0], dtype=torch.long), self._class_indices
        ] = 1
        self.device = device

    def __len__(self):
        return len(self._data_frame)

    def __getitem__(self, idx) -> MultiClassDataRow:
        return MultiClassDataRow(
            self._features[idx], self._class_indices[idx], self._one_hots[idx]
        )

    @property
    def config_file(self) -> str:
        return self._config_file

    @property
    def num_features(self) -> int:
        return self._features.shape[1]

    @property
    def num_actions(self) -> int:
        return len(self._classes)

    @property
    def features(self) -> torch.Tensor:
        return self._features

    @property
    def labels(self) -> torch.Tensor:
        return self._class_indices

    @property
    def one_hots(self) -> torch.Tensor:
        return self._one_hots

    def train_val_test_split(
        self, ratios: Tuple[float, float] = (0.8, 0.8), device=None
    ):
        """Split into fit/validation/test partitions.

        ratios[0] is the train fraction of the whole set; ratios[1] is the
        fit fraction of that train split.  Returns nine tensors
        (fit/val/test x features/labels/one-hots) plus the train row indices.
        """
        total_len = len(self._data_frame)
        train_len = int(total_len * ratios[0])
        train_choices = random.sample(range(total_len), train_len)
        # NOTE(review): np.take/np.delete are applied to torch tensors; the
        # numpy results are re-wrapped by torch.as_tensor below — confirm
        # dtype/device round-trip is intended.
        train_x = np.take(self._features, train_choices, axis=0)
        train_y = np.take(self._class_indices, train_choices)
        train_r = np.take(self._one_hots, train_choices, axis=0)
        fit_len = int(train_len * ratios[1])
        fit_choices = random.sample(range(train_len), fit_len)
        fit_x = np.take(train_x, fit_choices, axis=0)
        fit_y = np.take(train_y, fit_choices)
        fit_r = np.take(train_r, fit_choices, axis=0)
        val_x = np.delete(train_x, fit_choices, axis=0)
        val_y = np.delete(train_y, fit_choices)
        val_r = np.delete(train_r, fit_choices, axis=0)
        test_x = np.delete(self._features, train_choices, axis=0)
        test_y = np.delete(self._class_indices, train_choices)
        test_r = np.delete(self._one_hots, train_choices, axis=0)
        return (
            torch.as_tensor(fit_x, dtype=torch.float, device=device),
            torch.as_tensor(fit_y, dtype=torch.float, device=device),
            torch.as_tensor(fit_r, dtype=torch.float, device=device),
            torch.as_tensor(val_x, dtype=torch.float, device=device),
            torch.as_tensor(val_y, dtype=torch.float, device=device),
            torch.as_tensor(val_r, dtype=torch.float, device=device),
            torch.as_tensor(test_x, dtype=torch.float, device=device),
            torch.as_tensor(test_y, dtype=torch.float, device=device),
            torch.as_tensor(test_r, dtype=torch.float, device=device),
            train_choices,
        )
@dataclass(frozen=True)
class MultiClassContext:
    # Row index into the dataset identifying the example shown to the policy.
    query_id: int
class MultiClassModel(BanditsModel):
    """Reward model backed by precomputed per-row reward vectors, indexed by
    the context's query_id."""

    def __init__(self, features: Tensor, rewards: Tensor):
        self._features = features
        self._rewards = rewards

    def _action_rewards(self, context: MultiClassContext) -> ActionRewards:
        return ActionRewards(self._rewards[context.query_id])
class MultiClassPolicy(Policy):
    """Epsilon-greedy policy over per-context action distributions.

    The queried distribution is (1 - epsilon) * model distribution plus a
    uniform epsilon / num_actions exploration term; actions are sampled
    from the resulting mixture.
    """

    def __init__(
        self,
        action_space: ActionSpace,
        action_distributions: Tensor,
        epsilon: float,
        device=None,
    ):
        super().__init__(action_space, device)
        self._action_distributions = action_distributions
        self._exploitation_prob = 1.0 - epsilon
        self._exploration_prob = epsilon / len(self.action_space)

    def _query(self, context: int) -> Tuple[Action, ActionDistribution]:
        dist = self._action_distributions[context]
        # Squeeze a leading singleton batch dimension if present.
        if len(dist.shape) > 1 and dist.shape[0] == 1:
            dist = dist[0]
        # Pad with zeros if the model emitted fewer entries than actions.
        if dist.shape[0] < len(self.action_space):
            dist = torch.cat(
                (dist, torch.zeros([len(self.action_space) - dist.shape[0]]))
            )
        dist = dist * self._exploitation_prob + self._exploration_prob
        action = torch.multinomial(dist, 1).item()
        return Action(action), ActionDistribution(dist)
def evaluate_all(
    experiments: Iterable[Tuple[Iterable[Estimator], int]],
    dataset: UCIMultiClassDataset,
    log_trainer: Trainer,
    log_epsilon: float,
    tgt_trainer: Trainer,
    tgt_epsilon: float,
    max_num_workers: int,
    random_reward_prob: float = 0.0,
    device=None,
):
    """Run each (estimators, num_samples) experiment on bandit logs simulated
    from the dataset.

    The logging and target policies are epsilon-greedy wrappers around two
    classifiers trained on a train split (or loaded from pickles cached next
    to the dataset file); samples are drawn from the held-out rows, with the
    logged reward optionally replaced by a coin flip with probability
    ``random_reward_prob``.
    """
    action_space = ActionSpace(dataset.num_actions)
    config_path = PurePath(dataset.config_file)
    data_name = config_path.stem
    # Trained-model pickles are cached alongside the dataset file.
    log_model_name = data_name + "_" + log_trainer.__class__.__name__ + ".pickle"
    log_model_file = str(config_path.with_name(log_model_name))
    tgt_model_name = data_name + "_" + tgt_trainer.__class__.__name__ + ".pickle"
    tgt_model_file = str(config_path.with_name(tgt_model_name))
    log_trainer.load_model(log_model_file)
    tgt_trainer.load_model(tgt_model_file)
    if not log_trainer.is_trained or not tgt_trainer.is_trained:
        (
            train_x,
            train_y,
            train_r,
            val_x,
            val_y,
            val_r,
            test_x,
            test_y,
            test_r,
            train_choices,
        ) = dataset.train_val_test_split((0.2, 0.8))
        trainer_data = TrainingData(train_x, train_y, None, val_x, val_y, None)
        if not log_trainer.is_trained:
            log_trainer.train(trainer_data)
            log_trainer.save_model(log_model_file)
        if not tgt_trainer.is_trained:
            tgt_trainer.train(trainer_data)
            tgt_trainer.save_model(tgt_model_file)
    log_results = log_trainer.predict(dataset.features)
    assert log_results.probabilities is not None
    log_policy = MultiClassPolicy(action_space, log_results.probabilities, log_epsilon)
    tgt_results = tgt_trainer.predict(dataset.features)
    assert tgt_results.probabilities is not None
    tgt_policy = MultiClassPolicy(action_space, tgt_results.probabilities, tgt_epsilon)
    tasks = []
    # pyre-fixme[61]: `train_choices` may not be initialized here.
    test_queries = list(set(range(len(dataset))) - set(train_choices))
    for estimators, num_samples in experiments:
        samples = []
        for _ in range(num_samples):
            qid = random.sample(test_queries, 1)
            label = int(dataset.labels[qid].item())
            log_action, log_action_probabilities = log_policy(qid)
            # Reward is 1 iff the policy predicted the true class.
            log_reward = 1.0 if log_action.value == label else 0.0
            tgt_action, tgt_action_probabilities = tgt_policy(qid)
            ground_truth_reward = 1.0 if tgt_action.value == label else 0.0
            item_feature = dataset.features[qid]
            # Optionally corrupt the logged reward with a random coin flip.
            random_reward = random.random() < random_reward_prob
            samples.append(
                LogSample(
                    context=qid,
                    log_action=log_action,
                    log_reward=random.randint(0, 1) if random_reward else log_reward,
                    log_action_probabilities=log_action_probabilities,
                    tgt_action_probabilities=tgt_action_probabilities,
                    tgt_action=tgt_action,
                    ground_truth_reward=ground_truth_reward,
                    item_feature=item_feature,
                )
            )
        tasks.append((estimators, BanditsEstimatorInput(action_space, samples, False)))
    evaluator = Evaluator(tasks, max_num_workers)
    results = evaluator.evaluate()
    Evaluator.report_results(results)
    return results
DEFAULT_ITERATIONS = 500

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logging.info(f"working dir - {os.getcwd()}")
    parser = argparse.ArgumentParser(description="Read command line parameters.")
    parser.add_argument("-p", "--parameters", help="Path to config file.")
    args = parser.parse_args(sys.argv[1:])
    with open(args.parameters, "r") as f:
        params = json.load(f)
    if "dataset" not in params:
        raise Exception('Please define "dataset" in config file')
    # Fix all RNGs for reproducibility.
    random.seed(1234)
    np.random.seed(1234)
    torch.random.manual_seed(1234)
    dataset = UCIMultiClassDataset(params["dataset"])
    # Logging policy: logistic regression; target policy: SGD classifier;
    # both wrapped epsilon-greedy with epsilon = 0.1.
    log_trainer = LogisticRegressionTrainer()
    log_epsilon = 0.1
    tgt_trainer = SGDClassifierTrainer()
    tgt_epsilon = 0.1
    dm_trainer = DecisionTreeTrainer()
    # 100 repetitions of 1000 samples, each scored by DM, IPS and DR.
    experiments = [
        (
            (
                DMEstimator(DecisionTreeTrainer()),
                IPSEstimator(),
                DoublyRobustEstimator(DecisionTreeTrainer()),
            ),
            1000,
        )
        for _ in range(100)
    ]
    evaluate_all(
        experiments, dataset, log_trainer, log_epsilon, tgt_trainer, tgt_epsilon, 0
    )
| 11,560 | 33.927492 | 88 | py |
ReAgent | ReAgent-master/reagent/ope/test/yandex_web_search.py | #!/usr/bin/env python3
import argparse
import json
import logging
import os
import pickle
import random
import sys
import time
from typing import (
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import torch
import torch.multiprocessing as mp
from reagent.ope.estimators.estimator import Evaluator
from reagent.ope.estimators.slate_estimators import (
DCGSlateMetric,
ERRSlateMetric,
FrechetDistribution,
IPSEstimator,
LogSample,
NDCGSlateMetric,
PBMEstimator,
PseudoInverseEstimator,
RankingDistribution,
RewardDistribution,
SlateContext,
SlateEstimator,
SlateEstimatorInput,
SlateItemValues,
SlateModel,
SlateQuery,
SlateSlots,
SlateSlotValues,
)
from reagent.ope.utils import RunningAverage
# Slate test using Yandex Personalized Web Search Dataset:
# https://www.kaggle.com/c/yandex-personalized-web-search-challenge/
# Dwell-time thresholds (in the log's time units) separating irrelevant,
# relevant (> RELEVANT_THRESHOLD) and highly-relevant
# (> HIGHLY_RELEVANT_THRESHOLD) clicks.
RELEVANT_THRESHOLD = 49
HIGHLY_RELEVANT_THRESHOLD = 399
# Only the first MAX_SLATE_SIZE result positions are scored.
MAX_SLATE_SIZE = 10
MIN_QUERY_COUNT = 10
def click_to_relevances(
    clicks: Iterable[Tuple[int, int]], urls: Sequence[Tuple[int, int]]
) -> Tuple[List[float], Mapping[Tuple[int, int], float]]:
    """Convert (position, dwell_time) clicks into graded relevances.

    Dwell time above HIGHLY_RELEVANT_THRESHOLD grades 2.0, above
    RELEVANT_THRESHOLD grades 1.0, anything else 0.0.  Returns per-position
    relevances (padded to at least MAX_SLATE_SIZE) and per-URL relevances.
    """
    num_positions = max(len(urls), MAX_SLATE_SIZE)
    position_relevances = [0.0 for _ in range(num_positions)]
    url_relevances = dict.fromkeys(urls, 0.0)
    for position, dwell_time in clicks:
        if dwell_time > HIGHLY_RELEVANT_THRESHOLD:
            grade = 2.0
        elif dwell_time > RELEVANT_THRESHOLD:
            grade = 1.0
        else:
            grade = 0.0
        position_relevances[position] = grade
        url_relevances[urls[position]] = grade
    return position_relevances, url_relevances
class LoggedQuery:
    """One logged query impression: the user, query, the URL list shown, and
    the clicks (with dwell times) it received."""

    def __init__(
        self,
        user_id: int,
        query_id: int,
        query_terms: Tuple[int],
        list: Sequence[Tuple[int, int]],
    ):
        self._user_id = user_id
        self._query_id = query_id
        self._query_terms = query_terms
        self._list = list
        # (position, dwell_time) pairs, filled in by click().
        self._clicks: List[Tuple[int, int]] = []
        # Lazily computed relevance caches; invalidated on every new click.
        self._position_relevances: Optional[List[float]] = None
        self._url_relevances: Optional[MutableMapping[Tuple[int, int], float]] = None

    def click(self, url_id: int, dwell_time: int):
        """Record a click on url_id (first match in the shown list)."""
        self._position_relevances = None
        self._url_relevances = None
        i = 0
        for r in self.list:
            if url_id == r[0]:
                self.clicks.append((i, dwell_time))
                break
            i += 1

    @property
    def user_id(self):
        return self._user_id

    @property
    def query_id(self):
        return self._query_id

    @property
    def query_terms(self):
        return self._query_terms

    @property
    def list(self):
        return self._list

    @property
    def clicks(self):
        return self._clicks

    def _click_to_relevances(self):
        # Grade each click by dwell time: > HIGHLY_RELEVANT_THRESHOLD -> 2.0,
        # > RELEVANT_THRESHOLD -> 1.0, otherwise 0.0.
        self._position_relevances = [0.0] * max(len(self._list), MAX_SLATE_SIZE)
        self._url_relevances = {url: 0.0 for url in self._list}
        for i, dt in self.clicks:
            r = 0.0
            if dt > HIGHLY_RELEVANT_THRESHOLD:
                r = 2.0
            elif dt > RELEVANT_THRESHOLD:
                r = 1.0
            self._position_relevances[i] = r
            self._url_relevances[self._list[i]] = r

    @property
    def position_relevances(self):
        if self._position_relevances is None:
            self._click_to_relevances()
        return self._position_relevances

    @property
    def url_relevances(self):
        if self._url_relevances is None:
            self._click_to_relevances()
        return self._url_relevances
class TrainingQuery:
    """Aggregated relevance statistics for one query id across many
    LoggedQuery impressions."""

    def __init__(self, query_id: int, query_terms: Tuple[int]):
        self._query_id = query_id
        self._query_terms = query_terms
        self._count = 0
        # Running-average relevance per URL and per slate position.
        self._url_relevances: MutableMapping[Tuple[int, int], RunningAverage] = {}
        self._position_relevances = [RunningAverage() for _ in range(MAX_SLATE_SIZE)]

    def add(self, query: LoggedQuery):
        """Fold one logged impression into the running averages."""
        self._count += 1
        urs = query.url_relevances
        for item_id, r in urs.items():
            if item_id not in self._url_relevances:
                self._url_relevances[item_id] = RunningAverage(r)
            else:
                self._url_relevances[item_id].add(r)
        prs = query.position_relevances
        for i in range(MAX_SLATE_SIZE):
            self._position_relevances[i].add(prs[i])

    def merge(self, other: "TrainingQuery"):
        # NOTE(review): appears to expect ``other`` to be finalized (values
        # are plain floats, not RunningAverage) — confirm against callers.
        for i, r in other.url_relevances.items():
            if i not in self._url_relevances:
                self._url_relevances[i] = RunningAverage(r)
            else:
                self._url_relevances[i].add(r)
        for i in range(MAX_SLATE_SIZE):
            self._position_relevances[i].add(other.position_relevances[i])

    def finalize(self):
        # Collapse the running averages into plain floats.
        self._url_relevances = {k: v.average for k, v in self._url_relevances.items()}
        self._position_relevances = [v.average for v in self._position_relevances]

    def pack(self):
        # Turn the dict into a list of items (smaller/safer to pickle).
        if isinstance(self._url_relevances, Mapping):
            self._url_relevances = list(self._url_relevances.items())

    def _unpack(self):
        # Inverse of pack(): restore the mapping form on first access.
        if isinstance(self._url_relevances, Sequence):
            self._url_relevances = {v[0]: v[1] for v in self._url_relevances}

    @property
    def count(self):
        return self._count

    @property
    def query_id(self):
        return self._query_id

    @property
    def query_terms(self):
        return self._query_terms

    @property
    def url_relevances(self):
        self._unpack()
        return self._url_relevances

    @property
    def position_relevances(self):
        return self._position_relevances
def create_cache(params):
    """Split the raw Yandex click-log text file into per-day pickle caches.

    Each output file ``{source_file}_{day:02}.pickle`` holds a list of
    LoggedQuery objects for that day.  Observed line formats (whitespace
    separated):
      session metadata (4 tokens): SessionID "M" Day UserID
      query:  SessionID Time "Q" SERPID QueryID Terms URL,Domain ...
      click:  SessionID Time "C" SERPID URLID
    A click's dwell time is the gap to the next event in the same session;
    a click that is the session's last event gets a default of 10000.
    """
    if "folder" not in params:
        raise Exception('Please define "folder" in "raw_data"')
    folder = params["folder"] if "folder" in params else ""
    if len(folder) == 0:
        folder = os.getcwd()
    cache_folder = params["cache_folder"] if "cache_folder" in params else folder
    if len(cache_folder) == 0:
        cache_folder = folder
    source_file = params["source_file"] if "source_file" in params else ""
    if len(source_file) == 0:
        raise Exception('"source_file" not defined!')
    total_days = params["total_days"] if "total_days" in params else 27
    text_file = os.path.join(folder, source_file)
    logging.info(f"loading {text_file}")
    if not os.access(text_file, os.R_OK):
        logging.warning(f"{text_file} cannot be accessed.")
        return
    # One full pass over the text file per day.
    for d in range(1, total_days + 1):
        pickle_file = os.path.join(cache_folder, f"{source_file}_{d:02}.pickle")
        logging.info(f"creating cache for day {d:02}: {pickle_file}")
        queries = []
        st = time.process_time()
        with open(text_file, "r") as f:
            curr_sess = None  # (session_id, day, {serp_id: LoggedQuery})
            curr_user = -1
            num_sess = 0
            # Pending click awaiting its dwell time: (serp_id, url_id, time).
            last_click = None
            for line in f:
                tokens = line.strip().split()
                tlen = len(tokens)
                if tlen == 4 and tokens[1] == "M":
                    # New session record; flush a trailing click with the
                    # default dwell time.
                    if last_click is not None:
                        query = curr_sess[2][last_click[0]]
                        query.click(last_click[1], 10000)
                        last_click = None
                    day = int(tokens[2])
                    if day != d:
                        continue
                    num_sess += 1
                    if num_sess % 100000 == 0:
                        logging.info(f"  {num_sess} session processed...")
                    if curr_sess is not None:
                        # Keep every clicked query, plus one un-clicked
                        # instance per query id.
                        qids = set()
                        for q in curr_sess[2].values():
                            if len(q.clicks) > 0:
                                queries.append(q)
                            elif q.query_id not in qids:
                                queries.append(q)
                                qids.add(q.query_id)
                        del qids
                    curr_sess = (int(tokens[0]), int(tokens[2]), {})
                    curr_user = int(tokens[3])
                elif (
                    curr_sess is not None
                    and tlen > 4
                    and int(tokens[0]) == curr_sess[0]
                ):
                    t = int(tokens[1])
                    # The previous click's dwell time is the gap until this event.
                    if last_click is not None:
                        query = curr_sess[2][last_click[0]]
                        query.click(last_click[1], t - last_click[2])
                        last_click = None
                    if tokens[2] == "Q":
                        serp_id = int(tokens[3])
                        query_id = int(tokens[4])
                        query_terms = tuple([int(s) for s in tokens[5].split(",")])
                        results = []
                        for r in tokens[6:]:
                            rs = r.split(",")
                            results.append((int(rs[0]), int(rs[1])))
                        query = LoggedQuery(curr_user, query_id, query_terms, results)
                        curr_sess[2][serp_id] = query
                    elif tokens[2] == "C":
                        last_click = (int(tokens[3]), int(tokens[4]), t)
                    else:
                        logging.warning(f"unknown record type: {tokens[2]}")
        logging.info(f"  loading time: {time.process_time() - st}")
        st = time.process_time()
        try:
            with open(pickle_file, "wb") as f:
                pickle.dump(queries, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            logging.error(f"{pickle_file} cannot be accessed.")
        logging.info(f"  saving time: {time.process_time() - st}")
def load_logged_queries(params) -> Sequence[TrainingQuery]:
    """Load logged (behavior-policy) queries for evaluation.

    Reads per-day pickles ``{base_file_name}_{day:02}.pickle`` from
    ``params["folder"]``, merges all logged records sharing a query_id
    into one TrainingQuery, and finalizes each result. If
    ``params["cache_file_name"]`` is set, a combined cache pickle is
    read when present (fast path) or written after building.

    Args:
        params: dict with keys "folder" (required), "base_file_name"
            (required, non-empty), and optional "cache_file_name", "days".

    Returns:
        Tuple of finalized TrainingQuery objects.

    Raises:
        Exception: if "folder" is missing or "base_file_name" is empty.
    """
    logging.info("loading logged queries...")
    if "folder" not in params:
        raise Exception('Please define "folder" in "raw_data"')
    # "folder" is guaranteed present here; empty string falls back to cwd.
    folder = params["folder"]
    if len(folder) == 0:
        folder = os.getcwd()
    cache_file_name = params.get("cache_file_name", "")
    cache_file = os.path.join(folder, f"{cache_file_name}.pickle")
    if len(cache_file_name) > 0 and os.access(cache_file, os.R_OK):
        # Fast path: combined cache exists and is readable.
        logging.info(f" loading {cache_file}")
        try:
            st = time.perf_counter()
            with open(cache_file, "rb") as f:
                logged_queries = pickle.load(f)
            logging.info(f" loading time {time.perf_counter() - st}")
            return logged_queries
        except Exception as err:
            # Corrupt/unreadable cache: fall through and rebuild.
            logging.warning(f" loading error {err}")
    base_file_name = params.get("base_file_name", "")
    if len(base_file_name) == 0:
        raise Exception('"base_file_name" not defined!')
    days = params.get("days", [])
    all_queries = {}
    st = time.perf_counter()
    for day in days:
        pickle_file = os.path.join(folder, f"{base_file_name}_{day:02}.pickle")
        if os.access(pickle_file, os.R_OK):
            logging.info(f" loading {pickle_file}")
            with open(pickle_file, "rb") as f:
                queries = pickle.load(f)
            if queries is None:
                logging.warning(f" loading {pickle_file} failed!")
            else:
                logging.info(f" loaded queries: {len(queries)}")
                # Merge logged records by query_id.
                for q in queries:
                    tq = all_queries.get(q.query_id)
                    if tq is None:
                        tq = TrainingQuery(q.query_id, q.query_terms)
                        all_queries[q.query_id] = tq
                    tq.add(q)
        else:
            logging.warning(f" {pickle_file} not accessible!")
    logging.info(f" loading time {time.perf_counter() - st}")
    logged_queries = tuple(all_queries.values())
    for v in logged_queries:
        v.finalize()
    if len(cache_file_name) > 0:
        logging.info(f" saving logged queries to {cache_file}")
        try:
            st = time.perf_counter()
            with open(cache_file, "wb") as f:
                pickle.dump(logged_queries, f, protocol=pickle.HIGHEST_PROTOCOL)
            logging.info(f" saving time {time.perf_counter() - st}")
        except Exception:
            logging.warning(f" {cache_file} not accessible!")
    return logged_queries
class TrainingDataset:
    """Aggregated click-log dataset used to derive query/item relevances.

    Loads per-day pickles of logged queries, merges them into
    TrainingQuery objects (one per query_id, dropping queries seen fewer
    than min_query_count times), and derives per-query-id relevances,
    per-query-term relevances, and the average observed relevance per
    slate position. Results can be cached to a single pickle.
    """

    def __init__(self, params, device=None):
        if "folder" not in params:
            raise Exception('Please define "folder" in "dataset"')
        self._folder = params["folder"]
        self._days = params["days"] if "days" in params else []
        self._base_file_name = (
            params["base_file_name"] if "base_file_name" in params else "train"
        )
        # Queries observed fewer than this many times are discarded.
        self._min_query_count = (
            params["min_query_count"]
            if "min_query_count" in params
            else MIN_QUERY_COUNT
        )
        self._cache_file = params["cache_file"] if "cache_file" in params else ""
        self._device = device
        # Lazily populated by load_queries() / _process_training_queries().
        self._queries = None
        self._query_ids = None
        self._query_terms = None
        self._position_relevances = None

    def load_queries(self, reload=False):
        """Load training queries, preferring the combined cache pickle.

        The cache also stores the (min_query_count, days) config it was
        built with; on mismatch the data is rebuilt from the per-day
        files by recursing with reload=True.
        """
        logging.info("loading training queries...")
        pickle_file = os.path.join(self._folder, self._cache_file)
        if not reload and len(self._cache_file) > 0 and os.access(pickle_file, os.R_OK):
            logging.info(f" loading {pickle_file}")
            st = time.process_time()
            with open(pickle_file, "rb") as f:
                (
                    min_query_count,
                    days,
                    queries,
                    query_ids,
                    query_terms,
                    position_relevances,
                ) = pickle.load(f)
            if min_query_count != self._min_query_count or days != self._days:
                # Cache was built under a different config; rebuild from scratch.
                logging.info(" updated config from last cache, reload")
                self.load_queries(True)
            else:
                self._queries = queries
                self._query_ids = query_ids
                self._query_terms = query_terms
                self._position_relevances = position_relevances
                logging.info(
                    f" loaded {len(self._queries)}, "
                    f" time {time.process_time() - st}"
                )
        else:
            all_queries = {}
            for d in self._days:
                cache_file = os.path.join(
                    self._folder, f"{self._base_file_name}_{d:02}.pickle"
                )
                if os.access(cache_file, os.R_OK):
                    logging.info(f" loading {cache_file}")
                    st = time.process_time()
                    with open(cache_file, "rb") as f:
                        queries = pickle.load(f)
                    if queries is None:
                        logging.warning(f" loading {cache_file} failed!")
                        continue
                    logging.info(f" loaded queries: {len(queries)}")
                    logging.info(f" loading time {time.process_time() - st}")
                    st = time.process_time()
                    # Merge all logged records with the same query_id.
                    for q in queries:
                        if q.query_id not in all_queries:
                            qr = TrainingQuery(q.query_id, q.query_terms)
                            all_queries[q.query_id] = qr
                        else:
                            qr = all_queries[q.query_id]
                        qr.add(q)
                    logging.info(f" process time {time.process_time() - st}")
                else:
                    logging.warning(f" {cache_file} not accessible!")
            # Keep only sufficiently frequent queries.
            self._queries = []
            for v in all_queries.values():
                if v.count >= self._min_query_count:
                    v.finalize()
                    v.pack()
                    self._queries.append(v)
            # Invalidate derived tables; recomputed on demand (or below,
            # before the cache is written).
            self._query_ids = None
            self._query_terms = None
            self._position_relevances = None
            if len(self._cache_file) > 0:
                logging.info(f"saving training queries to {pickle_file}")
                try:
                    st = time.process_time()
                    with open(pickle_file, "wb") as f:
                        self._process_training_queries()
                        pickle.dump(
                            (
                                self._min_query_count,
                                self._days,
                                self._queries,
                                self._query_ids,
                                self._query_terms,
                                self._position_relevances,
                            ),
                            f,
                            protocol=pickle.HIGHEST_PROTOCOL,
                        )
                    logging.info(f" saving time {time.process_time() - st}")
                except Exception:
                    logging.warning(f" {pickle_file} not accessible!")
        # self._query_ids = None
        # self._query_terms = None
        # self._position_relevances = None
        logging.info(f"loaded training queries: {len(self._queries)}")

    def _process_training_queries(self):
        """Build the query_id table, the per-term table, and the average
        relevance per slate position from loaded queries (idempotent)."""
        if (
            self._query_ids is not None
            and self._query_terms is not None
            and self._position_relevances is not None
        ):
            return
        logging.info("processing training queries...")
        st = time.process_time()
        self._query_ids = {}
        self._query_terms = {}
        self._position_relevances = [RunningAverage() for _ in range(MAX_SLATE_SIZE)]
        for q in self._queries:
            self._query_ids[q.query_id] = q
            # Per-term aggregates are stored as synthetic queries with id 0.
            for t in q.query_terms:
                if t in self._query_terms:
                    self._query_terms[t].merge(q)
                else:
                    mq = TrainingQuery(0, (t,))
                    mq.merge(q)
                    self._query_terms[t] = mq
            for ra, r in zip(self._position_relevances, q.position_relevances):
                ra.add(r)
        for q in self._query_terms.values():
            q.finalize()
        self._position_relevances = [v.average for v in self._position_relevances]
        logging.info(f"processing time {time.process_time() - st}")

    @property
    def training_queries(self):
        # All loaded TrainingQuery objects (None before load_queries()).
        return self._queries

    def item_relevances(
        self, query_id: int, query_terms: Tuple[int], items: Iterable[Tuple[int, int]]
    ) -> SlateItemValues:
        """Relevance for each candidate item of a query.

        Uses the exact query_id when it was seen in training; otherwise
        averages the per-term relevances. Items never observed get 0.0.
        """
        self._process_training_queries()
        if query_id in self._query_ids:
            q = self._query_ids[query_id]
            rels = q.url_relevances
        else:
            ras = {}
            for t in query_terms:
                if t in self._query_terms:
                    q = self._query_terms[t]
                    for i, r in q.url_relevances:
                        if i in ras:
                            ra = ras[i]
                        else:
                            ra = RunningAverage()
                            ras[i] = ra
                        ra.add(r)
            rels = {i: r.average for i, r in ras.items()}
        item_rels = {}
        for i in items:
            if i in rels:
                item_rels[i] = rels[i]
            else:
                item_rels[i] = 0.0
        return SlateItemValues(item_rels)

    def slot_relevances(self, slots: SlateSlots) -> SlateSlotValues:
        # Average observed relevance for the first len(slots) positions.
        return SlateSlotValues(self._position_relevances[: len(slots)])
class YandexSlateModel(SlateModel):
    """Slate model backed by a TrainingDataset's learned relevances."""

    def __init__(self, dataset: TrainingDataset):
        self._dataset = dataset

    def item_rewards(self, context: SlateContext) -> SlateItemValues:
        qv = context.query.value
        query_id, query_terms = qv[0], qv[1:]
        # pyre-fixme[20]: Call `TrainingDataset.item_relevances` expects argument `items`.
        return self._dataset.item_relevances(query_id, query_terms)

    def slot_probabilities(self, context: SlateContext) -> SlateSlotValues:
        return self._dataset.slot_relevances(context.slots)
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    log_dataset: TrainingDataset,
    log_distribution: RewardDistribution,
    tgt_dataset: TrainingDataset,
    tgt_distribution: RewardDistribution,
    log_queries: Sequence[TrainingQuery],
    slate_size: int,
    item_size: int,
    metric_func: str,
    max_num_workers: int,
    device=None,
):
    """Simulate logged slates from the query log and run OPE estimators.

    For each (estimators, num_samples) pair, samples queries without
    replacement, builds behavior ("log") and target slate distributions
    from the two datasets, draws a logged slate and computes its metric
    reward plus the ground-truth expected reward under the target
    distribution, then hands the LogSamples to the Evaluator.

    Args:
        experiments: pairs of (estimators to run, number of samples).
        log_dataset: dataset providing behavior-policy relevances.
        log_distribution: maps item rewards to behavior probabilities.
        tgt_dataset: dataset providing target-policy relevances.
        tgt_distribution: maps item rewards to target probabilities.
        log_queries: pool of logged queries to sample contexts from.
        slate_size: number of slate slots.
        item_size: max number of candidate items kept per query.
        metric_func: "dcg", "err", or anything else for NDCG.
        max_num_workers: parallelism for the Evaluator.
        device: optional torch device for metric computation.
    """
    log_length = len(log_queries)
    slots = SlateSlots(slate_size)
    logging.info("Generating log...")
    st = time.perf_counter()
    tasks = []
    total_samples = 0
    for estimators, num_samples in experiments:
        samples = []
        # Require 10x the requested samples so sampling without
        # replacement stays representative of the log.
        if num_samples * 10 > log_length:
            logging.warning(f"not enough log data, needs {num_samples * 10}")
            continue
        query_choices = np.random.choice(log_length, num_samples, replace=False)
        for i in query_choices:
            q = log_queries[i]
            # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`.
            context = SlateContext(SlateQuery((q.query_id, *(q.query_terms))), slots)
            url_relevances = q.url_relevances
            if len(url_relevances) > item_size:
                # NOTE(review): sorted() ascending keeps the LOWEST-relevance
                # items after truncation — confirm reverse=True wasn't intended.
                url_relevances = {
                    k: v
                    for k, v in sorted(
                        url_relevances.items(), key=lambda item: item[1]
                    )[:item_size]
                }
            items = url_relevances.keys()
            log_item_rewards = log_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            log_item_probs = log_distribution(log_item_rewards)
            tgt_item_rewards = tgt_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            tgt_item_probs = tgt_distribution(tgt_item_rewards)
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(url_relevances)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                # Default metric: NDCG normalized by ground-truth rewards.
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            slot_weights = metric.slot_weights(slots)
            # A deterministic target policy has slate probability 1; in that
            # case the "logged" slate is drawn from the target itself.
            if tgt_item_probs.is_deterministic:
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            log_rewards = log_slate.slot_values(gt_item_rewards)
            log_reward = metric.calculate_reward(slots, log_rewards, None, slot_weights)
            gt_slot_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            gt_reward = metric.calculate_reward(
                slots, gt_slot_rewards, None, slot_weights
            )
            samples.append(
                LogSample(
                    context,
                    metric,
                    log_slate,
                    log_reward,
                    log_slate_prob,
                    None,
                    log_item_probs,
                    tgt_slate_prob,
                    None,
                    tgt_item_probs,
                    gt_reward,
                    slot_weights,
                )
            )
            total_samples += 1
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")
    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
if __name__ == "__main__":
    # Evaluator workers are started with the "spawn" method.
    mp.set_start_method("spawn")
    logging.basicConfig(
        format="%(asctime)-15s_%(levelname)s: %(message)s", level=logging.INFO
    )
    logging.info(f"working dir - {os.getcwd()}")
    # Seed all RNGs so query sampling and slate draws are reproducible.
    random.seed(1234)
    np.random.seed(1234)
    torch.random.manual_seed(1234)
    parser = argparse.ArgumentParser(description="Read command line parameters.")
    parser.add_argument("-p", "--parameters", help="Path to config file.")
    args = parser.parse_args(sys.argv[1:])
    with open(args.parameters, "r") as f:
        params = json.load(f)
    # uncomment to create cache for faster data loading
    # create_cache(params["raw_data"])
    # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    device = None
    # Behavior ("log") policy dataset.
    logging.info('loading "log_data"')
    log_dataset = TrainingDataset(params["log_data"])
    st = time.perf_counter()
    log_dataset.load_queries()
    logging.info(f"load time: {time.perf_counter() - st}")
    # Target policy dataset.
    logging.info('loading "target_data"')
    tgt_dataset = TrainingDataset(params["target_data"])
    st = time.perf_counter()
    tgt_dataset.load_queries()
    logging.info(f"load time: {time.perf_counter() - st}")
    # Held-out queries used to generate evaluation contexts.
    logging.info('loading "test_data"')
    st = time.perf_counter()
    log_queries = load_logged_queries(params["test_data"])
    logging.info(f"load time: {time.perf_counter() - st}")
    estimators = [IPSEstimator(), PseudoInverseEstimator(), PBMEstimator()]
    # 4 repetitions of 200 samples each; slate size 5, up to 10 candidate
    # items per query, NDCG metric, 2 evaluator workers.
    evaluate(
        [(estimators, 200)] * 4,
        log_dataset,
        RankingDistribution(1.0),
        tgt_dataset,
        FrechetDistribution(2.0, True),
        log_queries,
        5,
        10,
        "ndcg",
        2,
    )
| 25,761 | 35.855508 | 90 | py |
ReAgent | ReAgent-master/reagent/ope/test/unit_tests/test_slate_estimators.py | #!/usr/bin/env python3
import random
import unittest
import torch
from reagent.ope.estimators.slate_estimators import (
DCGSlateMetric,
NDCGSlateMetric,
Slate,
SlateItem,
SlateItemProbabilities,
SlateItemValues,
SlateSlotItemProbabilities,
SlateSlots,
)
class TestEstimator(unittest.TestCase):
    """Tests for slate probability distributions and slate metrics.

    RNGs are seeded in setUp, so sampled slates — and hence the asserted
    slate probabilities — are deterministic.
    """

    def setUp(self) -> None:
        random.seed(1234)
        torch.random.manual_seed(1234)
        self._item_relevances = [1.0, 0.5, 2.5, 2.0, 5.0]
        # One relevance vector per slot (slot-dependent preferences).
        self._slot_item_relevances = [
            [1.0, 0.5, 2.5, 2.0, 5.0],
            [1.5, 1.0, 2.0, 1.0, 4.0],
            [3.0, 2.5, 0.5, 4.0, 2.0],
        ]
        self._item_rewards = [3.0, 8.0, 0.0, 4.0, 1.0]
        self._item_size = len(self._item_relevances)
        self._slate_size = 3
        self._slots = SlateSlots(self._slate_size)

    def test_slate_item_probabilities(self):
        probs = SlateItemProbabilities(self._item_relevances)
        slate = probs.sample_slate(self._slots)
        slate_prob = probs.slate_probability(slate)
        self.assertAlmostEqual(slate_prob, 0.017825312, places=2)
        slot_item_expectations = probs.slot_item_expectations(self._slots)
        slot_rewards = slot_item_expectations.expected_rewards(
            SlateItemValues(self._item_rewards)
        )
        # Expected reward per slot under the item distribution.
        diff = slot_rewards.values - torch.tensor([1.81818, 2.13736, 2.66197])
        self.assertAlmostEqual(diff.sum().item(), 0, places=5)

    def test_slate_slot_item_probabilities(self):
        probs = SlateSlotItemProbabilities(
            [SlateItemValues(vs) for vs in self._slot_item_relevances]
        )
        slate = probs.sample_slate(self._slots)
        slate_prob = probs.slate_probability(slate)
        self.assertAlmostEqual(slate_prob, 0.02139037, places=2)
        slot_item_expectations = probs.slot_item_expectations()
        slot_rewards = slot_item_expectations.expected_rewards(
            SlateItemValues(self._item_rewards)
        )
        diff = slot_rewards.values - torch.tensor([1.81818, 2.51352, 7.36929])
        self.assertAlmostEqual(diff.sum().item(), 0, places=5)

    def test_metrics(self):
        # DCG vs NDCG on two fixed slates over the same item rewards.
        dcg = DCGSlateMetric()
        ndcg = NDCGSlateMetric(SlateItemValues([1.0, 2.5, 2.0, 3.0, 1.5, 0.0]))
        item_rewards = SlateItemValues([2.0, 1.0, 0.0, 3.0, 1.5, 2.5])
        slate = Slate([SlateItem(1), SlateItem(3), SlateItem(2)])
        reward = dcg(slate.slots, slate.slot_values(item_rewards))
        self.assertAlmostEqual(reward, 5.416508275)
        reward = ndcg(slate.slots, slate.slot_values(item_rewards))
        self.assertAlmostEqual(reward, 0.473547669)
        slate = Slate([SlateItem(5), SlateItem(0), SlateItem(4)])
        reward = dcg(slate.slots, slate.slot_values(item_rewards))
        self.assertAlmostEqual(reward, 7.463857073)
        reward = ndcg(slate.slots, slate.slot_values(item_rewards))
        self.assertAlmostEqual(reward, 0.652540703)
| 2,939 | 38.72973 | 79 | py |
ReAgent | ReAgent-master/reagent/ope/test/unit_tests/test_types.py | #!/usr/bin/env python3
import unittest
from typing import Tuple, Union
import numpy as np
import torch
from reagent.ope.estimators.types import (
ActionDistribution as Distribution,
TypeWrapper,
Values,
)
class TestTypes(unittest.TestCase):
    """Checks TypeWrapper indexing, hashing, and (in)equality across all
    supported wrapped value types."""

    TestType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, torch.Tensor]
    TestClass = TypeWrapper[TestType]

    def setUp(self) -> None:
        self._test_list = [0, 1, 2, 3, 5]

    def test_int_type(self):
        wrapped = TestTypes.TestClass(3)
        self.assertEqual(self._test_list[wrapped], 3)
        self.assertEqual(hash(wrapped), hash(3))
        self.assertEqual(wrapped, TestTypes.TestClass(3))
        self.assertNotEqual(wrapped, TestTypes.TestClass(4))

    def test_float_type(self):
        wrapped = TestTypes.TestClass(3.2)
        self.assertEqual(self._test_list[wrapped], 3)
        self.assertEqual(hash(wrapped), hash(3.2))
        self.assertEqual(wrapped, TestTypes.TestClass(3.2))
        self.assertNotEqual(wrapped, TestTypes.TestClass(4.3))

    def test_tuple_int_type(self):
        wrapped = TestTypes.TestClass((1, 2, 3))
        # Tuples are rejected as list indices.
        with self.assertRaises(ValueError):
            self._test_list[wrapped] = 1
        self.assertEqual(hash(wrapped), hash((1, 2, 3)))
        self.assertEqual(wrapped, TestTypes.TestClass((1, 2, 3)))
        self.assertNotEqual(wrapped, TestTypes.TestClass((2, 3, 1)))

    def test_tuple_float_type(self):
        wrapped = TestTypes.TestClass((1.1, 2.2, 3.3))
        with self.assertRaises(ValueError):
            self._test_list[wrapped] = 1
        self.assertEqual(hash(wrapped), hash((1.1, 2.2, 3.3)))
        self.assertEqual(wrapped, TestTypes.TestClass((1.1, 2.2, 3.3)))
        self.assertNotEqual(wrapped, TestTypes.TestClass((2.2, 3.3, 1.1)))

    def test_ndarray_type(self):
        scalar = TestTypes.TestClass(np.array(3))
        self.assertEqual(self._test_list[scalar], 3)
        self.assertEqual(hash(scalar), hash((3,)))
        self.assertEqual(scalar, TestTypes.TestClass(np.array(3)))
        # A wrapped scalar ndarray compares equal to a wrapped plain int.
        self.assertEqual(scalar, TestTypes.TestClass(3))
        self.assertNotEqual(scalar, TestTypes.TestClass(np.array(4)))
        matrix = TestTypes.TestClass(np.array(((1, 2), (3, 4))))
        with self.assertRaises(ValueError):
            self._test_list[matrix] = 1
        self.assertEqual(hash(matrix), hash((1, 2, 3, 4)))
        self.assertEqual(matrix, TestTypes.TestClass(((1, 2), (3, 4))))
        self.assertNotEqual(matrix, TestTypes.TestClass(np.ndarray((1, 2, 3, 4))))

    def test_tensor_type(self):
        scalar = TestTypes.TestClass(torch.tensor(3))
        self.assertEqual(self._test_list[scalar], 3)
        self.assertEqual(hash(scalar), hash((3,)))
        self.assertEqual(scalar, TestTypes.TestClass(torch.tensor(3)))
        # Comparing a wrapped tensor against a wrapped plain int raises.
        with self.assertRaises(TypeError):
            _ = scalar == TestTypes.TestClass(3)
        self.assertNotEqual(scalar, TestTypes.TestClass(torch.tensor(4)))
        matrix = TestTypes.TestClass(torch.tensor(((1, 2), (3, 4))))
        with self.assertRaises(ValueError):
            self._test_list[matrix] = 1
        self.assertEqual(hash(matrix), hash((1, 2, 3, 4)))
        self.assertNotEqual(matrix, TestTypes.TestClass(torch.tensor((1, 2, 3, 4))))
class TestValues(unittest.TestCase):
    """Tests for the Values container over int and tuple-float key types,
    backed by list, dict, ndarray, and tensor storage."""

    TestIntType = TypeWrapper[int]
    TestTupleFloatType = TypeWrapper[Tuple[float]]

    class TestIntKeyValues(Values[TestIntType]):
        # Keys are wrapped ints, synthesizable from positional indices.
        def _new_key(self, k: int):
            return TestValues.TestIntType(k)

    class TestTupleFloatKeyValues(Values[TestTupleFloatType]):
        # Tuple-float keys cannot be synthesized from an int index.
        def _new_key(self, k: int):
            raise TypeError(
                f"value {k} invalid for " f"{TestValues.TestTupleFloatType.__name__}"
            )

    def setUp(self) -> None:
        # Same values (2.2, 4.4, 1.1, 3.3) stored four different ways.
        self._int_float_values = TestValues.TestIntKeyValues([2.2, 4.4, 1.1, 3.3])
        self._tuple_float_float_values = TestValues.TestTupleFloatKeyValues(
            {
                TestValues.TestTupleFloatType((1.0, 2.0)): 2.2,
                TestValues.TestTupleFloatType((3.0, 4.0)): 4.4,
                TestValues.TestTupleFloatType((5.0, 6.0)): 1.1,
                TestValues.TestTupleFloatType((7.0, 8.0)): 3.3,
            }
        )
        self._int_array_values = TestValues.TestIntKeyValues(
            np.array((2.2, 4.4, 1.1, 3.3))
        )
        self._int_tensor_values = TestValues.TestIntKeyValues(
            torch.tensor((2.2, 4.4, 1.1, 3.3))
        )

    def test_indexing(self):
        # Raw ints and wrapped keys both index into the container.
        self.assertEqual(self._int_float_values[2], 1.1)
        self.assertEqual(self._int_float_values[TestValues.TestIntType(2)], 1.1)
        self.assertEqual(
            self._tuple_float_float_values[TestValues.TestTupleFloatType((3.0, 4.0))],
            4.4,
        )

    def test_sort(self):
        # sort() returns keys and values in descending value order.
        keys, values = self._int_float_values.sort()
        self.assertEqual(
            keys,
            [
                TestValues.TestIntType(1),
                TestValues.TestIntType(3),
                TestValues.TestIntType(0),
                TestValues.TestIntType(2),
            ],
        )
        self.assertEqual(values, [4.4, 3.3, 2.2, 1.1])
        keys, values = self._tuple_float_float_values.sort()
        self.assertEqual(
            keys,
            [
                TestValues.TestTupleFloatType((3.0, 4.0)),
                TestValues.TestTupleFloatType((7.0, 8.0)),
                TestValues.TestTupleFloatType((1.0, 2.0)),
                TestValues.TestTupleFloatType((5.0, 6.0)),
            ],
        )
        self.assertEqual(values, [4.4, 3.3, 2.2, 1.1])
        keys, values = self._int_array_values.sort()
        self.assertEqual(
            keys,
            [
                TestValues.TestIntType(1),
                TestValues.TestIntType(3),
                TestValues.TestIntType(0),
                TestValues.TestIntType(2),
            ],
        )
        self.assertTrue(np.array_equal(values, np.array([4.4, 3.3, 2.2, 1.1])))
        keys, values = self._int_tensor_values.sort()
        self.assertEqual(
            keys,
            [
                TestValues.TestIntType(1),
                TestValues.TestIntType(3),
                TestValues.TestIntType(0),
                TestValues.TestIntType(2),
            ],
        )
        self.assertTrue(torch.equal(values, torch.tensor([4.4, 3.3, 2.2, 1.1])))

    def test_unzip(self):
        # items/values expose keys and values in insertion order; the
        # values' storage type (list/ndarray/tensor) is preserved.
        items = self._int_float_values.items
        values = self._int_float_values.values
        self.assertEqual(
            items,
            [
                TestValues.TestIntType(0),
                TestValues.TestIntType(1),
                TestValues.TestIntType(2),
                TestValues.TestIntType(3),
            ],
        )
        self.assertEqual(values, [2.2, 4.4, 1.1, 3.3])
        items = self._tuple_float_float_values.items
        values = self._tuple_float_float_values.values
        self.assertEqual(
            items,
            [
                TestValues.TestTupleFloatType((1.0, 2.0)),
                TestValues.TestTupleFloatType((3.0, 4.0)),
                TestValues.TestTupleFloatType((5.0, 6.0)),
                TestValues.TestTupleFloatType((7.0, 8.0)),
            ],
        )
        self.assertEqual(values, [2.2, 4.4, 1.1, 3.3])
        items = self._int_array_values.items
        values = self._int_array_values.values
        self.assertEqual(
            items,
            [
                TestValues.TestIntType(0),
                TestValues.TestIntType(1),
                TestValues.TestIntType(2),
                TestValues.TestIntType(3),
            ],
        )
        self.assertTrue(np.array_equal(values, np.array([2.2, 4.4, 1.1, 3.3])))
        items = self._int_tensor_values.items
        values = self._int_tensor_values.values
        self.assertEqual(
            items,
            [
                TestValues.TestIntType(0),
                TestValues.TestIntType(1),
                TestValues.TestIntType(2),
                TestValues.TestIntType(3),
            ],
        )
        self.assertTrue(torch.equal(values, torch.tensor([2.2, 4.4, 1.1, 3.3])))

    def test_copy(self):
        # copy() must be deep enough that mutating the copy leaves the
        # original untouched, for every backing storage type.
        copy = self._int_float_values.copy()
        for i, c in zip(self._int_float_values, copy):
            self.assertEqual(i, c)
        copy[1] = 2.1
        self.assertNotEqual(copy[1], self._int_float_values[1])
        copy = self._tuple_float_float_values.copy()
        for i, c in zip(self._tuple_float_float_values, copy):
            self.assertEqual(i, c)
        key = TestValues.TestTupleFloatType((3.0, 4.0))
        copy[key] = 2.1
        self.assertNotEqual(copy[key], self._tuple_float_float_values[key])
        copy = self._int_array_values.copy()
        for i, c in zip(self._int_array_values, copy):
            self.assertEqual(i, c)
        copy[1] = 2.1
        self.assertNotEqual(copy[1], self._int_array_values[1])
        copy = self._int_tensor_values.copy()
        for i, c in zip(self._int_tensor_values, copy):
            self.assertEqual(i, c)
        copy[1] = 2.1
        self.assertNotEqual(copy[1], self._int_tensor_values[1])

    def test_conversion(self):
        # Static converters accept list/tensor/ndarray interchangeably.
        float_list_val = [1.1, 2.2, 3.3]
        tensor_val = torch.tensor([1.1, 2.2, 3.3], dtype=torch.double)
        array_val = np.array([1.1, 2.2, 3.3], dtype=np.float64)
        self.assertTrue(
            torch.equal(
                Values.to_tensor(float_list_val, dtype=torch.double), tensor_val
            )
        )
        self.assertTrue(
            torch.equal(Values.to_tensor(tensor_val, dtype=torch.double), tensor_val)
        )
        self.assertTrue(
            torch.equal(Values.to_tensor(array_val, dtype=torch.double), tensor_val)
        )
        self.assertTrue(np.array_equal(Values.to_ndarray(float_list_val), array_val))
        self.assertTrue(
            np.array_equal(Values.to_ndarray(tensor_val, dtype=np.float64), array_val)
        )
        self.assertTrue(np.array_equal(Values.to_ndarray(array_val), array_val))
        self.assertEqual(Values.to_sequence(float_list_val), float_list_val)
        self.assertEqual(Values.to_sequence(tensor_val), float_list_val)
        self.assertEqual(Values.to_sequence(array_val), float_list_val)
class TestDistribution(unittest.TestCase):
    """Tests ActionDistribution normalization, sampling, and greedy
    selection across tensor, ndarray, list, and dict storage."""

    class TestIntKeyDistribution(Distribution[int]):
        def _new_key(self, k: int):
            return k

    def setUp(self) -> None:
        # The same unnormalized weights [1, 2, 3, 4] in four storage types;
        # they should all normalize to [0.1, 0.2, 0.3, 0.4].
        self._tensor_distribution = TestDistribution.TestIntKeyDistribution(
            torch.tensor([1.0, 2.0, 3.0, 4.0])
        )
        self._array_distribution = TestDistribution.TestIntKeyDistribution(
            np.array([1.0, 2.0, 3.0, 4.0])
        )
        self._list_distribution = TestDistribution.TestIntKeyDistribution(
            [1.0, 2.0, 3.0, 4.0]
        )
        self._map_distribution = TestDistribution.TestIntKeyDistribution(
            {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0}
        )

    def test_values(self):
        self.assertTrue(
            torch.equal(
                self._tensor_distribution.values, torch.tensor([0.1, 0.2, 0.3, 0.4])
            )
        )
        self.assertTrue(
            np.array_equal(
                self._array_distribution.values, np.array([0.1, 0.2, 0.3, 0.4])
            )
        )
        self.assertEqual(self._list_distribution.values, [0.1, 0.2, 0.3, 0.4])
        self.assertTrue(self._map_distribution.values, [0.1, 0.2, 0.3, 0.4])

    def _test_sample(self, distribution: Distribution):
        # Draw 100k samples and check empirical frequencies to 2 places
        # (RNGs are seeded in __main__ for reproducibility).
        counts = [0] * 4
        total = 100000
        for _ in range(total):
            counts[distribution.sample()[0]] += 1
        self.assertAlmostEqual(counts[0] / total, 0.1, places=2)
        self.assertAlmostEqual(counts[1] / total, 0.2, places=2)
        self.assertAlmostEqual(counts[2] / total, 0.3, places=2)
        self.assertAlmostEqual(counts[3] / total, 0.4, places=2)

    def test_sample(self):
        self._test_sample(self._tensor_distribution)
        self.assertEqual(self._tensor_distribution.greedy(4), [3, 2, 1, 0])
        self._test_sample(self._array_distribution)
        self.assertEqual(self._array_distribution.greedy(4), [3, 2, 1, 0])
        self._test_sample(self._list_distribution)
        self.assertEqual(self._list_distribution.greedy(4), [3, 2, 1, 0])
        self._test_sample(self._map_distribution)
        self.assertEqual(self._map_distribution.greedy(4), [3, 2, 1, 0])
if __name__ == "__main__":
    # Seed both RNGs so sampling-based tests are reproducible.
    np.random.seed(1234)
    torch.random.manual_seed(1234)
    unittest.main()
| 13,486 | 38.667647 | 86 | py |
ReAgent | ReAgent-master/reagent/ope/test/unit_tests/test_contextual_bandit_estimators.py | #!/usr/bin/env python3
import random
import unittest
import numpy as np
import torch
from reagent.ope.estimators.contextual_bandits_estimators import (
Action,
ActionDistribution,
ActionSpace,
BanditsEstimatorInput,
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
LogSample,
ModelOutputs,
SwitchDREstimator,
SwitchEstimator,
)
class TestSwitchEstimators(unittest.TestCase):
    """
    These unit tests verify basic properties of the Switch estimators, in that
    when the threshold is low, the model-based DM estimator is used and when the
    threshold is high, the propensity score estimator is used.
    """

    NUM_ACTIONS = 2
    # Tolerance when comparing Switch-DR against plain DR.
    DR_EPSILON = 0.05

    def setUp(self) -> None:
        random.seed(0)
        torch.random.manual_seed(0)
        np.random.seed(0)
        self.action_space = ActionSpace(TestSwitchEstimators.NUM_ACTIONS)
        # Two hand-built log samples with log/target propensities and
        # model outputs, enough to exercise every estimator path.
        self.sample1 = LogSample(
            context=0,
            log_action=Action(0),
            log_reward=1.0,
            log_action_probabilities=ActionDistribution(torch.tensor([0.7, 0.3])),
            tgt_action_probabilities=ActionDistribution([0.6, 0.4]),
            tgt_action=Action(1),
            model_outputs=ModelOutputs(0.5, [0.4, 0.5]),
        )
        self.sample2 = LogSample(
            context=0,
            log_action=Action(1),
            log_reward=0.0,
            log_action_probabilities=ActionDistribution([0.5, 0.5]),
            tgt_action_probabilities=ActionDistribution([0.7, 0.3]),
            tgt_action=Action(0),
            model_outputs=ModelOutputs(0.0, [0.0, 0.0]),
        )
        self.bandit_input = BanditsEstimatorInput(
            self.action_space, [self.sample1, self.sample2], True
        )
        # Restore class-level knobs mutated by individual tests below.
        SwitchEstimator.EXP_BASE = 1.5
        SwitchEstimator.CANDIDATES = 21

    def test_switch_equal_to_ips(self):
        """
        Switch with tau set at the max value should be equal to IPS
        """
        # Setting the base to 1 will cause all candidates to be the maximum threshold
        SwitchEstimator.EXP_BASE = 1
        switch = SwitchEstimator(rmax=1.0).evaluate(self.bandit_input)
        ips = IPSEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(ips.estimated_reward, switch.estimated_reward)

    def test_switch_dr_equal_to_dr(self):
        """
        Switch-DR with tau set at the max value should be equal to DR
        """
        # Setting the base to 1 will cause all candidates to be the maximum threshold
        SwitchEstimator.EXP_BASE = 1
        switch = SwitchDREstimator(rmax=1.0).evaluate(self.bandit_input)
        dr = DoublyRobustEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(
            dr.estimated_reward,
            switch.estimated_reward,
            delta=TestSwitchEstimators.DR_EPSILON,
        )

    def test_switch_equal_to_dm(self):
        """
        Switch with tau set at the min value should be equal to DM
        """
        # Setting candidates to 0 will default to tau being the minimum threshold
        SwitchEstimator.CANDIDATES = 0
        switch = SwitchEstimator(rmax=1.0).evaluate(self.bandit_input)
        dm = DMEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(dm.estimated_reward, switch.estimated_reward)

    def test_switch_dr_equal_to_dm(self):
        """
        Switch-DR with tau set at the min value should be equal to DM
        """
        # Setting candidates to 0 will default to tau being the minimum threshold
        SwitchEstimator.CANDIDATES = 0
        switch = SwitchDREstimator(rmax=1.0).evaluate(self.bandit_input)
        dm = DMEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(dm.estimated_reward, switch.estimated_reward)
| 3,772 | 34.933333 | 85 | py |
ReAgent | ReAgent-master/reagent/ope/test/unit_tests/test_utils.py | #!/usr/bin/env python3
import unittest
import numpy as np
import torch
from reagent.ope.utils import Clamper, RunningAverage
class TestUtils(unittest.TestCase):
    """Unit tests for the RunningAverage and Clamper helpers."""

    def test_running_average(self):
        avg = RunningAverage()
        # add() is chainable, so it must return the instance itself.
        avg.add(1.0).add(2.0).add(3.0).add(4.0)
        self.assertEqual(avg.count, 4)
        self.assertEqual(avg.average, 2.5)
        self.assertEqual(avg.total, 10.0)

    def test_clamper(self):
        # Constructing with min > max must be rejected.
        with self.assertRaises(ValueError):
            clamper = Clamper(1.0, 0.0)
        raw = [-1.1, 0.9, 0.0, 1.1, -0.9]
        as_tensor = torch.tensor(raw)
        as_array = np.array(raw)
        # No bounds: identity on all supported container types.
        identity = Clamper()
        self.assertEqual(identity(raw), raw)
        self.assertTrue(torch.equal(identity(as_tensor), as_tensor))
        self.assertTrue(np.array_equal(identity(as_array), as_array))
        # [-1, 1] bounds: out-of-range entries are clipped.
        bounded = Clamper(-1.0, 1.0)
        expected = [-1.0, 0.9, 0.0, 1.0, -0.9]
        self.assertEqual(bounded(raw), expected)
        self.assertTrue(torch.equal(bounded(as_tensor), torch.tensor(expected)))
        self.assertTrue(np.array_equal(bounded(as_array), np.array(expected)))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 1,326 | 30.595238 | 87 | py |
ReAgent | ReAgent-master/reagent/ope/datasets/logged_dataset.py | #!/usr/bin/env python3
from abc import ABC, abstractmethod
from dataclasses import dataclass
import torch
class BanditsDataset(ABC):
    """
    Base class for logged, aka behavior, dataset.

    A sample is a (features, action, reward) triple; subclasses define
    the concrete storage and the sample type returned by __getitem__.
    """

    @abstractmethod
    def __len__(self) -> int:
        """
        Returns:
            length of the dataset
        """
        pass

    @abstractmethod
    def __getitem__(self, idx) -> dataclass:
        """
        Args:
            idx: index of the sample

        Returns:
            tuple of features, action, and reward at idx
        """
        pass

    @property
    @abstractmethod
    def num_features(self) -> int:
        """
        Returns:
            number of features
        """
        pass

    @property
    @abstractmethod
    def num_actions(self) -> int:
        """
        Returns:
            number of total possible actions
        """
        pass

    @property
    @abstractmethod
    def features(self) -> torch.Tensor:
        """
        Returns:
            all features in the dataset as a tensor
        """
        pass

    @property
    @abstractmethod
    def actions(self) -> torch.Tensor:
        """
        Returns:
            all actions in the dataset as a tensor
        """
        pass

    @property
    @abstractmethod
    def rewards(self) -> torch.Tensor:
        """
        Returns:
            all rewards in the dataset as a tensor
        """
        pass
| 1,441 | 17.727273 | 56 | py |
ReAgent | ReAgent-master/reagent/ope/trainers/linear_trainers.py | #!/usr/bin/env python3
import logging
import math
import time
from typing import Optional
import numpy as np
import torch
from reagent.ope.estimators.types import PredictResults, Trainer, TrainingData
from sklearn.linear_model import Lasso, LogisticRegression, SGDClassifier
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from torch import Tensor
class LinearTrainer(Trainer):
    """Shared scaffolding for sklearn-backed trainers.

    Wraps a fitted sklearn model and exposes tensor-based prediction plus
    a scalar quality score: accuracy for classifiers, 1 / 2^MSE for
    regressors (so lower MSE maps to a higher score).
    """

    def __init__(self, is_classifier: bool = False):
        super().__init__()
        self._is_classifier = is_classifier

    def predict(self, x: Tensor, device=None) -> PredictResults:
        """Run the trained model on x, preferring probabilistic output."""
        model = self._model
        if model is None:
            raise Exception("model not trained")
        if hasattr(model, "predict_proba"):
            proba = torch.as_tensor(
                model.predict_proba(x), dtype=torch.float, device=device
            )
            # Expected class index under the predicted distribution.
            score = (proba * torch.arange(proba.shape[1])).sum(dim=1)
            return PredictResults(torch.argmax(proba, 1), score, proba)
        if hasattr(model, "predict"):
            preds = torch.as_tensor(
                model.predict(x), dtype=torch.float, device=device
            )
            return PredictResults(None, preds, None)
        raise AttributeError("model doesn't have predict_proba or predict")

    def _score(self, y_true: np.ndarray, y_pred: np.ndarray, weight=None) -> float:
        if self._is_classifier:
            return accuracy_score(y_true, y_pred, sample_weight=weight)
        mse = mean_squared_error(y_true, y_pred, sample_weight=weight)
        return 1.0 / math.pow(2, mse)

    def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float:
        """Score model predictions on x against targets y."""
        sample_weight = None if weight is None else weight.numpy()
        return self._score(y.numpy(), self._model.predict(x), weight=sample_weight)
class LassoTrainer(LinearTrainer):
    """Lasso regression trainer with a log-spaced grid search over alpha."""

    @property
    def name(self) -> str:
        return "lasso"

    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        logging.info("LassoTrainer.train...")
        self._model = None
        best_score = float("-inf")
        for _ in range(iterations):
            train_x, train_y, _ = super()._sample(
                data.train_x, data.train_y, data.train_weight, num_samples, True
            )
            val_x, val_y, val_w = super()._sample(
                data.validation_x, data.validation_y, data.validation_weight
            )
            # Try alpha = 1e-4 ... 1e2; keep the model with the best
            # validation score.
            for alpha in np.logspace(-4, 2, num=7, base=10):
                candidate = Lasso(
                    alpha=alpha,
                    fit_intercept=False,
                    copy_X=True,
                    max_iter=10000,
                    warm_start=False,
                    selection="random",
                )
                candidate.fit(train_x, train_y)
                score = self._score(val_y, candidate.predict(val_x), weight=val_w)
                logging.info(f"  alpha: {alpha}, score: {score}")
                if score > best_score:
                    best_score = score
                    self._model = candidate
class DecisionTreeTrainer(LinearTrainer):
    """Decision-tree regressor trainer with a grid search over max_depth.

    An unbounded-depth tree is fit first as the baseline, then depths
    3..18 (step 3) are tried; the model with the best validation score
    (per LinearTrainer._score) is kept.
    """

    @property
    def name(self) -> str:
        return "decision_tree"

    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        logging.info("DecisionTreeTrainer.train...")
        self._model = None
        best_score = float("-inf")
        for _ in range(iterations):
            # Training and validation samples come from the base Trainer.
            x, y, sw = super()._sample(
                data.train_x, data.train_y, data.train_weight, num_samples, True
            )
            sx, sy, ssw = super()._sample(
                data.validation_x, data.validation_y, data.validation_weight
            )
            # NOTE(review): criterion="mse" was renamed to "squared_error"
            # in scikit-learn 1.0 and removed in 1.2 — confirm the pinned
            # sklearn version still accepts it.
            if self._model is None:
                # Baseline: unbounded depth, fit only on the first iteration.
                self._model = DecisionTreeRegressor(
                    criterion="mse",
                    splitter="random",
                    max_depth=None,
                    min_samples_split=4,
                    min_samples_leaf=4,
                )
                self._model.fit(x, y, sw)
                y_pred = self._model.predict(sx)
                best_score = self._score(sy, y_pred, weight=ssw)
                logging.info(f"  max_depth: None, score: {best_score}")
            for depth in range(3, 21, 3):
                model = DecisionTreeRegressor(
                    criterion="mse",
                    splitter="random",
                    max_depth=depth,
                    min_samples_split=4,
                    min_samples_leaf=4,
                )
                model.fit(x, y, sw)
                y_pred = model.predict(sx)
                score = self._score(sy, y_pred, weight=ssw)
                # score = model.score(sx, sy, ssw)
                logging.info(f"  max_depth: {depth}, score: {score}")
                if score > best_score:
                    best_score = score
                    self._model = model
class DecisionTreeClassifierTrainer(LinearTrainer):
    """Decision-tree classifier trainer with a grid search over max_depth."""

    def __init__(self):
        super().__init__(True)

    @property
    def name(self) -> str:
        return "decision_tree_classifier"

    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        logging.info("DecisionTreeClassifierTrainer.train...")
        self._model = None
        best_score = float("-inf")
        for _ in range(iterations):
            train_x, train_y, train_w = super()._sample(
                data.train_x, data.train_y, data.train_weight, num_samples, True
            )
            val_x, val_y, val_w = super()._sample(
                data.validation_x, data.validation_y, data.validation_weight
            )
            # Depths 3..18 (step 3); keep the best validation accuracy.
            for depth in range(3, 21, 3):
                candidate = DecisionTreeClassifier(
                    criterion="entropy",
                    splitter="random",
                    max_depth=depth,
                    min_samples_split=4,
                    min_samples_leaf=4,
                )
                candidate.fit(train_x, train_y, train_w)
                score = candidate.score(val_x, val_y, val_w)
                logging.info(f"  max_depth: {depth}, score: {score}")
                if score > best_score:
                    best_score = score
                    self._model = candidate
class LogisticRegressionTrainer(LinearTrainer):
    """Logistic-regression trainer with a grid search over the inverse
    regularization strength C."""

    def __init__(self, solver: str = "lbfgs"):
        super().__init__(True)
        self._solver = solver

    @property
    def name(self) -> str:
        return "logistic_regression"

    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        logging.info("LogisticRegressionTrainer.train...")
        self._model = None
        best_score = float("-inf")
        for _ in range(iterations):
            train_x, train_y, train_w = super()._sample(
                data.train_x, data.train_y, data.train_weight, num_samples, True
            )
            val_x, val_y, val_w = super()._sample(
                data.validation_x, data.validation_y, data.validation_weight
            )
            # C = 1e-5 ... 1e4; keep the best validation accuracy.
            for c in np.logspace(-5, 4, num=10, base=10):
                candidate = LogisticRegression(
                    C=c,
                    fit_intercept=False,
                    solver=self._solver,
                    max_iter=1000,
                    multi_class="auto",
                    n_jobs=-1,
                )
                candidate.fit(train_x, train_y, train_w)
                score = candidate.score(val_x, val_y, val_w)
                logging.info(f"  C: {c}, score: {score}")
                if score > best_score:
                    best_score = score
                    self._model = candidate
class SGDClassifierTrainer(LinearTrainer):
    """SGD-based linear classifier trainer with a grid search over the
    regularization strength alpha."""

    def __init__(self, loss: str = "log", max_iter: int = 1000):
        super().__init__(True)
        self._loss = loss
        self._max_iter = max_iter

    @property
    def name(self) -> str:
        return "sgd_classifier"

    def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0):
        logging.info("SGDClassifierTrainer.train...")
        self._model = None
        best_score = float("-inf")
        for _ in range(iterations):
            # Sample weights are not used when fitting this model.
            train_x, train_y, _ = super()._sample(
                data.train_x, data.train_y, data.train_weight, num_samples, True
            )
            val_x, val_y, val_w = super()._sample(
                data.validation_x, data.validation_y, data.validation_weight
            )
            # alpha = 1e-8 ... 1e-1; keep the best validation accuracy.
            for alpha in np.logspace(-8, -1, num=8, base=10):
                candidate = SGDClassifier(
                    loss=self._loss,
                    alpha=alpha,
                    random_state=0,
                    max_iter=self._max_iter,
                )
                candidate.fit(train_x, train_y)
                score = candidate.score(val_x, val_y, val_w)
                logging.info(f"  alpha: {alpha}, score: {score}")
                if score > best_score:
                    best_score = score
                    self._model = candidate
class LinearNet(torch.nn.Module):
    """Simple MLP: `hidden_layers` blocks of (Linear -> activation)
    followed by a final Linear projection to `D_out`."""

    def __init__(
        self,
        D_in: int,
        H: int,
        D_out: int,
        hidden_layers: int = 2,
        activation=torch.nn.ReLU,
    ):
        super(LinearNet, self).__init__()
        self._hidden_dim = H
        self._hidden_layers = hidden_layers
        self._activation = activation
        self._out_dim = D_out
        # Layer widths: D_in -> H -> ... -> H (hidden_layers times) -> D_out
        widths = [D_in] + [H] * hidden_layers
        self.layers = []
        for in_dim, out_dim in zip(widths[:-1], widths[1:]):
            self.layers.extend(
                [torch.nn.Linear(in_dim, out_dim), self._activation()]
            )
        self.layers.append(torch.nn.Linear(widths[-1], self._out_dim))
        self.model = torch.nn.Sequential(*self.layers)

    def forward(self, x: torch.Tensor):
        # Inputs are marked as requiring grad (kept from the original
        # implementation; enables input-gradient computation downstream).
        x = x.requires_grad_(True)
        return self.model(x)
class NNTrainer(Trainer):
    """Trains a `LinearNet` MLP regressor with Adam and MSE loss.

    Unlike the `LinearTrainer` subclasses (where higher scores are
    better), `score` here returns the raw loss value.
    """

    def __init__(self, device=None):
        super().__init__()
        self._device = device
        self._loss_fn: Optional[torch.nn.MSELoss] = None

    @property
    def name(self) -> str:
        return "linear_net"

    def train(
        self,
        data: TrainingData,
        iterations: int = 100,
        epochs: int = 1,
        num_samples: int = 0,
    ):
        """Fit the network on `data` for `epochs` x `iterations` steps.

        Returns None early if the input or output dimension is zero.
        """
        d_in, d_out = (
            data.train_x.shape[1],
            data.train_y.shape[1] if len(data.train_y.shape) > 1 else 1,
        )
        if d_in == 0 or d_out == 0:
            return None
        h = 500
        n = data.train_x.shape[0] // 200  # logged only; not used below
        logging.info("start training...")
        logging.info(f"  d_in = {d_in}, h = {h}, d_out = {d_out}, n = {n}")
        st = time.process_time()
        self._model = LinearNet(d_in, h, d_out)
        if self._device is not None and self._device.type == "cuda":
            self._model = self._model.cuda()
        self._loss_fn = torch.nn.MSELoss(reduction="mean")
        learning_rate = 1e-3
        optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, "min", patience=5, verbose=True, threshold=1e-5
        )
        for _ in range(epochs):
            for t in range(iterations):
                x, y, _ = super()._sample(
                    data.train_x, data.train_y, data.train_weight, num_samples, True
                )
                x = torch.as_tensor(x, device=self._device)
                y = torch.as_tensor(y, device=self._device)
                if len(y.shape) == 1:
                    y = y.reshape(-1, 1)
                y_pred = self._model(x)
                loss = self._loss_fn(y_pred, y)
                if (t + 1) % 10 == 0:
                    # Step the plateau scheduler on the current loss every
                    # 10 iterations.
                    scheduler.step(loss.item())
                    logging.info(f"  step [{t + 1}]: loss={loss.item()}")
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        logging.info(f"  training time {time.process_time() - st}")

    def predict(self, x: Tensor, device=None) -> PredictResults:
        if self._model is None:
            # Bug fix: message said "mode not trained"; aligned with
            # LinearTrainer's "model not trained".
            raise Exception("model not trained")
        self._model.eval()
        proba = torch.as_tensor(self._model(x), dtype=torch.float, device=device)
        # NOTE(review): LinearTrainer.predict passes three positional args
        # (labels, scores, probs); only two are passed here — confirm
        # PredictResults treats the second argument as intended.
        return PredictResults(torch.argmax(proba, 1), proba)

    def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float:
        if self._loss_fn is None:
            # Bug fix: message said "mode not trained" (same typo as above).
            raise Exception("model not trained")
        # NOTE(review): computes the loss between y and x directly — x is
        # expected to already be predictions; `weight` is ignored.
        return self._loss_fn(y, x).item()
| 12,799 | 35.571429 | 85 | py |
ReAgent | ReAgent-master/reagent/ope/trainers/rl_tabular_trainers.py | #!/usr/bin/env python3
import pickle
from functools import reduce
from typing import List, Mapping, Sequence
import torch
from reagent.ope.estimators.sequential_estimators import (
Model,
RLPolicy,
State,
ValueFunction,
)
from reagent.ope.estimators.types import Action, ActionDistribution, ActionSpace
from reagent.ope.test.envs import Environment, PolicyLogGenerator
from reagent.ope.utils import RunningAverage
class TabularPolicy(RLPolicy):
    """Tabular epsilon-greedy policy over a finite action space.

    Learned per-state action distributions are mixed with a uniform
    exploration component controlled by `epsilon`; states without a
    learned distribution fall back to uniform.
    """

    def __init__(self, action_space: ActionSpace, epsilon: float = 0.0, device=None):
        super().__init__(action_space, device)
        self._epsilon = epsilon
        num_actions = len(action_space)
        self._exploitation_prob = 1.0 - epsilon
        self._exploration_prob = epsilon / len(action_space)
        self._uniform_probs: List[float] = [1.0 / num_actions] * num_actions
        self._state_space = {}

    def update(self, state: State, actions: Sequence[float]) -> float:
        """Store a new distribution for `state`; returns the L2 distance
        between the old and new distributions."""
        if state in self._state_space:
            previous = self._state_space[state]
        else:
            previous = self._uniform_probs
        self._state_space[state] = actions
        squared_diffs = [(p - q) ** 2 for p, q in zip(previous, actions)]
        return reduce(lambda acc, d: acc + d, squared_diffs) ** 0.5

    def action_dist(self, state: State) -> ActionDistribution:
        """Distribution for `state`: learned probs mixed with epsilon
        exploration, or uniform if the state is unknown."""
        if state not in self._state_space:
            probs = self._uniform_probs
        else:
            probs = [
                p * self._exploitation_prob + self._exploration_prob
                for p in self._state_space[state]
            ]
        return self._action_space.distribution(probs)

    def save(self, path) -> bool:
        """Pickle the policy state to `path`; True on success."""
        try:
            with open(path, "wb") as f:
                pickle.dump((self._action_space, self._epsilon, self._state_space), f)
        except Exception:
            return False
        return True

    def load(self, path) -> bool:
        """Restore policy state pickled by `save`; True on success."""
        try:
            with open(path, "rb") as f:
                self._action_space, self._epsilon, self._state_space = pickle.load(f)
        except Exception:
            return False
        return True
class TabularValueFunction(ValueFunction):
    """Base class for tabular state-value functions.

    Holds the policy, the transition model, the discount, and a
    state -> value table; concrete subclasses implement `state_value`.
    """

    def __init__(self, policy: RLPolicy, model: Model, gamma=0.99):
        self._policy = policy
        self._model = model
        self._gamma = gamma
        self._state_values = {}

    def _state_value(self, state: State) -> float:
        """Cached value of `state`; 0.0 for terminal, None, or unseen states."""
        if state is None or state.is_terminal:
            return 0.0
        return self._state_values.get(state, 0.0)

    def state_action_value(self, state: State, action: Action) -> float:
        """Q(s, a) = sum over next states s' of P(s', r | s, a) * (r + gamma * V(s'))."""
        value = 0.0
        sr_dist = self._model.next_state_reward_dist(state, action)
        for next_state, rp in sr_dist.items():
            # Bug fix: bootstrap from the *next* state's value (the dict
            # key) rather than the current state's — consistent with
            # DPValueFunction._evaluate, which uses the key `s`.
            value += rp.prob * (
                rp.reward + self._gamma * self.state_value(next_state)
            )
        return value

    def state_value(self, state: State) -> float:
        # Abstract in spirit: concrete subclasses provide the estimate.
        pass

    def reset(self, clear_state_values: bool = False):
        pass
class EstimatedStateValueFunction(ValueFunction):
    """Monte-Carlo estimate of state values for a fixed policy.

    On construction, rolls out `num_episodes` episodes from every state
    of `env` under `policy` and stores each state's average discounted
    return as its value.
    """

    def __init__(
        self, policy: RLPolicy, env: Environment, gamma: float, num_episodes: int = 100
    ):
        self._policy = policy
        self._env = env
        self._gamma = gamma
        self._num_episodes = num_episodes
        self._state_values = {}
        self._estimate_value()

    def _estimate_value(self):
        # Generate num_episodes rollouts per starting state...
        tgt_generator = PolicyLogGenerator(self._env, self._policy)
        log = {}
        for state in self._env.states:
            mdps = []
            for _ in range(self._num_episodes):
                mdps.append(tgt_generator.generate_log(state))
            log[state] = mdps
        # ...then average the discounted return across the rollouts.
        for state, mdps in log.items():
            avg = RunningAverage()
            for mdp in mdps:
                discount = 1.0
                r = 0.0
                for t in mdp:
                    r += discount * t.reward
                    discount *= self._gamma
                avg.add(r)
            self._state_values[state] = avg.average

    def state_action_value(self, state: State, action: Action) -> float:
        # Q-values are not estimated by this class.
        return 0.0

    def state_value(self, state: State) -> float:
        return self._state_values[state]

    def reset(self):
        self._state_values = {}
class DPValueFunction(TabularValueFunction):
    """Policy evaluation via iterative dynamic-programming Bellman backups
    over an enumerable environment.

    Evaluation is lazy: it runs on the first `state_value` call and is
    cached until `reset`.
    """

    def __init__(
        self,
        policy: RLPolicy,
        env: Environment,
        gamma: float = 0.99,
        threshold: float = 0.0001,
    ):
        super().__init__(policy, env, gamma)
        self._env = env
        self._threshold = threshold  # convergence threshold on max value change
        self._evaluated = False

    def state_value(self, state: State, horizon: int = -1) -> float:
        # `horizon` is accepted for interface compatibility but unused here.
        if not self._evaluated:
            self._evaluate()
        return self._state_value(state)

    def reset(self, clear_state_values: bool = False):
        self._evaluated = False
        if clear_state_values:
            self._state_values.clear()

    def _evaluate(self):
        # Standard iterative policy evaluation: sweep all states until the
        # largest single-state value change drops below the threshold.
        delta = float("inf")
        while delta >= self._threshold:
            delta = 0.0
            for state in self._env.states:
                old_value = self._state_value(state)
                new_value = 0.0
                a_dist = self._policy(state)
                for a, ap in a_dist:
                    # self._model is the environment passed to __init__.
                    s_dist = self._model(state, a)
                    a_value = 0.0
                    for s, rp in s_dist.items():
                        a_value += rp.prob * (
                            rp.reward + self._gamma * self._state_value(s)
                        )
                    new_value += ap * a_value
                delta = max(delta, abs(old_value - new_value))
                self._state_values[state] = new_value
        self._evaluated = True
class DPTrainer(object):
    """Policy iteration: alternates DP policy evaluation with greedy
    policy improvement until the policy is stable."""

    def __init__(self, env: Environment, policy: TabularPolicy):
        self._env = env
        self._policy = policy

    @staticmethod
    def _state_value(state: State, state_values: Mapping[State, float]) -> float:
        # Value lookup with a default of 0.0 for unseen states.
        return 0.0 if state not in state_values else state_values[state]

    def train(self, gamma: float = 0.9, threshold: float = 0.0001):
        """Run policy iteration; returns the final DP value function."""
        stable = False
        valfunc = DPValueFunction(self._policy, self._env, gamma, threshold)
        while not stable:
            stable = True
            for state in self._env.states:
                # Greedy improvement: collect all actions tied for the best
                # one-step lookahead value under the current value function.
                new_actions = []
                max_value = float("-inf")
                for action in self._policy.action_space:
                    s_dist = self._env(state, action)
                    value = 0.0
                    for s, rp in s_dist.items():
                        value += rp.prob * rp.reward
                        if s is not None:
                            value += rp.prob * gamma * valfunc(s)
                    if value > max_value:
                        max_value = value
                        new_actions = [action]
                    elif value == max_value:
                        new_actions.append(action)
                # Spread probability uniformly over the argmax actions.
                prob = 1.0 / len(new_actions)
                actions = [0.0] * len(self._policy.action_space)
                for a in new_actions:
                    actions[a.value] = prob
                # A policy change above tolerance requires another sweep.
                if self._policy.update(state, actions) >= 1.0e-6:
                    stable = False
            valfunc.reset()
        return valfunc
class MonteCarloValueFunction(TabularValueFunction):
    """Estimates state values by Monte-Carlo rollouts of the policy.

    Values are running averages of (optionally first-visit) returns;
    rollouts for a state are generated on demand until its visit count
    reaches `count_threshold` or `max_iteration` episodes were sampled.
    """

    def __init__(
        self,
        policy: RLPolicy,
        env: Environment,
        gamma: float = 0.99,
        first_visit: bool = True,
        count_threshold: int = 100,
        max_iteration: int = 200,
    ):
        super().__init__(policy, env, gamma)
        self._env = env
        self._first_visit = first_visit
        self._count_threshold = count_threshold
        self._max_iteration = max_iteration
        self._log_generator = PolicyLogGenerator(env, policy)
        self._state_counts = {}

    def _state_value(self, state: State):
        i = 0
        state_count = self._state_counts.get(state, 0)
        while state_count < self._count_threshold and i < self._max_iteration:
            i += 1
            mdp = self._log_generator.generate_log(state)
            if self._first_visit:
                # Count each state's occurrences in the episode, then walk
                # the episode backwards accumulating the discounted return.
                occurrences = {}
                for t in mdp:
                    if t.last_state is None:
                        continue
                    occurrences[t.last_state] = occurrences.get(t.last_state, 0) + 1
                g = 0
                for t in reversed(mdp):
                    if t.last_state is None:
                        continue
                    g = self._gamma * g + t.reward
                    remaining = occurrences[t.last_state] - 1
                    if remaining == 0:
                        # No earlier occurrences remain: this is the FIRST
                        # visit in the episode, so record its return.
                        # Bug fix: the old condition (`counts > 1`) updated
                        # on every visit EXCEPT the first, and never updated
                        # states visited exactly once.
                        self._update_state_value(t.last_state, g)
                        del occurrences[t.last_state]
                    else:
                        occurrences[t.last_state] = remaining
            else:
                # Every-visit MC: record the return of every occurrence.
                g = 0
                for t in reversed(mdp):
                    if t.last_state is None:
                        continue
                    g = self._gamma * g + t.reward
                    self._update_state_value(t.last_state, g)
            state_count = self._state_counts.get(state, 0)
        return super()._state_value(state)

    def _update_state_value(self, state: State, g: float):
        """Incrementally update the running-average value of `state`."""
        count = self._state_counts.get(state, 0) + 1
        value = super()._state_value(state)
        value += (g - value) / count
        self._state_values[state] = value
        self._state_counts[state] = count

    def state_value(self, state: State) -> float:
        return self._state_value(state)

    def reset(self, clear_state_values: bool = False):
        if clear_state_values:
            self._state_values.clear()
        self._state_counts.clear()
class MonteCarloTrainer(object):
    """Monte-Carlo control: alternates policy rollouts with softmax policy
    improvement over running-average state-action returns."""

    def __init__(self, env: Environment, policy: TabularPolicy):
        self._env = env
        self._policy = policy
        self._log_generator = PolicyLogGenerator(env, policy)

    def train(
        self,
        iterations: int,
        gamma: float = 0.9,
        first_visit: bool = True,
        update_interval: int = 20,
    ):
        """Run up to `iterations` sweeps over all states; every
        `update_interval` sweeps the policy is updated, stopping early once
        it has converged."""
        i = 0
        value_counts = {}
        while i < iterations:
            i += 1
            for state in self._env.states:
                mdp = self._log_generator.generate_log(state)
                if first_visit:
                    # Count (state, action) occurrences, then accumulate
                    # discounted returns walking the episode backwards.
                    occurrences = {}
                    for t in mdp:
                        if t.last_state is None or t.action is None:
                            continue
                        key = (t.last_state, t.action)
                        occurrences[key] = occurrences.get(key, 0) + 1
                    g = 0
                    for t in reversed(mdp):
                        if t.last_state is None or t.action is None:
                            continue
                        g = gamma * g + t.reward
                        key = (t.last_state, t.action)
                        remaining = occurrences[key] - 1
                        if remaining == 0:
                            # First occurrence of (state, action) in the
                            # episode. Bug fix: the old condition (`vc > 1`)
                            # updated on every visit except the first and
                            # skipped pairs seen exactly once (its delete
                            # branch was unreachable).
                            self._update_state_value(
                                value_counts, t.last_state, t.action, g
                            )
                            del occurrences[key]
                        else:
                            occurrences[key] = remaining
                else:
                    # Every-visit MC.
                    g = 0
                    for t in reversed(mdp):
                        if t.last_state is None or t.action is None:
                            continue
                        g = gamma * g + t.reward
                        self._update_state_value(
                            value_counts, t.last_state, t.action, g
                        )
            if i % update_interval == 0 and self._update_policy(value_counts):
                break

    def _update_state_value(self, value_counts, state, action, g: float):
        """Running average of returns per (state, action)."""
        key = (state, action)
        sv, sc = value_counts.get(key, (0.0, 0))
        sc += 1
        sv = sv + (g - sv) / sc
        value_counts[key] = (sv, sc)

    def _update_policy(self, value_counts) -> bool:
        """Softmax over (value * count) per action; returns True when the
        policy changed by less than 1e-6 at every state (converged)."""
        stable = True
        for state in self._env.states:
            scores = []
            for a in self._policy.action_space:
                entry = value_counts.get((state, a))
                scores.append(0.0 if entry is None else entry[0] * entry[1])
            probs = torch.nn.functional.softmax(torch.tensor(scores), dim=0).tolist()
            if self._policy.update(state, probs) >= 1.0e-6:
                stable = False
        return stable
| 13,359 | 34.157895 | 88 | py |
ReAgent | ReAgent-master/reagent/mab/ucb.py | import math
from abc import ABC, abstractmethod
from typing import Union, Optional, List
import torch
from torch import Tensor
def _get_arm_indices(
ids_of_all_arms: List[Union[str, int]], ids_of_arms_in_batch: List[Union[str, int]]
) -> List[int]:
arm_idxs = []
for i in ids_of_arms_in_batch:
try:
arm_idxs.append(ids_of_all_arms.index(i))
except ValueError:
raise ValueError(f"Unknown arm_id {i}. Known arm ids: {ids_of_all_arms}")
return arm_idxs
def _place_values_at_indices(values: Tensor, idxs: List[int], total_len: int) -> Tensor:
"""
TODO: maybe replace with sparse vector function?
Args:
values (Tensor): The values
idxs (List[int]): The indices at which the values have to be placed
total_len (int): Length of the array
"""
assert len(values) == len(idxs)
ret = torch.zeros(total_len)
ret[idxs] = values
return ret
class BaseUCB(torch.nn.Module, ABC):
    """
    Base class for UCB-like Multi-Armed Bandits (MAB)

    Keeps per-arm observation counts and reward sums; subclasses implement
    `get_ucb_scores`. Either `n_arms` or `arm_ids` must be provided; when
    both are given, `arm_ids` takes precedence (matching the original
    behavior).
    """

    def __init__(
        self,
        *,
        n_arms: Optional[int] = None,
        arm_ids: Optional[List[Union[str, int]]] = None,
    ):
        super().__init__()
        if arm_ids is not None:
            self.arm_ids = arm_ids
            self.n_arms = len(arm_ids)
        elif n_arms is not None:
            self.arm_ids = list(range(n_arms))
            self.n_arms = n_arms
        else:
            # Bug fix: previously this fell through and failed later with an
            # AttributeError on self.n_arms; fail fast with a clear message.
            raise ValueError("Either n_arms or arm_ids must be specified")
        self.total_n_obs_all_arms = 0
        self.total_n_obs_per_arm = torch.zeros(self.n_arms)
        self.total_sum_reward_per_arm = torch.zeros(self.n_arms)

    def add_batch_observations(
        self,
        n_obs_per_arm: Tensor,
        sum_reward_per_arm: Tensor,
        arm_ids: Optional[List[Union[str, int]]] = None,
    ):
        """Add a batch of per-arm observation counts and reward sums.

        Args:
            n_obs_per_arm (Tensor): An array of counts of per-arm numbers of observations
            sum_reward_per_arm (Tensor): An array of sums of rewards for each arm
            arm_ids: ids of the arms in the batch, in the same order as the
                arrays above; defaults to all arms in the default order.
        """
        if not isinstance(n_obs_per_arm, Tensor):
            n_obs_per_arm = torch.tensor(n_obs_per_arm, dtype=torch.float)
        if not isinstance(sum_reward_per_arm, Tensor):
            sum_reward_per_arm = torch.tensor(sum_reward_per_arm, dtype=torch.float)
        if arm_ids is None or arm_ids == self.arm_ids:
            # assume that the observations are for all arms in the default order
            arm_ids = self.arm_ids
            arm_idxs = list(range(self.n_arms))
        else:
            assert len(arm_ids) == len(
                set(arm_ids)
            )  # make sure no duplicates in arm IDs
            # get the indices of the arms
            arm_idxs = _get_arm_indices(self.arm_ids, arm_ids)
            # put elements from the batch in the positions specified by `arm_ids` (missing arms will be zero)
            n_obs_per_arm = _place_values_at_indices(
                n_obs_per_arm, arm_idxs, self.n_arms
            )
            sum_reward_per_arm = _place_values_at_indices(
                sum_reward_per_arm, arm_idxs, self.n_arms
            )
        self.total_n_obs_per_arm += n_obs_per_arm
        self.total_sum_reward_per_arm += sum_reward_per_arm
        self.total_n_obs_all_arms += int(n_obs_per_arm.sum())

    def add_single_observation(self, arm_id: int, reward: float):
        """Add a single (arm played, reward) observation."""
        assert arm_id in self.arm_ids
        arm_idx = self.arm_ids.index(arm_id)
        self.total_n_obs_per_arm[arm_idx] += 1
        self.total_sum_reward_per_arm[arm_idx] += reward
        self.total_n_obs_all_arms += 1

    def get_avg_reward_values(self) -> Tensor:
        # NaN for arms with zero observations (0/0 division).
        return self.total_sum_reward_per_arm / self.total_n_obs_per_arm

    def get_action(self) -> Union[str, int]:
        """
        Get the id of the action chosen by the UCB algorithm

        Returns:
            int: The integer ID of the chosen action
        """
        ucb_scores = self.get_ucb_scores()
        return self.arm_ids[torch.argmax(ucb_scores)]

    @classmethod
    def get_ucb_scores_from_batch(
        cls,
        n_obs_per_arm: Tensor,
        sum_reward_per_arm: Tensor,
        *args,
        **kwargs,
    ) -> Tensor:
        """
        A utility method used to create the bandit, feed in a batch of observations and get the UCB scores in one function call

        Args:
            n_obs_per_arm (Tensor): An array of counts of per-arm numbers of observations
            sum_reward_per_arm (Tensor): An array of sums of rewards for each arm
            (additional arguments can be provided for specific concrete class implementations)
        Returns:
            Tensor: Array of per-arm UCB scores
        """
        n_arms = len(n_obs_per_arm)
        b = cls(n_arms=n_arms)
        b.add_batch_observations(n_obs_per_arm, sum_reward_per_arm, *args, **kwargs)
        return b.get_ucb_scores()

    @abstractmethod
    def get_ucb_scores(self):
        """Compute per-arm UCB scores (implemented by subclasses)."""
        pass

    def __repr__(self):
        per_arm_summary = ", ".join(
            f"{v:.3f} ({int(n)})"
            for v, n in zip(self.get_avg_reward_values(), self.total_n_obs_per_arm)
        )
        # Bug fix: the closing parenthesis of "UCB(...)" was missing.
        return f"UCB({self.n_arms} arms; {per_arm_summary})"

    def forward(self):
        return self.get_ucb_scores()
class UCB1(BaseUCB):
    """
    Canonical implementation of UCB1
    Reference: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf
    """

    def get_ucb_scores(self):
        """
        Get per-arm UCB scores. The formula is
        UCB_i = AVG([rewards_i]) + SQRT(2*LN(T)/N_i)

        Arms with no observations get an infinite score so that every arm
        is tried at least once.

        Returns:
            Tensor: An array of UCB scores (one per arm)
        """
        mean_reward = self.get_avg_reward_values()
        log_term = math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm
        scores = mean_reward + torch.sqrt(2 * log_term)
        never_pulled = self.total_n_obs_per_arm <= 0
        return torch.where(
            never_pulled,
            torch.tensor(torch.inf, dtype=torch.float),
            scores,
        )
class UCBTuned(BaseUCB):
    """
    Implementation of the UCB-Tuned algorithm from Section 4 of https://link.springer.com/content/pdf/10.1023/A:1013689704352.pdf
    Biggest difference from basic UCB is that per-arm reward variance is estimated.
    """

    def __init__(
        self,
        n_arms: Optional[int] = None,
        arm_ids: Optional[List[Union[str, int]]] = None,
    ):
        super(UCBTuned, self).__init__(n_arms=n_arms, arm_ids=arm_ids)
        # Tracks sum(reward^2) per arm, needed for the variance estimate.
        self.total_sum_reward_squared_per_arm = torch.zeros(self.n_arms)

    def add_batch_observations(
        self,
        n_obs_per_arm: Tensor,
        sum_reward_per_arm: Tensor,
        sum_reward_squared_per_arm: Tensor,
        arm_ids: Optional[List[Union[str, int]]] = None,
    ):
        """
        Add information about arm rewards in a batched form.

        Args:
            n_obs_per_arm (Tensor): An array of counts of per-arm numbers of observations
            sum_reward_per_arm (Tensor): An array of sums of rewards for each arm
            sum_reward_squared_per_arm (Tensor): An array of sums of squares of rewards for each arm
            arm_ids (Optional[List[Union[str, int]]]): A list of ids of arms in the same order as the elements of previous arrays
        """
        assert len(sum_reward_per_arm) == len(sum_reward_squared_per_arm)
        super().add_batch_observations(
            n_obs_per_arm, sum_reward_per_arm, arm_ids=arm_ids
        )
        # Bug fix: convert the *squared*-reward sums when THEY are not a
        # Tensor — the old code mistakenly tested sum_reward_per_arm here.
        if not isinstance(sum_reward_squared_per_arm, Tensor):
            sum_reward_squared_per_arm = torch.tensor(
                sum_reward_squared_per_arm, dtype=torch.float
            )
        if arm_ids is None or arm_ids == self.arm_ids:
            # assume that the observations are for all arms in the default order
            arm_idxs = list(range(self.n_arms))
        else:
            assert len(arm_ids) == len(
                set(arm_ids)
            )  # make sure no duplicates in arm IDs
            # get the indices of the arms
            arm_idxs = _get_arm_indices(self.arm_ids, arm_ids)
            # put elements from the batch in the positions specified by `arm_ids` (missing arms will be zero)
            sum_reward_squared_per_arm = _place_values_at_indices(
                sum_reward_squared_per_arm, arm_idxs, self.n_arms
            )
        self.total_sum_reward_squared_per_arm += sum_reward_squared_per_arm

    def add_single_observation(self, arm_id: int, reward: float):
        """
        Add a single observation (arm played, reward) to the bandit

        Args:
            arm_id (int): Which arm was played
            reward (float): Reward generated by the arm
        """
        super().add_single_observation(arm_id, reward)
        arm_idx = self.arm_ids.index(arm_id)
        self.total_sum_reward_squared_per_arm[arm_idx] += reward ** 2

    def get_ucb_scores(self) -> Tensor:
        """
        Get per-arm UCB scores. The formula is
        UCB_i = AVG([rewards_i]) + SQRT(LN(T)/N_i * V_i)
        where V_i is a conservative variance estimate of arm i:
            V_i = AVG([rewards_i**2]) - AVG([rewards_i])**2 + sqrt(2ln(t) / n_i)
        Note that we don't apply the min(1/4, ...) operator to the variance because this bandit is meant for non-Bernoulli applications as well

        Returns:
            Tensor: An array of UCB scores (one per arm)
        """
        avg_rewards = self.get_avg_reward_values()
        log_t_over_ni = (
            math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm
        )
        per_arm_var_est = (
            self.total_sum_reward_squared_per_arm / self.total_n_obs_per_arm
            - avg_rewards ** 2
            + torch.sqrt(
                2 * log_t_over_ni
            )  # additional term to make the estimate conservative (unlikely to underestimate)
        )
        ucb = avg_rewards + torch.sqrt(log_t_over_ni * per_arm_var_est)
        # Arms with no observations get an infinite score so they're tried first.
        return torch.where(
            self.total_n_obs_per_arm > 0,
            ucb,
            torch.tensor(torch.inf, dtype=torch.float),
        )
class UCBTunedBernoulli(UCBTuned):
    """UCB-Tuned specialized to Bernoulli (0/1) rewards.

    With 0/1 rewards, reward^2 == reward, so the per-arm sum of squared
    rewards equals the number of successes and callers pass the success
    counts only once.
    """

    def add_batch_observations(
        self,
        n_obs_per_arm: Tensor,
        num_success_per_arm: Tensor,
        arm_ids: Optional[List[Union[str, int]]] = None,
    ):
        """
        Add a batch of observations to the UCBTuned bandit, assuming Bernoulli distribution of rewards.
        Because of the Bernoulli assumption, we don't need to provide the squared rewards separately

        Args:
            n_obs_per_arm (Tensor): An array of counts of per-arm numbers of observations
            num_success_per_arm (Tensor): An array of counts of per-arm numbers of successes
            arm_ids (Optional[List[Union[str, int]]]): Arm ids in the same
                order as the preceding arrays (defaults to all arms)
        """
        # num_success_per_arm doubles as the sum of squared rewards.
        super().add_batch_observations(
            n_obs_per_arm, num_success_per_arm, num_success_per_arm, arm_ids=arm_ids
        )
class MetricUCB(BaseUCB):
    """
    This is an improvement over UCB1 which uses a more precise confidence radius, especially for small expected rewards.
    Reference: https://arxiv.org/pdf/0809.4882.pdf
    """

    def get_ucb_scores(self):
        """
        Get per-arm UCB scores. The formula is
        UCB_i = AVG([rewards_i]) + SQRT(AVG([rewards_i]) * LN(T+1)/N_i) + LN(T+1)/N_i

        Returns:
            Tensor: An array of UCB scores (one per arm)
        """
        mean_reward = self.get_avg_reward_values()
        radius_base = (
            math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm
        )
        scores = mean_reward + torch.sqrt(mean_reward * radius_base) + radius_base
        # Unexplored arms get +inf so that each arm is tried at least once.
        return torch.where(
            self.total_n_obs_per_arm > 0,
            scores,
            torch.tensor(torch.inf, dtype=torch.float),
        )
def get_bernoulli_tuned_ucb_scores(n_obs_per_arm, num_success_per_arm):
    """Minimalistic Tuned-UCB scores for a Bernoulli bandit.

    Args:
        n_obs_per_arm (Tensor): per-arm observation counts
        num_success_per_arm (Tensor): per-arm success counts

    Returns:
        Tensor: per-arm UCB scores
    """
    # Bug fix: the empirical mean is successes / observations (it was
    # inverted), and log(T)/n_i divides by the observation counts, not the
    # success counts — matching UCBTuned.get_ucb_scores.
    avg_rewards = num_success_per_arm / n_obs_per_arm
    log_t_over_ni = torch.log(torch.sum(n_obs_per_arm)) / n_obs_per_arm
    per_arm_var_est = (
        avg_rewards
        - avg_rewards ** 2
        + torch.sqrt(
            2 * log_t_over_ni
        )  # additional term to make the estimate conservative (unlikely to underestimate)
    )
    return avg_rewards + torch.sqrt(log_t_over_ni * per_arm_var_est)
| 12,188 | 34.85 | 143 | py |
ReAgent | ReAgent-master/reagent/gym/utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import random
from typing import Dict, List, Optional
import gym
import numpy as np
import pandas as pd
import torch # @manual
import torch.nn.functional as F
from gym import spaces
from reagent.core.parameters import NormalizationData, NormalizationKey, ProblemDomain
from reagent.gym.agents.agent import Agent
from reagent.gym.agents.post_step import add_replay_buffer_post_step
from reagent.gym.envs import EnvWrapper
from reagent.gym.normalizers import (
only_continuous_normalizer,
discrete_action_normalizer,
only_continuous_action_normalizer,
)
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.runners.gymrunner import run_episode
from reagent.replay_memory import ReplayBuffer
from tqdm import tqdm
logger = logging.getLogger(__name__)
SEED = 0
try:
from reagent.gym.envs import RecSim # noqa
HAS_RECSIM = True
except ImportError:
HAS_RECSIM = False
def fill_replay_buffer(
    env, replay_buffer: ReplayBuffer, desired_size: int, agent: Agent
):
    """Fill replay buffer with transitions until size reaches desired_size.

    Episodes are run with `agent` in `env`; each transition is routed into
    `replay_buffer` via a post-step callback. Stops early if an episode
    adds no new transitions.

    Args:
        env: environment to roll out in (must expose `max_steps`)
        replay_buffer: buffer to fill; its capacity must be >= desired_size
        desired_size: target number of stored transitions (> 0)
        agent: agent used to act; its post_transition_callback is replaced
    """
    assert (
        0 < desired_size and desired_size <= replay_buffer._replay_capacity
    ), f"It's not true that 0 < {desired_size} <= {replay_buffer._replay_capacity}."
    assert replay_buffer.size < desired_size, (
        f"Replay buffer already has {replay_buffer.size} elements. "
        f"(more than desired_size = {desired_size})"
    )
    logger.info(
        f" Starting to fill replay buffer using policy to size: {desired_size}."
    )
    # Route every transition the agent experiences into the replay buffer.
    post_step = add_replay_buffer_post_step(replay_buffer, env=env)
    agent.post_transition_callback = post_step
    max_episode_steps = env.max_steps
    with tqdm(
        total=desired_size - replay_buffer.size,
        desc=f"Filling replay buffer from {replay_buffer.size} to size {desired_size}",
    ) as pbar:
        mdp_id = 0
        while replay_buffer.size < desired_size:
            last_size = replay_buffer.size
            # Cap the episode length so the buffer doesn't overshoot.
            max_steps = desired_size - replay_buffer.size
            if max_episode_steps is not None:
                max_steps = min(max_episode_steps, max_steps)
            run_episode(env=env, agent=agent, mdp_id=mdp_id, max_steps=max_steps)
            size_delta = replay_buffer.size - last_size
            # The assertion below is commented out because it can't
            # support input samples which has seq_len>1. This should be
            # treated as a bug, and need to be fixed in the future.
            # assert (
            #     size_delta >= 0
            # ), f"size delta is {size_delta} which should be non-negative."
            pbar.update(n=size_delta)
            mdp_id += 1
            if size_delta <= 0:
                # replay buffer size isn't increasing... so stop early
                break
    if replay_buffer.size >= desired_size:
        logger.info(f"Successfully filled replay buffer to size: {replay_buffer.size}!")
    else:
        logger.info(
            f"Stopped early and filled replay buffer to size: {replay_buffer.size}."
        )
def build_state_normalizer(env: EnvWrapper):
    """Normalization parameters for the env's observation space.

    Supports 1-D Box spaces (continuous features); Dict spaces are assumed
    to be images and return None; anything else is unsupported.
    """
    obs_space = env.observation_space
    if isinstance(obs_space, spaces.Dict):
        # assuming env.observation_space is image
        return None
    if isinstance(obs_space, spaces.Box):
        assert (
            len(obs_space.shape) == 1
        ), f"{obs_space.shape} has dim > 1, and is not supported."
        feature_ids = list(range(obs_space.shape[0]))
        return only_continuous_normalizer(feature_ids, obs_space.low, obs_space.high)
    raise NotImplementedError(f"{obs_space} not supported")
def build_action_normalizer(env: EnvWrapper):
    """Normalization parameters for the env's action space.

    Discrete spaces get discrete-action normalization; 1-D Box spaces get
    continuous-action normalization bounded by the space's low/high.
    """
    action_space = env.action_space
    if isinstance(action_space, spaces.Discrete):
        return discrete_action_normalizer(list(range(action_space.n)))
    if isinstance(action_space, spaces.Box):
        assert (
            len(action_space.shape) == 1
        ), f"Box action shape {action_space.shape} not supported."
        return only_continuous_action_normalizer(
            list(range(action_space.shape[0])),
            min_value=action_space.low,
            max_value=action_space.high,
        )
    raise NotImplementedError(f"{action_space} not supported.")
def build_normalizer(env: EnvWrapper) -> Dict[str, NormalizationData]:
    """Build normalization data for an environment.

    Prefers the env's own `normalization_data` attribute; otherwise derives
    it from the observation/action spaces, with a special case for RecSim
    envs whose user (state) and doc (item) spaces are normalized separately.
    """
    try:
        return env.normalization_data
    except AttributeError:
        # TODO: make this a property of EnvWrapper?
        # pyre-fixme[16]: Module `envs` has no attribute `RecSim`.
        if HAS_RECSIM and isinstance(env, RecSim):
            return {
                NormalizationKey.STATE: NormalizationData(
                    dense_normalization_parameters=only_continuous_normalizer(
                        list(range(env.observation_space["user"].shape[0]))
                    )
                ),
                NormalizationKey.ITEM: NormalizationData(
                    dense_normalization_parameters=only_continuous_normalizer(
                        list(range(env.observation_space["doc"]["0"].shape[0]))
                    )
                ),
            }
        return {
            NormalizationKey.STATE: NormalizationData(
                dense_normalization_parameters=build_state_normalizer(env)
            ),
            NormalizationKey.ACTION: NormalizationData(
                dense_normalization_parameters=build_action_normalizer(env)
            ),
        }
def create_df_from_replay_buffer(
    env,
    problem_domain: ProblemDomain,
    desired_size: int,
    multi_steps: Optional[int],
    ds: str,
    shuffle_df: bool = True,
) -> pd.DataFrame:
    """Roll out a random policy in ``env`` and convert the collected
    transitions into a pandas DataFrame in the offline-training data format.

    Args:
        env: gym-style environment (also seeded here with the module SEED).
        problem_domain: controls action encoding — str for discrete/MDN_RNN,
            one-hot sparse dict for parametric, sparse dict for continuous.
        desired_size: number of rows in the returned DataFrame.
        multi_steps: when not None, each row carries lists of the next
            ``multi_steps`` transitions (timeline format).
        ds: partition/date string copied into every row's "ds" column.
        shuffle_df: when True, randomly permute the rows before returning.

    Returns:
        pd.DataFrame with one row per sampled transition.
    """
    # fill the replay buffer
    set_seed(env, SEED)
    if multi_steps is None:
        update_horizon = 1
        return_as_timeline_format = False
    else:
        update_horizon = multi_steps
        return_as_timeline_format = True
    is_multi_steps = multi_steps is not None
    # The last element of replay buffer always lacks
    # next_action and next_possible_actions.
    # To get full data for every returned sample, we create
    # replay buffer of desired_size + 1 and discard the last element.
    replay_buffer = ReplayBuffer(
        replay_capacity=desired_size + 1,
        batch_size=1,
        update_horizon=update_horizon,
        return_as_timeline_format=return_as_timeline_format,
    )
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(env, replay_buffer, desired_size + 1, agent)
    batch = replay_buffer.sample_transition_batch(
        batch_size=desired_size, indices=torch.arange(desired_size)
    )
    n = batch.state.shape[0]
    logger.info(f"Creating df of size {n}.")

    def discrete_feat_transform(elem) -> str:
        """query data expects str format"""
        return str(elem.item())

    def continuous_feat_transform(elem: List[float]) -> Dict[int, float]:
        """query data expects sparse format"""
        assert isinstance(elem, torch.Tensor), f"{type(elem)} isn't tensor"
        assert len(elem.shape) == 1, f"{elem.shape} isn't 1-dimensional"
        return {i: s.item() for i, s in enumerate(elem)}

    def make_parametric_feat_transform(one_hot_dim: int):
        """one-hot and then continuous_feat_transform"""

        def transform(elem) -> Dict[int, float]:
            elem_tensor = torch.tensor(elem.item())
            one_hot_feat = F.one_hot(elem_tensor, one_hot_dim).float()
            return continuous_feat_transform(one_hot_feat)

        return transform

    # states are always sparse continuous features
    state_features = feature_transform(batch.state, continuous_feat_transform)
    next_state_features = feature_transform(
        batch.next_state,
        continuous_feat_transform,
        is_next_with_multi_steps=is_multi_steps,
    )

    # Encode actions per problem domain; next_action is replaced with an
    # empty value ("" or {}) on terminal transitions.
    if problem_domain == ProblemDomain.DISCRETE_ACTION:
        # discrete action is str
        action = feature_transform(batch.action, discrete_feat_transform)
        next_action = feature_transform(
            batch.next_action,
            discrete_feat_transform,
            is_next_with_multi_steps=is_multi_steps,
            replace_when_terminal="",
            terminal=batch.terminal,
        )
    elif problem_domain == ProblemDomain.PARAMETRIC_ACTION:
        # continuous action is Dict[int, double]
        assert isinstance(env.action_space, gym.spaces.Discrete)
        parametric_feat_transform = make_parametric_feat_transform(env.action_space.n)
        action = feature_transform(batch.action, parametric_feat_transform)
        next_action = feature_transform(
            batch.next_action,
            parametric_feat_transform,
            is_next_with_multi_steps=is_multi_steps,
            replace_when_terminal={},
            terminal=batch.terminal,
        )
    elif problem_domain == ProblemDomain.CONTINUOUS_ACTION:
        action = feature_transform(batch.action, continuous_feat_transform)
        next_action = feature_transform(
            batch.next_action,
            continuous_feat_transform,
            is_next_with_multi_steps=is_multi_steps,
            replace_when_terminal={},
            terminal=batch.terminal,
        )
    elif problem_domain == ProblemDomain.MDN_RNN:
        action = feature_transform(batch.action, discrete_feat_transform)
        # MDN_RNN requires timeline (multi-step) format
        assert multi_steps is not None
        next_action = feature_transform(
            batch.next_action,
            discrete_feat_transform,
            is_next_with_multi_steps=True,
            replace_when_terminal="",
            terminal=batch.terminal,
        )
    else:
        raise NotImplementedError(f"model type: {problem_domain}.")

    # Rewards/metrics/time_diff: scalar-per-row for single-step, list-per-row
    # for multi-step (timeline) format.
    if multi_steps is None:
        time_diff = [1] * n
        reward = batch.reward.squeeze(1).tolist()
        metrics = [{"reward": r} for r in reward]
    else:
        time_diff = [[1] * len(ns) for ns in next_state_features]
        reward = [reward_list.tolist() for reward_list in batch.reward]
        metrics = [
            [{"reward": r.item()} for r in reward_list] for reward_list in batch.reward
        ]

    # TODO(T67265031): change this to int
    mdp_id = [str(i.item()) for i in batch.mdp_id]
    sequence_number = batch.sequence_number.squeeze(1).tolist()
    # in the product data, all sequence_number_ordinal start from 1.
    # So to be consistent with the product data.
    sequence_number_ordinal = (batch.sequence_number.squeeze(1) + 1).tolist()
    action_probability = batch.log_prob.exp().squeeze(1).tolist()
    df_dict = {
        "state_features": state_features,
        "next_state_features": next_state_features,
        "action": action,
        "next_action": next_action,
        "reward": reward,
        "action_probability": action_probability,
        "metrics": metrics,
        "time_diff": time_diff,
        "mdp_id": mdp_id,
        "sequence_number": sequence_number,
        "sequence_number_ordinal": sequence_number_ordinal,
        "ds": [ds] * n,
    }

    # Possible actions columns (only for domains with a discrete action set).
    if problem_domain == ProblemDomain.PARAMETRIC_ACTION:
        # Possible actions are List[Dict[int, float]]
        assert isinstance(env.action_space, gym.spaces.Discrete)
        possible_actions = [{i: 1.0} for i in range(env.action_space.n)]
    elif problem_domain == ProblemDomain.DISCRETE_ACTION:
        # Possible actions are List[str]
        assert isinstance(env.action_space, gym.spaces.Discrete)
        possible_actions = [str(i) for i in range(env.action_space.n)]
    elif problem_domain == ProblemDomain.MDN_RNN:
        # Possible actions are List[str]
        assert isinstance(env.action_space, gym.spaces.Discrete)
        possible_actions = [str(i) for i in range(env.action_space.n)]

    # these are fillers, which should have correct shape
    pa_features = range(n)
    pna_features = time_diff
    if problem_domain in (
        ProblemDomain.DISCRETE_ACTION,
        ProblemDomain.PARAMETRIC_ACTION,
        ProblemDomain.MDN_RNN,
    ):

        def pa_transform(x):
            # same full action set for every row; x is only a shape filler
            return possible_actions

        df_dict["possible_actions"] = feature_transform(pa_features, pa_transform)
        df_dict["possible_next_actions"] = feature_transform(
            pna_features,
            pa_transform,
            is_next_with_multi_steps=is_multi_steps,
            replace_when_terminal=[],
            terminal=batch.terminal,
        )

    df = pd.DataFrame(df_dict)
    # validate df
    validate_mdp_ids_seq_nums(df)
    if shuffle_df:
        # shuffling (sample the whole batch)
        df = df.reindex(np.random.permutation(df.index))
    return df
def set_seed(env: gym.Env, seed: int):
    """Seed numpy, python random, torch, the env, and its action space."""
    seeders = (
        np.random.seed,
        random.seed,
        torch.manual_seed,
        env.seed,
        env.action_space.seed,
    )
    for seeder in seeders:
        seeder(seed)
def feature_transform(
    features,
    single_elem_transform,
    is_next_with_multi_steps=False,
    replace_when_terminal=None,
    terminal=None,
):
    """Apply ``single_elem_transform`` to every element of a batch.

    ``features`` is List[feature] (a batch). For "next" features with
    multi-step transitions (``is_next_with_multi_steps=True``) it is
    List[List[feature]]: the outer list is the batch, the inner list is
    the sequence of features in one row.

    When ``terminal`` is given, terminal rows have their (last) feature
    replaced by ``replace_when_terminal``.
    """

    def _transform_row(row):
        return [single_elem_transform(feat) for feat in row]

    if is_next_with_multi_steps:
        if terminal is None:
            return [_transform_row(row) for row in features]
        # for next features where we replace them when terminal
        assert replace_when_terminal is not None
        result = []
        for idx, row in enumerate(features):
            if terminal[idx]:
                # drop the (garbage) final step and substitute the sentinel
                result.append(_transform_row(row[:-1]) + [replace_when_terminal])
            else:
                result.append(_transform_row(row))
        return result

    if terminal is None:
        return _transform_row(features)
    assert replace_when_terminal is not None
    return [
        replace_when_terminal if terminal[idx] else single_elem_transform(feat)
        for idx, feat in enumerate(features)
    ]
def validate_mdp_ids_seq_nums(df):
    """Sanity-check the (mdp_id, sequence_number) columns of a training df.

    Verifies that (a) within each contiguous run of an mdp_id, sequence
    numbers increase by exactly 1, and (b) each mdp_id forms exactly one
    contiguous run (episodes are not broken up / interleaved).

    Args:
        df: DataFrame with "mdp_id" and "sequence_number" columns.

    Raises:
        AssertionError: if sequence numbers are not consecutive within an
            episode, or an mdp_id appears in more than one contiguous run.
    """
    mdp_ids = list(df["mdp_id"])
    sequence_numbers = list(df["sequence_number"])
    unique_mdp_ids = set(mdp_ids)
    prev_mdp_id, prev_seq_num = None, None
    mdp_count = 0
    for mdp_id, seq_num in zip(mdp_ids, sequence_numbers):
        if prev_mdp_id is None or mdp_id != prev_mdp_id:
            # start of a new contiguous episode run
            mdp_count += 1
            prev_mdp_id = mdp_id
        else:
            # Bug fix: the old message claimed "got {seq_num} <= {prev_seq_num}",
            # which misreports failures where seq_num skips ahead; the actual
            # requirement is seq_num == prev_seq_num + 1.
            assert seq_num == prev_seq_num + 1, (
                f"For mdp_id {mdp_id}, got seq_num {seq_num} after {prev_seq_num}. "
                f"Sequence numbers must increase by exactly 1.\n"
                f"Zip(mdp_id, seq_num): "
                f"{list(zip(mdp_ids, sequence_numbers))}"
            )
        prev_seq_num = seq_num
    # If an mdp_id appeared in more than one run, mdp_count > len(unique_mdp_ids).
    assert len(unique_mdp_ids) == mdp_count, "MDPs are broken up. {} vs {}".format(
        len(unique_mdp_ids), mdp_count
    )
    return
| 15,498 | 36.167866 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/types.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Please DO NOT import gym in here. We might have installation without gym depending on
# this module for typing
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, field, fields
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
@dataclass
class Transition(rlt.BaseDataClass):
    """A single environment step: ids, (observation, action, reward,
    terminal), plus optional log-prob / action mask / info extras."""

    mdp_id: int
    sequence_number: int
    observation: Any
    action: Any
    reward: float
    terminal: bool
    log_prob: Optional[float] = None
    possible_actions_mask: Optional[np.ndarray] = None
    info: Optional[Dict] = None

    # Same as asdict but filters out none values.
    def asdict(self):
        return {k: v for k, v in asdict(self).items() if v is not None}
def get_optional_fields(cls) -> List[str]:
    """Return the names of the dataclass fields annotated as Optional[...]."""
    # A field counts as optional when NoneType appears among its annotation's
    # type arguments (Optional[X] is Union[X, None], whose __args__ contain
    # NoneType); non-generic annotations like `int` have no __args__.
    return [
        f.name
        for f in fields(cls)
        if hasattr(f.type, "__args__") and type(None) in f.type.__args__
    ]
@dataclass
class Trajectory(rlt.BaseDataClass):
    """An ordered list of Transitions forming (part of) one episode."""

    transitions: List[Transition] = field(default_factory=list)

    def __post_init__(self):
        # Per optional Transition field: whether this trajectory's transitions
        # fill it. Decided by the first transition added, then enforced.
        self.optional_field_exist: Dict[str, bool] = {
            f: False for f in get_optional_fields(Transition)
        }

    def __len__(self):
        return len(self.transitions)

    def add_transition(self, transition: Transition):
        """Append a transition, enforcing that all transitions in the
        trajectory fill the same set of optional fields.

        Raises:
            ValueError: if an optional field's presence disagrees with the
                first transition's.
        """
        if len(self) == 0:
            # remember which optional fields should be filled
            for f in self.optional_field_exist:
                val = getattr(transition, f, None)
                if val is not None:
                    self.optional_field_exist[f] = True
        # check that later additions also fill the same optional fields
        for f, should_exist in self.optional_field_exist.items():
            val = getattr(transition, f, None)
            if (val is not None) != should_exist:
                raise ValueError(
                    f"Field {f} given val {val} whereas should_exist is {should_exist}."
                )
        self.transitions.append(transition)

    def __getattr__(self, attr: str):
        # Vectorized attribute access: e.g. trajectory.reward returns the
        # list of each transition's reward. (Only invoked for names not
        # found on the instance itself.)
        ret = []
        for transition in self.transitions:
            ret.append(getattr(transition, attr))
        return ret

    def calculate_cumulative_reward(self, gamma: float = 1.0):
        """Return (discounted) sum of rewards."""
        num_transitions = len(self)
        assert num_transitions > 0, "called on empty trajectory"
        rewards = self.reward
        discounts = [gamma ** i for i in range(num_transitions)]
        return sum(reward * discount for reward, discount in zip(rewards, discounts))

    def to_dict(self):
        """Stack per-transition fields into tensors keyed by field name.

        NOTE(review): the one-hot hard-codes 2 action classes — presumably
        only used with binary-action envs; confirm before reusing elsewhere.
        Optional fields absent from this trajectory are skipped.
        """
        d = {"action": F.one_hot(torch.from_numpy(np.stack(self.action)), 2)}
        for f in [
            "observation",
            "reward",
            "terminal",
            "log_prob",
            "possible_actions_mask",
        ]:
            if self.optional_field_exist.get(f, True):
                f_value = getattr(self, f)
                if np.isscalar(f_value[0]):
                    # scalar values
                    d[f] = torch.tensor(f_value)
                else:
                    # vector values, need to stack
                    d[f] = torch.from_numpy(np.stack(f_value)).float()
        return d
class Sampler(ABC):
    """Given scores, select the action."""

    @abstractmethod
    def sample_action(self, scores: Any) -> rlt.ActorOutput:
        """Select an action (with its log-prob) from the given scores."""
        raise NotImplementedError()

    @abstractmethod
    def log_prob(self, scores: Any, action: torch.Tensor) -> torch.Tensor:
        """Return the log-probability of ``action`` under ``scores``."""
        raise NotImplementedError()

    def update(self) -> None:
        """Call to update internal parameters (e.g. decay epsilon)"""
        pass
# From preprocessed observation, produce scores for sampler to select action.
# Discrete scorers additionally take an optional possible-actions mask.
DiscreteScorer = Callable[[Any, Optional[torch.Tensor]], Any]
ContinuousScorer = Callable[[Any], Any]
Scorer = Union[DiscreteScorer, ContinuousScorer]
# Transform ReplayBuffer's transition batch to trainer.train
TrainerPreprocessor = Callable[[Any], Any]
""" Called after env.step(action)
Args: (state, action, reward, terminal, log_prob)
"""
PostStep = Callable[[Transition], None]
""" Called after end of episode
"""
PostEpisode = Callable[[Trajectory], None]
@dataclass
class GaussianSamplerScore(rlt.BaseDataClass):
    """Parameters of a Gaussian action distribution: mean (``loc``) and
    log of the scale (``scale_log``)."""

    loc: torch.Tensor
    scale_log: torch.Tensor
| 4,633 | 30.52381 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/preprocessors/default_preprocessors.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
""" Get default preprocessors for training time. """
import logging
from typing import List, Optional, Tuple
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from gym import Env, spaces
logger = logging.getLogger(__name__)
#######################################
### Default obs preprocessors.
### These should operate on single obs.
#######################################
class RecsimObsPreprocessor:
    """Converts a single RecSim observation dict into rlt.FeatureData.

    Doc features can come from a Dict sub-space (a mix of discrete keys,
    which are one-hot encoded, and box keys, which are stacked) or from a
    plain Box sub-space (stacked as-is).
    """

    def __init__(
        self,
        *,
        num_docs: int,
        discrete_keys: List[Tuple[str, int]],
        box_keys: List[Tuple[str, int]],
    ):
        # num_docs: number of candidate documents per observation
        # discrete_keys: (key, cardinality) pairs for discrete doc features
        # box_keys: (key, dim) pairs for continuous doc features
        self.num_docs = num_docs
        self.discrete_keys = discrete_keys
        self.box_keys = box_keys

    @classmethod
    def create_from_env(cls, env: Env, **kwargs):
        """Inspect ``env.observation_space`` and build a matching
        preprocessor.

        Requires a Dict observation space with a Box "user" sub-space and a
        Dict "doc" sub-space; the per-doc space may be Dict or Box.

        Raises:
            NotImplementedError: for any unsupported sub-space layout.
        """
        obs_space = env.observation_space
        assert isinstance(obs_space, spaces.Dict)
        user_obs_space = obs_space["user"]
        if not isinstance(user_obs_space, spaces.Box):
            raise NotImplementedError(
                f"User observation space {type(user_obs_space)} is not supported"
            )
        doc_obs_space = obs_space["doc"]
        if not isinstance(doc_obs_space, spaces.Dict):
            raise NotImplementedError(
                f"Doc space {type(doc_obs_space)} is not supported"
            )
        # Assume that all docs are in the same space
        discrete_keys: List[Tuple[str, int]] = []
        box_keys: List[Tuple[str, int]] = []
        key_0 = next(iter(doc_obs_space.spaces))
        doc_0_space = doc_obs_space[key_0]
        if isinstance(doc_0_space, spaces.Dict):
            for k, v in doc_obs_space[key_0].spaces.items():
                if isinstance(v, spaces.Discrete):
                    # NOTE(review): zero-cardinality discrete features are
                    # silently dropped here
                    if v.n > 0:
                        discrete_keys.append((k, v.n))
                elif isinstance(v, spaces.Box):
                    shape_dim = len(v.shape)
                    if shape_dim == 0:
                        # scalar box feature -> treated as 1-dim
                        box_keys.append((k, 1))
                    elif shape_dim == 1:
                        box_keys.append((k, v.shape[0]))
                    else:
                        raise NotImplementedError
                else:
                    raise NotImplementedError(
                        f"Doc feature {k} with the observation space of {type(v)}"
                        " is not supported"
                    )
        elif isinstance(doc_0_space, spaces.Box):
            pass
        else:
            raise NotImplementedError(f"Unknown space {doc_0_space}")
        return cls(
            num_docs=len(doc_obs_space.spaces),
            discrete_keys=sorted(discrete_keys),
            box_keys=sorted(box_keys),
            **kwargs,
        )

    def __call__(self, obs):
        """Preprocess one observation dict (keys "user", "doc",
        "augmentation") into rlt.FeatureData with a leading batch dim of 1."""
        user = torch.tensor(obs["user"]).float().unsqueeze(0)
        doc_obs = obs["doc"]
        if self.discrete_keys or self.box_keys:
            # Dict space
            discrete_features: List[torch.Tensor] = []
            for k, n in self.discrete_keys:
                vals = torch.tensor([v[k] for v in doc_obs.values()])
                assert vals.shape == (self.num_docs,)
                discrete_features.append(F.one_hot(vals, n).float())
            box_features: List[torch.Tensor] = []
            for k, d in self.box_keys:
                vals = np.vstack([v[k] for v in doc_obs.values()])
                assert vals.shape == (self.num_docs, d)
                box_features.append(torch.tensor(vals).float())
            doc_features = torch.cat(discrete_features + box_features, dim=1).unsqueeze(
                0
            )
        else:
            # Simply a Box space
            vals = np.vstack(list(doc_obs.values()))
            doc_features = torch.tensor(vals).float().unsqueeze(0)
        # This comes from ValueWrapper
        value = (
            torch.tensor([v["value"] for v in obs["augmentation"].values()])
            .float()
            .unsqueeze(0)
        )
        candidate_docs = rlt.DocList(float_features=doc_features, value=value)
        return rlt.FeatureData(float_features=user, candidate_docs=candidate_docs)
| 4,247 | 32.714286 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/preprocessors/trainer_preprocessor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
""" Get default preprocessors for training time. """
import inspect
import logging
from typing import Dict, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.gym.types import Trajectory
from reagent.preprocessing.types import InputColumn
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import rescale_actions
logger = logging.getLogger(__name__)
# This is here to make typechecker happpy, sigh
ONLINE_MAKER_MAP = {}
REPLAY_BUFFER_MAKER_MAP = {}
def make_trainer_preprocessor(
    trainer: ReAgentLightningModule,
    device: torch.device,
    env: gym.Env,
    maker_map: Dict,
):
    """Create a function converting raw batches into the input type expected
    by ``trainer.train_step_gen`` and moving them to ``device``.

    The target input type is read off the annotation of the first parameter
    (which must be named ``training_batch``) and looked up in ``maker_map``.
    """
    assert isinstance(trainer, ReAgentLightningModule), f"{type(trainer)}"
    sig = inspect.signature(trainer.train_step_gen)
    logger.info(f"Deriving trainer_preprocessor from {sig.parameters}")
    # Assuming training_batch is in the first position (excluding self)
    first_param = list(sig.parameters.keys())[0]
    assert (
        first_param == "training_batch"
    ), f"{sig.parameters} doesn't have training batch in first position."
    batch_type = sig.parameters["training_batch"].annotation
    assert batch_type != inspect.Parameter.empty
    try:
        maker = maker_map[batch_type].create_for_env(env)
    except KeyError:
        logger.error(f"Unknown type: {batch_type}")
        raise

    def trainer_preprocessor(batch):
        return maker(batch).to(device)

    return trainer_preprocessor
def make_trainer_preprocessor_online(
    trainer: ReAgentLightningModule, device: torch.device, env: gym.Env
):
    """Trainer preprocessor for online (on-policy trajectory) batches."""
    return make_trainer_preprocessor(trainer, device, env, ONLINE_MAKER_MAP)
def make_replay_buffer_trainer_preprocessor(
    trainer: ReAgentLightningModule, device: torch.device, env: gym.Env
):
    """Trainer preprocessor for replay-buffer (off-policy) batches."""
    return make_trainer_preprocessor(trainer, device, env, REPLAY_BUFFER_MAKER_MAP)
def one_hot_actions(
    num_actions: int,
    action: torch.Tensor,
    next_action: torch.Tensor,
    terminal: torch.Tensor,
):
    """
    One-hot encode actions and non-terminal next actions.
    Input shape is (batch_size, 1). Output shape is (batch_size, num_actions)
    """
    shapes_ok = (
        len(action.shape) == 2
        and action.shape[1] == 1
        and next_action.shape == action.shape
    )
    assert shapes_ok, (
        f"Must be action with stack_size = 1, but "
        f"got shapes {action.shape}, {next_action.shape}"
    )
    encoded_action = F.one_hot(action, num_actions).squeeze(1).float()
    # Next actions on terminal transitions are garbage: leave them zeroed
    # and only one-hot encode the non-terminal rows.
    encoded_next_action = torch.zeros_like(encoded_action)
    live_rows = (terminal == 0).squeeze(1)
    encoded_next_action[live_rows] = (
        F.one_hot(next_action[live_rows], num_actions).squeeze(1).float()
    )
    return encoded_action, encoded_next_action
class DiscreteDqnInputMaker:
    """Converts replay-buffer batches into rlt.DiscreteDqnInput."""

    def __init__(self, num_actions: int, trainer_preprocessor=None):
        # trainer_preprocessor: optional env-provided state preprocessor;
        # when absent, states are wrapped as plain FeatureData.
        self.num_actions = num_actions
        self.trainer_preprocessor = trainer_preprocessor

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a discrete-action env, picking up an env-attached
        trainer_preprocessor when one exists."""
        action_space = env.action_space
        assert isinstance(action_space, gym.spaces.Discrete)
        try:
            return cls(
                num_actions=action_space.n,
                # pyre-fixme[16]: `Env` has no attribute `trainer_preprocessor`.
                trainer_preprocessor=env.trainer_preprocessor,
            )
        except AttributeError:
            return cls(num_actions=action_space.n)

    def __call__(self, batch):
        """Convert one sampled transition batch to rlt.DiscreteDqnInput."""
        not_terminal = 1.0 - batch.terminal.float()
        action, next_action = one_hot_actions(
            self.num_actions, batch.action, batch.next_action, batch.terminal
        )
        if self.trainer_preprocessor is not None:
            state = self.trainer_preprocessor(batch.state)
            next_state = self.trainer_preprocessor(batch.next_state)
        else:
            state = rlt.FeatureData(float_features=batch.state)
            next_state = rlt.FeatureData(float_features=batch.next_state)
        # Action masks default to all-ones when the batch doesn't carry them.
        try:
            possible_actions_mask = batch.possible_actions_mask.float()
        except AttributeError:
            possible_actions_mask = torch.ones_like(action).float()
        try:
            possible_next_actions_mask = batch.next_possible_actions_mask.float()
        except AttributeError:
            possible_next_actions_mask = torch.ones_like(next_action).float()

        return rlt.DiscreteDqnInput(
            state=state,
            action=action,
            next_state=next_state,
            next_action=next_action,
            possible_actions_mask=possible_actions_mask,
            possible_next_actions_mask=possible_next_actions_mask,
            reward=batch.reward,
            not_terminal=not_terminal,
            step=None,
            time_diff=None,
            extras=rlt.ExtraData(
                mdp_id=None,
                sequence_number=None,
                action_probability=batch.log_prob.exp(),
                max_num_actions=None,
                metrics=None,
            ),
        )
class PolicyNetworkInputMaker:
    """Converts replay-buffer batches into rlt.PolicyNetworkInput,
    rescaling env actions into the canonical training action range."""

    def __init__(self, action_low: np.ndarray, action_high: np.ndarray):
        # Env action bounds (source range for rescaling).
        self.action_low = torch.tensor(action_low)
        self.action_high = torch.tensor(action_high)
        # Canonical training action bounds (target range).
        (train_low, train_high) = CONTINUOUS_TRAINING_ACTION_RANGE
        self.train_low = torch.tensor(train_low)
        self.train_high = torch.tensor(train_high)

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a continuous (Box) action-space env."""
        action_space = env.action_space
        assert isinstance(action_space, gym.spaces.Box)
        return cls(action_space.low, action_space.high)

    def __call__(self, batch):
        """Convert one sampled transition batch to rlt.PolicyNetworkInput."""
        not_terminal = 1.0 - batch.terminal.float()
        # normalize actions
        action = rescale_actions(
            batch.action,
            new_min=self.train_low,
            new_max=self.train_high,
            prev_min=self.action_low,
            prev_max=self.action_high,
        )
        # only normalize non-terminal (terminal rows keep zeroed next_action)
        non_terminal_indices = (batch.terminal == 0).squeeze(1)
        next_action = torch.zeros_like(action)
        next_action[non_terminal_indices] = rescale_actions(
            batch.next_action[non_terminal_indices],
            new_min=self.train_low,
            new_max=self.train_high,
            prev_min=self.action_low,
            prev_max=self.action_high,
        )
        dict_batch = {
            InputColumn.STATE_FEATURES: batch.state,
            InputColumn.NEXT_STATE_FEATURES: batch.next_state,
            InputColumn.ACTION: action,
            InputColumn.NEXT_ACTION: next_action,
            InputColumn.REWARD: batch.reward,
            InputColumn.NOT_TERMINAL: not_terminal,
            InputColumn.STEP: None,
            InputColumn.TIME_DIFF: None,
            InputColumn.EXTRAS: rlt.ExtraData(
                mdp_id=None,
                sequence_number=None,
                action_probability=batch.log_prob.exp(),
                max_num_actions=None,
                metrics=None,
            ),
        }
        # Candidate (doc) features are optional; embed them into the state
        # representation when present.
        has_candidate_features = False
        try:
            dict_batch.update(
                {
                    InputColumn.CANDIDATE_FEATURES: batch.doc,
                    InputColumn.NEXT_CANDIDATE_FEATURES: batch.next_doc,
                }
            )
            has_candidate_features = True
        except AttributeError:
            pass
        output = rlt.PolicyNetworkInput.from_dict(dict_batch)
        if has_candidate_features:
            output.state = rlt._embed_states(output.state)
            output.next_state = rlt._embed_states(output.next_state)
        return output
class SlateQInputMaker:
    """Converts replay-buffer batches into rlt.SlateQInput, appending a
    synthetic "null" slot for the no-click case."""

    def __init__(self):
        # Name of the per-position response metric used as reward.
        self.metric = "watch_time"

    @classmethod
    def create_for_env(cls, env: gym.Env):
        return cls()

    def __call__(self, batch):
        """Convert one sampled transition batch to rlt.SlateQInput."""
        n = batch.state.shape[0]
        item_mask = torch.ones(batch.doc.shape[:2])
        next_item_mask = torch.ones(batch.doc.shape[:2])
        # TODO: abs value to make probability?
        item_probability = batch.augmentation_value  # .unsqueeze(2)
        next_item_probability = batch.next_augmentation_value  # .unsqueeze(2)
        # concat null action (index just past the real slate positions)
        null_action = torch.tensor([batch.action.shape[1]] * n, dtype=torch.int64).view(
            n, 1
        )
        action = torch.cat([batch.action, null_action], dim=1)
        next_action = torch.cat([batch.next_action, null_action], dim=1)
        # concat null reward to position wise reward
        position_reward = getattr(batch, f"response_{self.metric}")
        null_reward = torch.zeros((n, 1))
        position_reward = torch.cat([position_reward, null_reward], dim=1)
        # concat null mask when nothing clicked
        reward_mask = batch.response_click
        null_mask = (reward_mask.sum(dim=1) == 0).view(n, 1)
        reward_mask = torch.cat([reward_mask.to(torch.bool), null_mask], dim=1)
        dict_batch = {
            "state_features": batch.state,
            "next_state_features": batch.next_state,
            "candidate_features": batch.doc,
            "next_candidate_features": batch.next_doc,
            "item_mask": item_mask,
            "next_item_mask": next_item_mask,
            "item_probability": item_probability,
            "next_item_probability": next_item_probability,
            "action": action,
            "next_action": next_action,
            "position_reward": position_reward,
            "reward_mask": reward_mask,
            "time_diff": None,
            "not_terminal": ~batch.terminal,
        }
        return rlt.SlateQInput.from_dict(dict_batch)
class MemoryNetworkInputMaker:
    """Converts replay-buffer batches into rlt.MemoryNetworkInput,
    reshaping stacked fields to (stack_size, batch_size, feature_dim)."""

    def __init__(self, num_actions: Optional[int] = None):
        # num_actions: set for discrete envs (actions are one-hot encoded);
        # None for continuous (Box) envs.
        self.num_actions = num_actions

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from either a Discrete or Box action-space env."""
        action_space = env.action_space
        if isinstance(action_space, gym.spaces.Discrete):
            return cls(action_space.n)
        elif isinstance(action_space, gym.spaces.Box):
            return cls()
        else:
            raise NotImplementedError()

    def __call__(self, batch):
        """Convert one sampled transition batch to rlt.MemoryNetworkInput."""
        action = batch.action
        if self.num_actions is not None:
            assert len(action.shape) == 2, f"{action.shape}"
            # one hot makes shape (batch_size, stack_size, feature_dim)
            action = F.one_hot(batch.action, self.num_actions).float()
            # make shape to (batch_size, feature_dim, stack_size)
            action = action.transpose(1, 2)

        # For (1-dimensional) vector fields, RB returns (batch_size, state_dim)
        # or (batch_size, state_dim, stack_size).
        # We want these to all be (stack_size, batch_size, state_dim), so
        # unsqueeze the former case; Note this only happens for stack_size = 1.
        # Then, permute.
        permutation = [2, 0, 1]
        vector_fields = {
            "state": batch.state,
            "action": action,
            "next_state": batch.next_state,
        }
        for name, tensor in vector_fields.items():
            if len(tensor.shape) == 2:
                tensor = tensor.unsqueeze(2)
            assert len(tensor.shape) == 3, f"{name} has shape {tensor.shape}"
            vector_fields[name] = tensor.permute(permutation)

        # For scalar fields, RB returns (batch_size), or (batch_size, stack_size)
        # Do same as above, except transpose instead.
        scalar_fields = {
            "reward": batch.reward,
            "not_terminal": 1.0 - batch.terminal.float(),
        }
        for name, tensor in scalar_fields.items():
            if len(tensor.shape) == 1:
                tensor = tensor.unsqueeze(1)
            assert len(tensor.shape) == 2, f"{name} has shape {tensor.shape}"
            scalar_fields[name] = tensor.transpose(0, 1)

        # stack_size > 1, so let's pad not_terminal with 1's, since
        # previous states couldn't have been terminal..
        if scalar_fields["reward"].shape[0] > 1:
            batch_size = scalar_fields["reward"].shape[1]
            assert scalar_fields["not_terminal"].shape == (
                1,
                batch_size,
            ), f"{scalar_fields['not_terminal'].shape}"
            stacked_not_terminal = torch.ones_like(scalar_fields["reward"])
            stacked_not_terminal[-1] = scalar_fields["not_terminal"]
            scalar_fields["not_terminal"] = stacked_not_terminal

        dict_batch = {
            "state": vector_fields["state"],
            "next_state": vector_fields["next_state"],
            "action": vector_fields["action"],
            "reward": scalar_fields["reward"],
            "not_terminal": scalar_fields["not_terminal"],
            "step": None,
            "time_diff": None,
        }
        return rlt.MemoryNetworkInput.from_dict(dict_batch)
def get_possible_actions_for_gym(batch_size: int, num_actions: int) -> rlt.FeatureData:
    """
    tiled_actions should be (batch_size * num_actions, num_actions)
    forall i in [batch_size],
    tiled_actions[i*num_actions:(i+1)*num_actions] should be I[num_actions]
    where I[n] is the n-dimensional identity matrix.
    NOTE: this is only the case for when we convert discrete action to
    parametric action via one-hot encoding.
    """
    identity = torch.eye(num_actions)
    tiled_actions = identity.repeat(batch_size, 1)
    return rlt.FeatureData(float_features=tiled_actions)
class ParametricDqnInputMaker:
    """Converts replay-buffer batches into rlt.ParametricDqnInput, treating
    discrete actions as one-hot parametric actions."""

    def __init__(self, num_actions: int):
        self.num_actions = num_actions

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a discrete-action env."""
        action_space = env.action_space
        assert isinstance(action_space, gym.spaces.Discrete)
        return cls(action_space.n)

    def __call__(self, batch):
        """Convert one sampled transition batch to rlt.ParametricDqnInput."""
        not_terminal = 1.0 - batch.terminal.float()
        assert (
            len(batch.state.shape) == 2
        ), f"{batch.state.shape} is not (batch_size, state_dim)."
        batch_size, _ = batch.state.shape
        action, next_action = one_hot_actions(
            self.num_actions, batch.action, batch.next_action, batch.terminal
        )
        # Every action is always possible: tiled identity + all-ones masks.
        possible_actions = get_possible_actions_for_gym(batch_size, self.num_actions)
        possible_next_actions = possible_actions.clone()
        possible_actions_mask = torch.ones((batch_size, self.num_actions))
        possible_next_actions_mask = possible_actions_mask.clone()
        return rlt.ParametricDqnInput(
            state=rlt.FeatureData(float_features=batch.state),
            action=rlt.FeatureData(float_features=action),
            next_state=rlt.FeatureData(float_features=batch.next_state),
            next_action=rlt.FeatureData(float_features=next_action),
            possible_actions=possible_actions,
            possible_actions_mask=possible_actions_mask,
            possible_next_actions=possible_next_actions,
            possible_next_actions_mask=possible_next_actions_mask,
            reward=batch.reward,
            not_terminal=not_terminal,
            step=None,
            time_diff=None,
            extras=rlt.ExtraData(
                mdp_id=None,
                sequence_number=None,
                action_probability=batch.log_prob.exp(),
                max_num_actions=None,
                metrics=None,
            ),
        )
class PolicyGradientInputMaker:
    """Converts on-policy Trajectories into rlt.PolicyGradientInput."""

    def __init__(self, num_actions: Optional[int] = None, recsim_obs: bool = False):
        # num_actions: set for discrete envs (actions one-hot encoded).
        # recsim_obs: True when observations are RecSim user/doc dicts.
        self.num_actions = num_actions
        self.recsim_obs = recsim_obs

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a Discrete, Box, or (RecSim) MultiDiscrete action env."""
        action_space = env.action_space
        if isinstance(action_space, gym.spaces.Discrete):
            return cls(action_space.n)
        elif isinstance(action_space, gym.spaces.Box):
            return cls()
        elif isinstance(action_space, gym.spaces.MultiDiscrete):
            return cls(recsim_obs=True)
        else:
            raise NotImplementedError()

    def _get_recsim_state(self, observation):
        """Stack a list of RecSim observation dicts (keys "user", "doc")
        into a single batched rlt.FeatureData with candidate docs."""

        def _stack(slates):
            obs = rlt.FeatureData(
                float_features=torch.from_numpy(
                    np.stack(np.array([slate["user"] for slate in slates]))
                ),
                candidate_docs=rlt.DocList(
                    float_features=torch.from_numpy(
                        np.stack(np.array([slate["doc"] for slate in slates]))
                    )
                ),
            )
            return obs

        def _stack_slate(slate):
            # collapse the per-doc dict into one stacked array
            return {
                "user": slate["user"],
                "doc": np.stack(np.array(list(slate["doc"].values()))),
            }

        return _stack([_stack_slate(slate) for slate in observation])

    def __call__(self, trajectory: Trajectory):
        """Convert one Trajectory to rlt.PolicyGradientInput."""
        action = torch.from_numpy(np.stack(trajectory.action).squeeze())
        if self.num_actions is not None:
            action = F.one_hot(action, self.num_actions).float()
            assert len(action.shape) == 2, f"{action.shape}"
            # one hot makes shape (batch_size, num_actions)
        state = (
            self._get_recsim_state(trajectory.observation)
            if self.recsim_obs
            else rlt.FeatureData(
                torch.from_numpy(np.stack(trajectory.observation)).float()
            )
        )
        return rlt.PolicyGradientInput(
            state=state,
            action=action,
            reward=torch.tensor(trajectory.reward),
            log_prob=torch.tensor(trajectory.log_prob),
        )
# Registries from trainer input type -> maker class; consumed by
# make_trainer_preprocessor to pick the right batch converter.
ONLINE_MAKER_MAP = {rlt.PolicyGradientInput: PolicyGradientInputMaker}
REPLAY_BUFFER_MAKER_MAP = {
    rlt.DiscreteDqnInput: DiscreteDqnInputMaker,
    rlt.PolicyNetworkInput: PolicyNetworkInputMaker,
    rlt.MemoryNetworkInput: MemoryNetworkInputMaker,
    rlt.ParametricDqnInput: ParametricDqnInputMaker,
    rlt.SlateQInput: SlateQInputMaker,
}
| 18,178 | 36.637681 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/envs/oracle_pvm.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import OrderedDict
from typing import Callable, Dict, List
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass
from reagent.gym.envs import RecSim
from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor
from scipy import stats
logger = logging.getLogger(__name__)
# score function takes user and doc features, and outputs a score
SCORE_FUNCTION_T = Callable[[np.ndarray, np.ndarray], float]
def make_default_score_fn(fn_i: int) -> SCORE_FUNCTION_T:
    """
    Make ith score_fn (constructor of ith score)
    """

    def score(user: np.ndarray, doc: np.ndarray) -> float:
        # The i-th score is simply the doc's i-th feature; the user features
        # are accepted for interface compatibility but unused here.
        return doc[fn_i]

    return score
# Bounds of each tunable VM weight (defines the env's action space).
VM_WEIGHT_LOW = -1.0
VM_WEIGHT_HIGH = 1.0
# Reward granted when the policy ranking matches the oracle ranking exactly.
MATCH_REWARD_BOOST = 3.0
def get_default_score_fns(num_weights):
    """Construct one default score function per VM weight."""
    return list(map(make_default_score_fn, range(num_weights)))
def get_ground_truth_weights(num_weights):
    """All-ones hidden weights used to produce the oracle rankings."""
    return np.ones(num_weights, dtype=int)
@dataclass
class OraclePVM(RecSim):
    """
    Wrapper over RecSim for simulating (Personalized) VM Tuning.
    The state is the same as for RecSim (user feature + candidate features).
    There are num_weights VM weights to tune, and so action space is a vector
    of length num_weights.
    OraclePVM hides num_weights number of
    (1) score_fns (akin to VM models), that take in
    user + candidate_i feature and produces a score for candidate_i.
    (2) ground_truth_weights, that are used to produce "ground truth", a.k.a.
    "Oracle", rankings.
    Reward is the Kendall-Tau between ground truth and the ranking created from the
    weights given by action. If the rankings match exactly, the reward is boosted to 3.
    NOTE: This environment only tests if the Agent can learn the hidden ground
    truth weights, which may be far from optimal (in terms of RecSim's rewards,
    which we're ignoring). This is easier for unit tests, but in the real world
    we will be trying to learn the optimal weights, and the reward signal would
    reflect that.
    TODO: made environment easier to learn from by not using RecSim.
    """

    # Dimensions of the randomly generated user / per-candidate feature vectors.
    user_feat_dim: int = 1
    candidate_feat_dim: int = 3
    num_weights: int = 3

    def __post_init_post_parse__(self):
        # Rankings are compared elementwise in step(), so the slate must
        # cover every candidate.
        assert (
            self.slate_size == self.num_candidates
        ), f"Must be equal (slate_size) {self.slate_size} != (num_candidates) {self.num_candidates}"
        super().__post_init_post_parse__()
        # One hidden score function per weight; the oracle weights are all ones.
        self.score_fns: List[SCORE_FUNCTION_T] = get_default_score_fns(self.num_weights)
        # NOTE(review): annotated List[float] but get_ground_truth_weights
        # returns an np.ndarray — the `.shape` access below relies on that.
        self.ground_truth_weights: List[float] = get_ground_truth_weights(
            self.num_weights
        )
        assert len(self.score_fns) == len(
            self.ground_truth_weights
        ), f"{len(self.score_fns)} != {len(self.ground_truth_weights)}"
        assert (
            len(self.ground_truth_weights) == self.num_weights
        ), f"{self.ground_truth_weights.shape} != {self.num_weights}"

    def reset(self):
        """Reset RecSim, then overwrite user/doc features with fresh random ones."""
        self.prev_obs = super().reset()
        self.prev_obs.update(
            {
                "user": np.random.rand(self.user_feat_dim),
                "doc": OrderedDict(
                    [
                        (str(i), np.random.rand(self.candidate_feat_dim))
                        for i in range(self.num_candidates)
                    ]
                ),
            }
        )
        return self.prev_obs

    def step(self, action):
        """One step: `action` is the weight vector used to rank candidates.

        The observation is not advanced (the RecSim step is commented out),
        and the terminal flag is always True, so each episode is one step.
        """
        user_feat = self.prev_obs["user"]
        doc_feats = self.prev_obs["doc"]
        scores = self._get_scores(user_feat, doc_feats)
        ground_truth_ranking = self._get_ranking(scores, self.ground_truth_weights)
        policy_ranking = self._get_ranking(scores, action)
        t = True
        # comment out to avoid non-stationary
        # self.prev_obs, _, t, i = super().step(policy_ranking)

        # Boosted reward on an exact rank match; Kendall-Tau otherwise.
        num_matches = (ground_truth_ranking == policy_ranking).sum()
        if num_matches == self.slate_size:
            reward = MATCH_REWARD_BOOST
        else:
            reward, _p_value = stats.kendalltau(ground_truth_ranking, policy_ranking)
        return self.prev_obs, reward, t, None

    def is_match(self, reward):
        # for evaluation, return true iff the reward represents a match
        return reward > (MATCH_REWARD_BOOST - 1e-6)

    @property
    def action_space(self):
        # Actions are the num_weights VM weights, each in
        # [VM_WEIGHT_LOW, VM_WEIGHT_HIGH].
        return gym.spaces.Box(
            low=VM_WEIGHT_LOW, high=VM_WEIGHT_HIGH, shape=(self.num_weights,)
        )

    @action_space.setter
    def action_space(self, val):
        # Deliberate no-op so the property above always wins over assignment.
        pass

    def _get_scores(
        self, user_feat: np.ndarray, doc_feats: Dict[str, np.ndarray]
    ) -> np.ndarray:
        # num_docs x num_scores where i,j coordinate is jth score for ith doc
        scores = np.array(
            [
                [score_fn(user_feat, doc_feat) for score_fn in self.score_fns]
                for _k, doc_feat in doc_feats.items()
            ]
        )
        return scores

    def _get_ranking(self, scores: np.ndarray, weights: np.ndarray):
        """Rank docs by descending weighted score sum; return top slate_size indices."""
        assert weights.shape == (scores.shape[1],), f"{weights.shape}, {scores.shape}"
        weighted_scores = scores * weights
        values = weighted_scores.sum(axis=1)
        indices = np.argsort(-values)
        return indices[: self.slate_size]

    def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
        preprocessor = RecsimObsPreprocessor.create_from_env(self)
        preprocessed_obs = preprocessor(obs)
        return rlt._embed_states(preprocessed_obs)

    def serving_obs_preprocessor(self, obs: np.ndarray):
        preprocessor = RecsimObsPreprocessor.create_from_env(self)
        x = preprocessor(obs)
        # user was batch_size x state_size, stack
        user = x.float_features.unsqueeze(1).repeat_interleave(
            self.num_candidates, dim=1
        )
        candidates = x.candidate_docs.float_features
        combined = torch.cat([user, candidates], dim=2).squeeze(0)
        # Presence mask is all ones: every feature is considered present.
        return (combined, torch.ones_like(combined, dtype=torch.uint8))
| 6,305 | 35.034286 | 100 | py |
ReAgent | ReAgent-master/reagent/gym/envs/changing_arms.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
Traditional MAB setup has sequence length = 1 always. In this setup, the
distributions of the arms rewards changes every round, and the agent is presented
with some information and control about how the arms will change.
In particular, the observation includes "mu_changes", which is the possible changes
to mu; only the arm picked by agent will have it's mu_changes reflected.
This way, the next state depend on (only) the previous state and action;
hence this a MDP.
The reward for picking an action is the change in mu corresponding to that arm.
With following set-up (where ARM_INIT_VALUE = 100 and NUM_ARMS = 5), the
optimal policy can accumulate a reward of 500 per run.
Note that if the policy picks an illegal action at any time, the game ends.
"""
import random
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import NormalizationData, NormalizationKey
from reagent.gym.envs.env_wrapper import EnvWrapper
from reagent.gym.normalizers import only_continuous_normalizer
# Bounds of the raw observation Box space.
ABS_LOW = -1000.0
ABS_HIGH = 1000.0
# Normalization bounds for mu; an arm becomes illegal once its mu <= MU_LOW.
MU_LOW = 0.0
MU_HIGH = 1000.0
# illegal move causes game to end with a big BOOM!!!
INVALID_MOVE_PENALTY = -1000.0
IDLE_PENALTY = -500.0
NUM_ARMS = 5
# keep these constant for now
ARM_INIT_VALUE = 100.0
ARM_MU_DECREASE = 10.0
MAX_STEPS = 49
# in the real world, IDs are not indices into embedding table
# thus, we offset vals to test hashing mechanism
ID_LIST_OFFSET = 1000000
ID_SCORE_LIST_OFFSET = 1500000
# Feature IDs used for the serving-side sparse features.
ID_LIST_FEATURE_ID = 100
ID_SCORE_LIST_FEATURE_ID = 1000
def get_initial_mus(num_arms):
    """Every arm starts with the same mean reward, ARM_INIT_VALUE."""
    return torch.full((num_arms,), ARM_INIT_VALUE)
def get_mu_changes(num_arms):
    """Candidate per-arm change: each arm's mu drops by ARM_MU_DECREASE."""
    return torch.full((num_arms,), -ARM_MU_DECREASE)
def get_legal_indices_mask(num_arms):
# FIXME: hardcoded for now
assert num_arms == 5, f"unsupported num_arms = {num_arms}, should be 5"
LEGAL_PROBS = torch.tensor([0.95, 1.0, 0.95, 0.8, 0.8])
return torch.bernoulli(LEGAL_PROBS).to(torch.uint8)
@dataclass
class ChangingArms(EnvWrapper):
    """EnvWrapper for ChangingArmsEnv: builds training/serving preprocessors.

    The raw observation is a (3, num_arms) array stacking, row by row:
    current mus, the legal-arms mask, and the candidate mu changes
    (see ChangingArmsEnv.state).
    """

    num_arms: int = NUM_ARMS

    def make(self) -> gym.Env:
        return ChangingArmsEnv(self.num_arms)

    def _split_state(self, obs: np.ndarray):
        """Split the stacked (3, num_arms) observation into its three parts.

        Returns:
            dense_val: (1, num_arms) tensor of current mus.
            id_list_val: long tensor of legal-arm indices (nonzero mask entries).
            id_score_list_val: (num_arms,) tensor of mu changes.
        """
        assert obs.shape == (3, self.num_arms), f"{obs.shape}."
        dense_val = torch.tensor(obs[0, :]).view(1, self.num_arms)
        id_list_val = torch.tensor(obs[1, :]).nonzero(as_tuple=True)[0].to(torch.long)
        id_score_list_val = torch.tensor(obs[2, :])
        return dense_val, id_list_val, id_score_list_val

    def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
        """Single-observation FeatureData with "legal"/"mu_changes" sparse features."""
        dense_val, id_list_val, id_score_list_val = self._split_state(obs)
        return rlt.FeatureData(
            # dense value
            float_features=dense_val,
            # (offset, value)
            id_list_features={
                "legal": (torch.tensor([0], dtype=torch.long), id_list_val)
            },
            # (offset, key, value)
            id_score_list_features={
                "mu_changes": (
                    torch.tensor([0], dtype=torch.long),
                    torch.arange(self.num_arms, dtype=torch.long),
                    id_score_list_val,
                )
            },
        )

    def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData:
        """Serving variant: numeric feature IDs, values offset to exercise hashing."""
        dense_val, id_list_val, id_score_list_val = self._split_state(obs)
        return rlt.ServingFeatureData(
            float_features_with_presence=(
                dense_val,
                torch.ones_like(dense_val, dtype=torch.uint8),
            ),
            id_list_features={
                ID_LIST_FEATURE_ID: (
                    torch.tensor([0], dtype=torch.long),
                    id_list_val + ID_LIST_OFFSET,
                )
            },
            id_score_list_features={
                ID_SCORE_LIST_FEATURE_ID: (
                    torch.tensor([0], dtype=torch.long),
                    torch.arange(self.num_arms, dtype=torch.long)
                    + ID_SCORE_LIST_OFFSET,
                    id_score_list_val,
                )
            },
        )

    def split_state_transform(self, elem: torch.Tensor):
        """For generate data"""
        # Same split as serving_obs_preprocessor, returned as plain dicts.
        dense_val, id_list_val, id_score_list_val = self._split_state(elem.numpy())
        return (
            {i: s.item() for i, s in enumerate(dense_val.view(-1))},
            {ID_LIST_FEATURE_ID: (id_list_val + ID_LIST_OFFSET).tolist()},
            {
                ID_SCORE_LIST_FEATURE_ID: {
                    i + ID_SCORE_LIST_OFFSET: s.item()
                    for i, s in enumerate(id_score_list_val)
                }
            },
        )

    @property
    def normalization_data(self):
        # All num_arms dense features are continuous within [MU_LOW, MU_HIGH].
        return {
            NormalizationKey.STATE: NormalizationData(
                dense_normalization_parameters=only_continuous_normalizer(
                    list(range(self.num_arms)), MU_LOW, MU_HIGH
                )
            )
        }

    def trainer_preprocessor(self, obs: torch.Tensor):
        """Batched obs_preprocessor for a (batch_size, 3, num_arms) tensor.

        Sparse features are flattened across the batch; per-sample offsets
        point to where each sample's values start in the flattened tensors.
        """
        batch_size = obs.shape[0]
        assert obs.shape == (batch_size, 3, self.num_arms), f"{obs.shape}"
        dense_val = obs[:, 0, :].view(batch_size, self.num_arms)
        # extract one-hot encoded values from id_list
        batch_indices, id_list_val = obs[:, 1, :].nonzero(as_tuple=True)
        # Build one offset per sample: the position in id_list_val where that
        # sample's legal indices begin. A sample with no legal arms gets the
        # same offset as the next non-empty sample.
        offsets = []
        prev_batch_idx = -1
        for i, batch_idx in enumerate(batch_indices.tolist()):
            if batch_idx > prev_batch_idx:
                offsets.extend([i] * (batch_idx - prev_batch_idx))
                prev_batch_idx = batch_idx
            else:
                assert batch_idx == prev_batch_idx
        # handle the case of trailing empty batches
        # pyre-fixme[61]: `batch_idx` may not be initialized here.
        if batch_idx < batch_size - 1:
            # pyre-fixme[61]: `batch_idx` may not be initialized here.
            offsets.extend([i] * (batch_size - 1 - batch_idx))
        assert len(offsets) == batch_size, f"{len(offsets)} != {batch_size}."
        id_list_offsets = torch.tensor(offsets)

        # id_score_list is easier because not one-hot encoded
        id_score_list_offsets = torch.tensor(
            list(range(0, batch_size * self.num_arms, self.num_arms))
        )
        id_score_list_keys = torch.arange(self.num_arms).repeat(batch_size)
        id_score_list_vals = obs[:, 2, :].reshape(-1)
        return rlt.FeatureData(
            # dense value
            float_features=dense_val,
            # (offset, value)
            id_list_features={"legal": (id_list_offsets, id_list_val)},
            # (offset, key, value)
            id_score_list_features={
                "mu_changes": (
                    id_score_list_offsets,
                    id_score_list_keys,
                    id_score_list_vals,
                )
            },
        )
class ChangingArmsEnv(gym.Env):
    """This is just the gym environment, without extra functionality"""

    def __init__(self, num_arms):
        self.seed(0)
        self.num_arms = num_arms
        self.max_steps = MAX_STEPS

    def step(self, action):
        """Pull an arm, or idle if action == num_arms (see module docstring).

        Returns (state, reward, done, info). NOTE: `done` reflects whether
        max_steps had already been reached *before* this step's increment;
        an illegal pull ends the episode immediately.
        """
        if isinstance(action, np.ndarray):
            action = action.item()
        assert (
            0 <= action and action <= self.num_arms
        ), f"out-of-bounds action {action}."
        reached_max_steps = self.num_steps >= self.max_steps
        self.num_steps += 1
        # idle action
        if action == self.num_arms:
            # simply return new state, without updating distributions
            # this is ideal when there aren't any legal actions, this
            # would generate a new batch of legal actions
            return self.state, IDLE_PENALTY, reached_max_steps, None
        # illegal action
        if action not in self.legal_indices:
            return self.state, INVALID_MOVE_PENALTY, True, None
        # update states for only the action selected
        prev = self.mus[action].item()
        self.mus[action] = prev + self.mu_changes[action]
        # an exhausted arm (mu <= MU_LOW) becomes permanently illegal
        if self.mus[action] <= MU_LOW:
            self.legal_indices_mask[action] = 0
        # reward is the drop in mu (positive, since mu_changes are negative)
        reward = prev - self.mus[action].item()
        return self.state, reward, reached_max_steps, None

    def seed(self, seed: int):
        random.seed(seed)
        torch.manual_seed(seed)

    def reset(self):
        # initialize the distributions
        self.num_steps = 0
        self.mus = get_initial_mus(self.num_arms)
        # these are turned off when an arm has been "exhausted"
        self.legal_indices_mask = torch.tensor([1] * self.num_arms).to(torch.uint8)
        return self.state

    @property
    def state(self):
        """
        State comprises of:
        - initial mus
        - legal_indices mask
        - randomly-generated mu changes

        NOTE: accessing this property has side effects — it resamples
        self.mu_changes and this round's legal indices and stores them
        on the instance for the next step() call.
        """
        self.mu_changes = get_mu_changes(self.num_arms)
        legal_indices_mask = (
            get_legal_indices_mask(self.num_arms) & self.legal_indices_mask
        )
        self.legal_indices = legal_indices_mask.nonzero(as_tuple=True)[0]
        result = torch.stack([self.mus, legal_indices_mask, self.mu_changes])
        return result.numpy()

    @property
    def observation_space(self):
        """
        It should really be a Dict, but we return them all stacked since it's
        more convenient for RB.
        """
        return gym.spaces.Box(ABS_LOW, ABS_HIGH, shape=(3, self.num_arms))

    @property
    def action_space(self):
        # Selecting 0,1,2...,num_arms-1 is selecting an arm.
        # If action is invalid, agent incurs a penalty.
        # If action is valid, action is an idx i, and reward
        # is a sample from ith distribution. At the same time
        # the ith distribution is updated with the changes.
        # Alternatively, can choose NULL (i.e. do-nothing) action
        # if action = num_arms
        return gym.spaces.Discrete(self.num_arms + 1)
| 10,024 | 34.9319 | 86 | py |
ReAgent | ReAgent-master/reagent/gym/envs/gym.py | #!/usr/bin/env python3
import logging
from typing import Optional, Tuple
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from gym import spaces
from gym_minigrid.wrappers import ReseedWrapper
from reagent.core.dataclasses import dataclass
from reagent.gym.envs.env_wrapper import EnvWrapper
from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper
logger = logging.getLogger(__name__)
@dataclass
class Gym(EnvWrapper):
    """EnvWrapper that instantiates a named gym environment by id."""

    env_name: str
    set_max_steps: Optional[int] = None

    def make(self) -> gym.Env:
        """Create the env; MiniGrid envs get flattened-obs + reseed wrappers."""
        make_kwargs = {}
        if self.set_max_steps is not None:
            make_kwargs["max_steps"] = self.set_max_steps
        env: gym.Env = gym.make(self.env_name, **make_kwargs)
        if self.env_name.startswith("MiniGrid-"):
            # Wrap in minigrid simplifier
            env = SimpleObsWrapper(ReseedWrapper(env))
        return env

    def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
        """Wrap a raw Box observation as a (1, state_dim) FeatureData."""
        space = self.observation_space
        if not isinstance(space, spaces.Box):
            raise NotImplementedError(f"{space} obs space not supported for Gym.")
        return rlt.FeatureData(torch.tensor(obs).float().unsqueeze(0))

    # TODO: make return serving feature data
    # pyre-fixme[15]: `serving_obs_preprocessor` overrides method defined in
    # `EnvWrapper` inconsistently.
    def serving_obs_preprocessor(
        self, obs: np.ndarray
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (value, presence) tensors for a 1-D Box observation."""
        space = self.observation_space
        if not isinstance(space, spaces.Box):
            raise NotImplementedError(f"{space} not supported!")
        if len(space.shape) != 1:
            raise NotImplementedError(f"Box shape {space.shape} not supported!")
        obs_tensor = torch.tensor(obs).float().view(1, space.shape[0])
        # Presence mask is all ones: every feature is present.
        return (obs_tensor, torch.ones_like(obs_tensor))
| 1,987 | 33.275862 | 86 | py |
ReAgent | ReAgent-master/reagent/gym/envs/env_wrapper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import logging
from typing import Callable, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from gym import spaces
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.core.registry_meta import RegistryMeta
from reagent.training.utils import rescale_actions
# types for reference
ObsPreprocessor = Callable[[np.ndarray], rlt.FeatureData]
ServingObsPreprocessor = Callable[[np.ndarray], rlt.ServingFeatureData]
ActionExtractor = Callable[[rlt.ActorOutput], np.ndarray]
ServingActionExtractor = ActionExtractor
CONTINUOUS_MODEL_LOW = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0])
CONTINUOUS_MODEL_HIGH = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1])
logger = logging.getLogger(__name__)
@dataclass
class EnvWrapper(gym.core.Wrapper, metaclass=RegistryMeta):
    """Wrapper around it's environment, to simplify configuration."""

    def __post_init_post_parse__(self):
        # Subclasses only declare config fields; the wrapped env is built here.
        super().__init__(self.make())
        logger.info(
            f"Env: {self.env};\n"
            f"observation_space: {self.env.observation_space};\n"
            f"action_space: {self.env.action_space};"
        )

    @abc.abstractmethod
    def make(self) -> gym.Env:
        """Construct and return the underlying gym environment."""
        pass

    @abc.abstractmethod
    def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
        """Convert a raw observation into training-side FeatureData."""
        pass

    @abc.abstractmethod
    def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData:
        """Convert a raw observation into serving-side ServingFeatureData."""
        pass

    def get_obs_preprocessor(self, *ctor_args, **ctor_kwargs):
        """Return obs_preprocessor composed with a `.to(...)` transfer."""
        # ctor_args go to .to call
        ctor_kwargs["non_blocking"] = True
        return lambda *args, **kwargs: self.obs_preprocessor(*args, **kwargs).to(
            *ctor_args, **ctor_kwargs
        )

    def get_serving_obs_preprocessor(self):
        return lambda *args, **kwargs: self.serving_obs_preprocessor(*args, **kwargs)

    def action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
        """Convert a single-row ActorOutput into an env-ready action tensor.

        Discrete -> argmax index; MultiDiscrete -> raw vector; Box -> action
        rescaled from the model's training range to the env's range.
        """
        action = actor_output.action
        action_space = self.action_space
        # Canonical rule to return one-hot encoded actions for discrete
        assert (
            len(action.shape) == 2 and action.shape[0] == 1
        ), f"{action} (shape: {action.shape}) is not a single action!"
        if isinstance(action_space, spaces.Discrete):
            # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
            return action.squeeze(0).argmax()
        elif isinstance(action_space, spaces.MultiDiscrete):
            return action.squeeze(0)
        # Canonical rule to scale actions to CONTINUOUS_TRAINING_ACTION_RANGE
        elif isinstance(action_space, spaces.Box):
            assert len(action_space.shape) == 1, f"{action_space} not supported."
            return rescale_actions(
                action.squeeze(0),
                new_min=torch.tensor(action_space.low),
                new_max=torch.tensor(action_space.high),
                prev_min=CONTINUOUS_MODEL_LOW,
                prev_max=CONTINUOUS_MODEL_HIGH,
            )
        else:
            raise NotImplementedError(f"Unsupported action space: {action_space}")

    def serving_action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
        """Like action_extractor, but Box actions are NOT rescaled for serving."""
        action = actor_output.action
        action_space = self.action_space
        assert (
            len(action.shape) == 2 and action.shape[0] == 1
        ), f"{action.shape} isn't (1, action_dim)"
        if isinstance(action_space, spaces.Discrete):
            # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
            return action.squeeze(0).argmax().view([])
        elif isinstance(action_space, spaces.MultiDiscrete):
            return action.squeeze(0)
        elif isinstance(action_space, spaces.Box):
            assert (
                len(action_space.shape) == 1
            ), f"Unsupported Box with shape {action_space.shape}"
            return action.squeeze(0)
        else:
            raise NotImplementedError(f"Unsupported action space: {action_space}")

    def get_action_extractor(self):
        return (
            lambda *args, **kwargs: self.action_extractor(*args, **kwargs).cpu().numpy()
        )

    def get_serving_action_extractor(self):
        return (
            lambda *args, **kwargs: self.serving_action_extractor(*args, **kwargs)
            .cpu()
            .numpy()
        )

    # TODO: add more methods to simplify gym code
    # e.g. normalization, specific preprocessor, etc.
    # This can move a lot of the if statements from create_from_env methods.

    @property
    def max_steps(self) -> Optional[int]:
        """Episode step limit of the wrapped env, if it exposes one."""
        possible_keys = [
            # gym should have _max_episode_steps
            "_max_episode_steps",
            # Minigrid should have max_steps
            "max_steps",
        ]
        for key in possible_keys:
            res = getattr(self.env, key, None)
            if res is not None:
                return res
        return None

    @property
    def possible_actions_mask(self) -> Optional[np.ndarray]:
        """Defensive copy of the wrapped env's action mask, if it exposes one."""
        ret = getattr(self.env, "possible_actions_mask", None)
        if ret is not None:
            ret = ret.copy()
        return ret
| 5,302 | 35.572414 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/envs/pomdp/string_game_v1.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
A game with a stochastic length of the MDP but no longer than 3.
An agent can choose one character to reveal (either "A" or "B") as the action,
and the next state is exactly the action just taken (i.e., the transition
function only depends on the action). Each episode is limited to 3 steps.
There is some probability to terminate at any step (but the agent must terminate if
making 3 steps)
If the current state is "A", the agent has 0.5 probability to make to the next step.
If the current state is "B", the agent has 0.9 probability to make to the next step.
The reward is given at the terminal state, based on the accumulated observation (a string).
If the agent observes "AAA" (survive the first 2 steps and terminate at the last step
no matter what action taken), it receives +5 reward.
If the agent observes "BA" (survive the first step and terminate at the second step),
it receives +4 reward.
For all other scenarios, the agent receives 0 reward.
If we plan for 3 steps ahead from the beginning, "A" is the better action to take first.
If we plan with consideration of termination probabilities, "B" is better. Because:
The expected Q-value of "A" = 0.5 * 0 + 0.5 * max(0.5 * 0 + 0.5 * max(5, 0), 0) = 1.25
The expected Q-value of "B" = 0.1 * 0 + 0.9 * max(0.5 * 4 + 0.5 * max(0, 0), 0) = 1.8
"""
import logging
from collections import deque, defaultdict
import numpy as np
import torch
from gym import Env
from gym.spaces import Box, Discrete
logger = logging.getLogger(__name__)
# Episode length cap and the two revealable characters.
MAX_STEP = 3
CHARACTERS = ["A", "B"]
# Observation is a one-hot over CHARACTERS; the action picks a character.
STATE_DIM = ACTION_DIM = len(CHARACTERS)
class StringGameEnvV1(Env):
    """String game with stochastic termination (rules in the module docstring)."""

    def __init__(self, max_steps=MAX_STEP):
        np.random.seed(123)
        torch.manual_seed(123)
        self.max_steps = max_steps
        # Unlisted strings default to 0.0 reward / 0.0 termination probability.
        self.reward_map = defaultdict(float)
        self.terminal_probs = defaultdict(float)
        self._init_reward_and_terminal_probs()
        self.recent_actions = deque([], maxlen=MAX_STEP)
        self.action_space = Discrete(ACTION_DIM)
        self.observation_space = Box(low=0, high=1, shape=(STATE_DIM,))
        self.step_cnt = 0
        self.reset()

    def _init_reward_and_terminal_probs(self):
        # Only two observation strings pay out.
        self.reward_map["AAA"] = 5.0
        self.reward_map["BA"] = 4.0
        # Per-character probability of terminating after a step.
        self.terminal_probs["A"] = 0.5
        self.terminal_probs["B"] = 0.1

    def seed(self, seed=None):
        np.random.seed(seed)
        torch.manual_seed(seed)

    @staticmethod
    def random_action():
        return np.random.randint(0, ACTION_DIM)

    def get_reward(self):
        """Reward depends only on the action history, and only at termination."""
        string = "".join(CHARACTERS[a] for a in self.recent_actions)
        reward = self.reward_map[string] if self.done else 0
        return reward, string

    def step(self, action):
        assert self.action_space.contains(action)
        assert self.done is False
        self.step_cnt += 1
        self.recent_actions.append(action)
        # Hard cap on episode length; otherwise terminate stochastically.
        self.done = (
            True
            if self.step_cnt >= self.max_steps
            else self.sample_terminal(action)
        )
        reward, info = self.get_reward()
        ob = self.get_observation()
        return ob, reward, self.done, {"reward_str": info}

    def sample_terminal(self, action):
        # Bernoulli draw with the character-specific termination probability.
        return bool(np.random.rand() < self.terminal_probs[CHARACTERS[action]])

    def get_observation(self):
        """One-hot encoding of the latest action (all zeros before any action)."""
        ob = np.zeros(STATE_DIM)
        if self.recent_actions:
            ob[self.recent_actions[-1]] = 1
        return ob

    def reset(self):
        self.done = False
        self.recent_actions = deque([], maxlen=MAX_STEP)
        self.step_cnt = 0
        return self.get_observation()

    def print_internal_state(self):
        action_str = "".join(CHARACTERS[c] for c in self.recent_actions)
        logger.debug(
            f"Step {self.step_cnt}, recent actions {action_str}, terminal={self.done}"
        )

    @staticmethod
    def print_ob(ob):
        return str(ob)

    @staticmethod
    def print_action(action):
        return CHARACTERS[action]
| 4,664 | 33.051095 | 91 | py |
ReAgent | ReAgent-master/reagent/gym/envs/pomdp/state_embed_env.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This file shows an example of using embedded states to feed to RL models in
partially observable environments (POMDPs). Embedded states are generated by a world
model which learns how to encode past n observations into a low-dimension
vector.Embedded states improve performance in POMDPs compared to just using
one-step observations as states because they encode more historical information
than one-step observations.
"""
import logging
from collections import deque
from typing import Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from gym.spaces import Box
from reagent.gym.envs import EnvWrapper
from reagent.models.world_model import MemoryNetwork
logger = logging.getLogger(__name__)
class StateEmbedEnvironment(gym.Env):
    """Env whose states are [world-model hidden embedding, raw observation].

    Wraps `gym_env`; on every reset()/step() the last up-to-max_embed_seq_len
    (state, action) pairs are fed through the MDN-RNN world model and its
    final LSTM hidden output is concatenated before the raw observation.
    """

    def __init__(
        self,
        gym_env: EnvWrapper,
        mdnrnn: MemoryNetwork,
        max_embed_seq_len: int,
        state_min_value: Optional[float] = None,
        state_max_value: Optional[float] = None,
    ):
        self.env = gym_env
        # Expose the wrapped env's spec on this env.
        self.unwrapped.spec = self.env.unwrapped.spec
        self.max_embed_seq_len = max_embed_seq_len
        self.mdnrnn = mdnrnn
        self.embed_size = self.mdnrnn.num_hiddens
        self.raw_state_dim = self.env.observation_space.shape[0]  # type: ignore
        # Embedded state = hidden embedding followed by the raw observation.
        self.state_dim = self.embed_size + self.raw_state_dim
        # NOTE(review): only Discrete and Box action spaces are handled; any
        # other space leaves is_discrete_action/action_dim unset.
        if isinstance(self.env.action_space, gym.spaces.Discrete):
            self.is_discrete_action = True
            self.action_dim = self.env.action_space.n
        elif isinstance(self.env.action_space, gym.spaces.Box):
            self.is_discrete_action = False
            self.action_dim = self.env.action_space.shape[0]
        self.action_space = self.env.action_space
        # only need to set up if needed
        if state_min_value is None or state_max_value is None:
            state_min_value = np.min(gym_env.observation_space.low)
            state_max_value = np.max(gym_env.observation_space.high)
        self.observation_space = Box(  # type: ignore
            low=state_min_value, high=state_max_value, shape=(self.state_dim,)
        )
        self.cur_raw_state = None
        # Rolling history fed to the world model by embed_state().
        self.recent_states = deque([], maxlen=self.max_embed_seq_len)  # type: ignore
        self.recent_actions = deque([], maxlen=self.max_embed_seq_len)  # type: ignore

    def seed(self, seed):
        self.env.seed(seed)

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped env.
        return getattr(self.env, name)

    @torch.no_grad()
    def embed_state(self, state):
        """Embed state after either reset() or step()"""
        assert len(self.recent_states) == len(self.recent_actions)
        # Temporarily switch the world model to eval mode; restored below.
        old_mdnrnn_mode = self.mdnrnn.mdnrnn.training
        self.mdnrnn.mdnrnn.eval()
        # Embed the state as the hidden layer's output
        # until the previous step + current state
        if len(self.recent_states) == 0:
            # No history yet: feed a single all-zero (state, action) pair.
            mdnrnn_state = np.zeros((1, self.raw_state_dim))
            mdnrnn_action = np.zeros((1, self.action_dim))
        else:
            mdnrnn_state = np.array(list(self.recent_states))
            mdnrnn_action = np.array(list(self.recent_actions))
        # unsqueeze(1) inserts a middle axis — assumes the memory network
        # takes (seq_len, batch, dim) input; TODO confirm against MemoryNetwork.
        mdnrnn_state = torch.tensor(mdnrnn_state, dtype=torch.float).unsqueeze(1)
        mdnrnn_action = torch.tensor(mdnrnn_action, dtype=torch.float).unsqueeze(1)
        mdnrnn_output = self.mdnrnn(
            rlt.FeatureData(mdnrnn_state), rlt.FeatureData(mdnrnn_action)
        )
        hidden_embed = (
            mdnrnn_output.all_steps_lstm_hidden[-1].squeeze().detach().cpu().numpy()
        )
        # Embedded state = [hidden embedding, raw state].
        state_embed = np.hstack((hidden_embed, state))
        self.mdnrnn.mdnrnn.train(old_mdnrnn_mode)
        logger.debug(
            f"Embed_state\nrecent states: {np.array(self.recent_states)}\n"
            f"recent actions: {np.array(self.recent_actions)}\n"
            f"state_embed{state_embed}\n"
        )
        return state_embed

    def reset(self):
        """Reset the wrapped env and clear the embedding history."""
        next_raw_state = self.env.reset()
        self.recent_states = deque([], maxlen=self.max_embed_seq_len)
        self.recent_actions = deque([], maxlen=self.max_embed_seq_len)
        self.cur_raw_state = next_raw_state
        next_embed_state = self.embed_state(next_raw_state)
        return next_embed_state

    def step(self, action):
        """Step the wrapped env; return the embedded next state."""
        if self.is_discrete_action:
            # One-hot encode discrete actions before storing in the history.
            action_np = np.zeros(self.action_dim)
            action_np[action] = 1.0
        else:
            action_np = action
        self.recent_states.append(self.cur_raw_state)
        self.recent_actions.append(action_np)
        next_raw_state, reward, terminal, info = self.env.step(action)
        logger.debug("action {}, reward {}\n".format(action, reward))
        self.cur_raw_state = next_raw_state
        next_embed_state = self.embed_state(next_raw_state)
        return next_embed_state, reward, terminal, info
| 4,927 | 38.424 | 86 | py |
ReAgent | ReAgent-master/reagent/gym/envs/pomdp/string_game.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
The agent can observe a character at one time. But the
reward is given based on last n (n>1) steps' observation (a string).
In this environment, the agent can observe a character ("A", "B") at
each time step, but the reward it receives actually depends on past 3 steps:
if the agent observes "ABB" in the past 3 steps, it receives +5 reward; if the
agent observes "BBB", it receives -5 reward; otherwise, the agent receives 0.
The action is the next character the agent wants to reveal, and the next state
is exactly the action just taken (i.e., the transition function only depends on
the action). Each episode is limited to 6 steps. Therefore, the optimal policy
is to choose actions "ABBABB" in sequence which results to +10 reward.
"""
import itertools
import logging
from collections import deque
import numpy as np
import torch
from gym import Env
from gym.spaces import Box, Discrete
logger = logging.getLogger(__name__)
# Episode length cap and the two revealable characters.
MAX_STEP = 6
CHARACTERS = ["A", "B"]
# Observation is a one-hot over CHARACTERS; the action picks a character.
STATE_DIM = ACTION_DIM = len(CHARACTERS)
# Number of past actions the reward looks at.
SEQ_LEN = 3
class StringGameEnv(Env):
    """Fixed-length string game: reward at every step from the last SEQ_LEN actions."""

    def __init__(self, max_steps=MAX_STEP):
        np.random.seed(123)
        torch.manual_seed(123)
        self.max_steps = max_steps
        self.reward_map = {}
        self._init_reward()
        logger.debug(self.reward_map)
        self.recent_actions = deque([], maxlen=SEQ_LEN)
        self.recent_states = deque([], maxlen=SEQ_LEN)
        self.cur_state = None
        self.action_space = Discrete(ACTION_DIM)
        self.observation_space = Box(low=0, high=1, shape=(STATE_DIM,))
        self.step_cnt = 0
        self.reset()

    def _init_reward(self):
        # Default every string of length 1..SEQ_LEN to 0 reward, then set
        # the two special payouts.
        for seq_len in range(1, SEQ_LEN + 1):
            for k in itertools.product(CHARACTERS, repeat=seq_len):
                self.reward_map["".join(k)] = 0
        self.reward_map["ABB"] = 5
        self.reward_map["BBB"] = -5

    def seed(self, seed=None):
        np.random.seed(seed)
        torch.manual_seed(seed)

    @staticmethod
    def random_action():
        return np.random.randint(0, ACTION_DIM)

    def get_reward(self):
        """
        The function you can write to customize rewards. In this
        specific environment, the reward only depends on action history
        """
        recent_characters = [CHARACTERS[c] for c in list(self.recent_actions)]
        string = "".join(recent_characters)
        reward = self.reward_map[string]
        return reward, string

    def step(self, action):
        """One step: record the action, then reward from the last SEQ_LEN actions."""
        assert self.action_space.contains(action)
        assert self.done is False
        self.step_cnt += 1
        self.recent_states.append(self.cur_state)
        self.recent_actions.append(action)
        reward, info = self.get_reward()
        # Episodes always run exactly max_steps steps.
        if self.step_cnt >= self.max_steps:
            self.done = True
        ob = self.get_observation()
        self.cur_state = ob
        return ob, reward, self.done, {"reward_str": info}

    def get_observation(self):
        """
        The function you can write to customize transitions. In this
        specific environment, the next state is exactly the latest action taken.
        The initial observation is all zeros.
        """
        ob = np.zeros(STATE_DIM)
        if len(self.recent_actions) > 0:
            ob[self.recent_actions[-1]] = 1
        return ob

    def reset(self):
        self.done = False
        self.recent_states = deque([], maxlen=SEQ_LEN)
        self.recent_actions = deque([], maxlen=SEQ_LEN)
        self.step_cnt = 0
        ob = self.get_observation()
        self.cur_state = ob
        return ob

    def print_internal_state(self):
        """Debug helper: print step count plus recent state/action strings."""
        print("Step", self.step_cnt)

        def state_to_chr(s):
            # Map a one-hot state back to its character; "I" for the initial state.
            state_index = np.nonzero(s)[0]
            if len(state_index) != 1:
                # initial state
                return "I"
            return CHARACTERS[state_index.item()]

        state_str = "".join([state_to_chr(s) for s in self.recent_states])
        action_str = "".join([CHARACTERS[c] for c in self.recent_actions])
        print(
            "Internal state: recent states {}, recent actions {}".format(
                state_str, action_str
            )
        )

    @staticmethod
    def print_ob(ob):
        return str(ob)

    @staticmethod
    def print_action(action):
        return CHARACTERS[action]
| 4,391 | 31.533333 | 80 | py |
ReAgent | ReAgent-master/reagent/gym/policies/predictor_policies.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional, Tuple, Union
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import RLParameters
from reagent.gym.policies import Policy
from reagent.gym.policies.samplers.discrete_sampler import (
GreedyActionSampler,
SoftmaxActionSampler,
)
from reagent.gym.policies.samplers.top_k_sampler import TopKSampler
from reagent.gym.policies.scorers.discrete_scorer import (
discrete_dqn_serving_scorer,
parametric_dqn_serving_scorer,
)
from reagent.gym.policies.scorers.slate_q_scorer import slate_q_serving_scorer
from reagent.models.actor import LOG_PROB_MIN, LOG_PROB_MAX
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbActorPredictorUnwrapper as ActorPredictorUnwrapper,
FbDiscreteDqnPredictorUnwrapper as DiscreteDqnPredictorUnwrapper,
FbParametricPredictorUnwrapper as ParametricDqnPredictorUnwrapper,
)
else:
from reagent.prediction.predictor_wrapper import (
ActorPredictorUnwrapper,
DiscreteDqnPredictorUnwrapper,
ParametricDqnPredictorUnwrapper,
)
def create_predictor_policy_from_model(serving_module, **kwargs) -> Policy:
    """
    Create a gym-facing Policy from a serving module
    (the result of ModelManager.build_serving_module()).

    Dispatches on the wrapper class name of the serving module.

    Keyword args (used depending on the wrapper type):
        rl_parameters: Optional[RLParameters] — softmax vs. greedy sampling
            for discrete DQN.
        max_num_actions: int — required for parametric DQN.
        slate_size: Optional[int] — if given, build a top-k slate-Q policy.

    Raises:
        NotImplementedError: if the serving module type is not recognized.
    """
    module_name = serving_module.original_name
    if module_name.endswith("DiscreteDqnPredictorWrapper"):
        rl_parameters = kwargs.get("rl_parameters", None)
        return DiscreteDQNPredictorPolicy(serving_module, rl_parameters)
    elif module_name.endswith("ActorPredictorWrapper"):
        return ActorPredictorPolicy(predictor=ActorPredictorUnwrapper(serving_module))
    elif module_name.endswith("ParametricDqnPredictorWrapper"):
        # TODO: remove this dependency
        max_num_actions = kwargs.get("max_num_actions", None)
        # NOTE: message was previously a placeholder-free f-string (F541).
        assert (
            max_num_actions is not None
        ), "max_num_actions not given for Parametric DQN."
        q_network = ParametricDqnPredictorUnwrapper(serving_module)
        # TODO: write SlateQ Wrapper
        slate_size = kwargs.get("slate_size", None)
        if slate_size is not None:
            # Slate ranking: score all candidates, then sample the top-k.
            scorer = slate_q_serving_scorer(
                num_candidates=max_num_actions, q_network=q_network
            )
            sampler = TopKSampler(k=slate_size)
        else:
            sampler = GreedyActionSampler()
            scorer = parametric_dqn_serving_scorer(
                max_num_actions=max_num_actions, q_network=q_network
            )
        return Policy(scorer=scorer, sampler=sampler)
    else:
        raise NotImplementedError(
            f"Predictor policy for serving module {serving_module} not available."
        )
class DiscreteDQNPredictorPolicy(Policy):
    """Policy backed by a traced DiscreteDQN serving module.

    Chooses a softmax sampler when the given RLParameters request a softmax
    policy; otherwise acts greedily on the serving module's Q-values.
    """

    def __init__(self, wrapped_dqn_predictor, rl_parameters: Optional[RLParameters]):
        if rl_parameters and rl_parameters.softmax_policy:
            self.sampler = SoftmaxActionSampler(temperature=rl_parameters.temperature)
        else:
            self.sampler = GreedyActionSampler()
        # Unwrap the serving module so it can be called like a q-network.
        self.scorer = discrete_dqn_serving_scorer(
            q_network=DiscreteDqnPredictorUnwrapper(wrapped_dqn_predictor)
        )

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def act(
        self,
        obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]],
        possible_actions_mask: Optional[torch.Tensor],
    ) -> rlt.ActorOutput:
        """Input is either state_with_presence, or
        ServingFeatureData (in the case of sparse features)"""
        assert isinstance(obs, tuple)
        if isinstance(obs, rlt.ServingFeatureData):
            state: rlt.ServingFeatureData = obs
        else:
            # Wrap the plain (value, presence) pair; no sparse features.
            state = rlt.ServingFeatureData(
                float_features_with_presence=obs,
                id_list_features={},
                id_score_list_features={},
            )
        scores = self.scorer(state, possible_actions_mask)
        return self.sampler.sample_action(scores).cpu().detach()
class ActorPredictorPolicy(Policy):
    """Policy backed by a traced (unwrapped) actor serving module."""

    def __init__(self, predictor):
        self.predictor = predictor

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def act(
        self,
        obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]],
        possible_actions_mask: Optional[torch.Tensor] = None,
    ) -> rlt.ActorOutput:
        """Input is either state_with_presence, or
        ServingFeatureData (in the case of sparse features)"""
        assert isinstance(obs, tuple)
        if isinstance(obs, rlt.ServingFeatureData):
            state: rlt.ServingFeatureData = obs
        else:
            # Wrap the plain (value, presence) pair; no sparse features.
            state = rlt.ServingFeatureData(
                float_features_with_presence=obs,
                id_list_features={},
                id_score_list_features={},
            )
        output = self.predictor(*state)
        if isinstance(output, tuple):
            # Predictor returned (action, log_prob); clamp log_prob to the
            # [LOG_PROB_MIN, LOG_PROB_MAX] bounds from reagent.models.actor.
            action, log_prob = output
            log_prob = log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX)
            return rlt.ActorOutput(action=action.cpu(), log_prob=log_prob.cpu())
        else:
            # Predictor returned only the action; no log-prob available.
            return rlt.ActorOutput(action=output.cpu())
| 5,542 | 38.592857 | 86 | py |
ReAgent | ReAgent-master/reagent/gym/policies/policy.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Optional
import reagent.core.types as rlt
import torch
from reagent.gym.types import Sampler, Scorer
class Policy:
    def __init__(self, scorer: Scorer, sampler: Sampler):
        """
        A Policy pairs a scorer with a sampler to produce actions.

        Args:
            scorer: maps preprocessed observations to intermediate scores
                that drive action selection.
            sampler: draws an action from the scores produced by the scorer.
        """
        self.scorer = scorer
        self.sampler = sampler

    def act(
        self, obs: Any, possible_actions_mask: Optional[torch.Tensor] = None
    ) -> rlt.ActorOutput:
        """
        Score the observation, then sample an action from the scores.
        Note: these are the actions written to the replay buffer, which are
        not necessarily the actions the environment ends up taking!
        """
        if possible_actions_mask is None:
            scores = self.scorer(obs)
        else:
            scores = self.scorer(obs, possible_actions_mask)
        sampled = self.sampler.sample_action(scores)
        return sampled.cpu().detach()
| 1,245 | 31.789474 | 77 | py |
ReAgent | ReAgent-master/reagent/gym/policies/random_policies.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.scorers.discrete_scorer import apply_possible_actions_mask
def make_random_policy_for_env(env: gym.Env) -> Policy:
    """Build a uniformly random Policy matching ``env``'s action space."""
    space = env.action_space
    if isinstance(space, gym.spaces.Discrete):
        # discrete action space
        return DiscreteRandomPolicy.create_for_env(env)
    if isinstance(space, gym.spaces.Box):
        # continuous action space
        return ContinuousRandomPolicy.create_for_env(env)
    if isinstance(space, gym.spaces.MultiDiscrete):
        return MultiDiscreteRandomPolicy.create_for_env(env)
    raise NotImplementedError(f"{env.action_space} not supported")
class DiscreteRandomPolicy(Policy):
    def __init__(self, num_actions: int):
        """Random actor for accumulating random offline data."""
        self.num_actions = num_actions

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a gym env; only Discrete action spaces are supported."""
        action_space = env.action_space
        if isinstance(action_space, gym.spaces.Discrete):
            return cls(num_actions=action_space.n)
        elif isinstance(action_space, gym.spaces.Box):
            raise NotImplementedError(f"Try continuous random policy instead")
        else:
            raise NotImplementedError(f"action_space is {type(action_space)}")

    def act(
        self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
    ) -> rlt.ActorOutput:
        """Act randomly regardless of the observation."""
        # pyre-fixme[35]: Target cannot be annotated.
        obs: torch.Tensor = obs.float_features
        assert obs.dim() >= 2, f"obs has shape {obs.shape} (dim < 2)"
        # Only single-observation batches are supported here.
        assert obs.shape[0] == 1, f"obs has shape {obs.shape} (0th dim != 1)"
        batch_size = obs.shape[0]
        # Uniform scores; masked-out actions get score 0, so the Categorical
        # below never samples them.
        scores = torch.ones((batch_size, self.num_actions))
        scores = apply_possible_actions_mask(
            scores, possible_actions_mask, invalid_score=0.0
        )

        # sample a random action
        m = torch.distributions.Categorical(scores)
        raw_action = m.sample()
        action = F.one_hot(raw_action, self.num_actions)
        log_prob = m.log_prob(raw_action).float()
        return rlt.ActorOutput(action=action, log_prob=log_prob)
class MultiDiscreteRandomPolicy(Policy):
    """Uniform random policy for MultiDiscrete action spaces."""

    def __init__(self, num_action_vec: List[int]):
        self.num_action_vec = num_action_vec
        # One uniform categorical distribution per action dimension.
        self.dists = [
            torch.distributions.Categorical(torch.ones(n) / n)
            for n in self.num_action_vec
        ]

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a gym env; only MultiDiscrete action spaces are supported."""
        action_space = env.action_space
        if not isinstance(action_space, gym.spaces.MultiDiscrete):
            raise ValueError(f"Invalid action space: {action_space}")

        return cls(action_space.nvec.tolist())

    # TODO: consider possible_actions_mask
    def act(
        self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
    ) -> rlt.ActorOutput:
        """Sample each action dimension independently, ignoring the observation."""
        # pyre-fixme[35]: Target cannot be annotated.
        obs: torch.Tensor = obs.float_features
        batch_size, _ = obs.shape

        actions = []
        log_probs = []
        for m in self.dists:
            actions.append(m.sample((batch_size, 1)))
            log_probs.append(m.log_prob(actions[-1]).float())

        # Joint log-prob is the sum over the independent dimensions.
        return rlt.ActorOutput(
            action=torch.cat(actions, dim=1),
            log_prob=torch.cat(log_probs, dim=1).sum(1, keepdim=True),
        )
class ContinuousRandomPolicy(Policy):
    """Uniform random policy over a box-shaped continuous action range."""

    def __init__(self, low: torch.Tensor, high: torch.Tensor):
        self.low = low
        self.high = high
        assert (
            low.shape == high.shape
        ), f"low.shape = {low.shape}, high.shape = {high.shape}"
        self.dist = torch.distributions.uniform.Uniform(self.low, self.high)

    @classmethod
    def create_for_env(cls, env: gym.Env):
        """Build from a gym env; only 1-dimensional Box action spaces are supported.

        Note: samples from CONTINUOUS_TRAINING_ACTION_RANGE broadcast to the
        action shape, not from the env's own low/high bounds.
        """
        action_space = env.action_space
        if isinstance(action_space, gym.spaces.Discrete):
            raise NotImplementedError(
                f"Action space is discrete. Try using DiscreteRandomPolicy instead."
            )
        elif isinstance(action_space, gym.spaces.Box):
            assert (
                len(action_space.shape) == 1
            ), f"Box with shape {action_space.shape} not supported."
            low, high = CONTINUOUS_TRAINING_ACTION_RANGE
            # broadcast low and high to shape
            np_ones = np.ones(action_space.shape)
            return cls(
                low=torch.tensor(low * np_ones), high=torch.tensor(high * np_ones)
            )
        else:
            raise NotImplementedError(f"action_space is {type(action_space)}")

    def act(
        self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
    ) -> rlt.ActorOutput:
        """Act randomly regardless of the observation."""
        # pyre-fixme[35]: Target cannot be annotated.
        obs: torch.Tensor = obs.float_features
        assert obs.dim() >= 2, f"obs has shape {obs.shape} (dim < 2)"
        batch_size = obs.size(0)
        # pyre-fixme[6]: Expected `Union[torch.Size, torch.Tensor]` for 1st param
        # but got `Tuple[int]`.
        action = self.dist.sample((batch_size,))
        # sum over action_dim (since assuming i.i.d. per coordinate)
        log_prob = self.dist.log_prob(action).sum(1)
        return rlt.ActorOutput(action=action, log_prob=log_prob)
| 5,717 | 38.434483 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/policies/scorers/slate_q_scorer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.gym.types import Scorer
from reagent.models.base import ModelBase
def slate_q_scorer(num_candidates: int, q_network: ModelBase) -> Scorer:
    """SlateQ scorer: per-candidate Q-value weighted by selection probability.

    The returned scorer expects a FeatureData whose ``candidate_docs`` is set
    and produces a (batch_size, num_candidates) tensor.
    """

    @torch.no_grad()
    def score(state: rlt.FeatureData) -> torch.Tensor:
        # Tile the state so each candidate is scored against its own copy.
        tiled_state = state.repeat_interleave(repeats=num_candidates, axis=0)
        candidate_docs = state.candidate_docs
        assert candidate_docs is not None
        actions = candidate_docs.as_feature_data()

        # Score in eval mode, then restore train mode.
        q_network.eval()
        scores = q_network(tiled_state, actions).view(-1, num_candidates)
        q_network.train()

        # Selection probability: softmax over the candidate docs' values.
        select_prob = F.softmax(candidate_docs.value, dim=1)
        assert select_prob.shape == scores.shape

        return select_prob * scores

    return score
def slate_q_serving_scorer(num_candidates: int, q_network: torch.nn.Module) -> Scorer:
    """Serving-side SlateQ scorer: same math as ``slate_q_scorer`` but calls a
    traced module that takes (value, presence) tensor pairs."""

    @torch.no_grad()
    def score(state: rlt.FeatureData) -> torch.Tensor:
        # pyre-fixme[28]: Unexpected keyword argument `axis`.
        tiled_state = state.float_features.repeat_interleave(
            repeats=num_candidates, axis=0
        )
        candidate_docs = state.candidate_docs
        assert candidate_docs is not None
        actions = candidate_docs.as_feature_data().float_features

        q_network.eval()
        # The traced module returns (action_names, q_values); presence masks
        # are passed as all-ones.
        action_names, q_values = q_network(
            (tiled_state, torch.ones_like(tiled_state)),
            (actions, torch.ones_like(actions)),
        )
        scores = q_values.view(-1, num_candidates)
        q_network.train()

        # Selection probability: softmax over the candidate docs' values.
        select_prob = F.softmax(candidate_docs.value, dim=1)
        assert select_prob.shape == scores.shape

        return select_prob * scores

    return score
| 1,838 | 31.839286 | 86 | py |
ReAgent | ReAgent-master/reagent/gym/policies/scorers/continuous_scorer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
from reagent.gym.types import GaussianSamplerScore, Scorer
from reagent.models.base import ModelBase
def sac_scorer(actor_network: ModelBase) -> Scorer:
    """Return a Scorer that extracts the SAC actor's Gaussian parameters."""

    @torch.no_grad()
    def score(preprocessed_obs: rlt.FeatureData) -> GaussianSamplerScore:
        # Temporarily switch the actor to eval mode while querying it.
        actor_network.eval()
        # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function.
        mean, log_scale = actor_network._get_loc_and_scale_log(preprocessed_obs)
        actor_network.train()
        return GaussianSamplerScore(loc=mean, scale_log=log_scale)

    return score
| 699 | 34 | 83 | py |
ReAgent | ReAgent-master/reagent/gym/policies/scorers/discrete_scorer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional, Tuple
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.gym.preprocessors.trainer_preprocessor import get_possible_actions_for_gym
from reagent.gym.types import Scorer
from reagent.models.base import ModelBase
NEG_INF = float("-inf")


def apply_possible_actions_mask(
    scores: torch.Tensor,
    possible_actions_mask: Optional[torch.Tensor] = None,
    invalid_score: float = NEG_INF,
) -> torch.Tensor:
    """Overwrite scores of masked-out actions with ``invalid_score``, in place.

    ``possible_actions_mask`` is an unbatched boolean mask over actions; it is
    expanded to a batch of one and must then match ``scores``'s shape exactly.
    When no mask is given, ``scores`` is returned untouched.
    """
    if possible_actions_mask is None:
        return scores
    possible_actions_mask = possible_actions_mask.unsqueeze(0)
    assert (
        scores.shape == possible_actions_mask.shape
    ), f"{scores.shape} != {possible_actions_mask.shape}"
    # masked_fill_ mutates `scores` in place and returns it.
    return scores.masked_fill_(~possible_actions_mask, invalid_score)
def discrete_dqn_scorer(q_network: ModelBase) -> Scorer:
    """Scorer returning per-action Q-values, with masked-out actions at -inf."""

    @torch.no_grad()
    def score(
        preprocessed_obs: rlt.FeatureData,
        possible_actions_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        q_network.eval()
        scores = q_network(preprocessed_obs)
        # qrdqn returns (batchsize, num_actions, num_atoms)
        if scores.dim() == 3:
            # Collapse the atom dimension to its mean Q-value.
            scores = scores.mean(dim=2)
        assert scores.dim() == 2, f"{scores.shape} isn't (batchsize, num_actions)."
        q_network.train()
        scores = apply_possible_actions_mask(scores, possible_actions_mask)
        return scores

    return score
def discrete_dqn_serving_scorer(q_network: torch.nn.Module) -> Scorer:
    """Like ``discrete_dqn_scorer``, but for a traced serving module that
    returns (action_names, q_values)."""

    @torch.no_grad()
    def score(
        state: rlt.ServingFeatureData,
        possible_actions_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        action_names, q_values = q_network(*state)
        q_values = apply_possible_actions_mask(q_values, possible_actions_mask)
        return q_values

    return score
def get_parametric_input(max_num_actions: int, obs: rlt.FeatureData):
    """Tile ``obs`` once per action and pair it with the candidate actions
    produced by ``get_possible_actions_for_gym``, so a parametric Q-network
    can score every (state, action) pair in one forward pass."""
    assert (
        len(obs.float_features.shape) == 2
    ), f"{obs.float_features.shape} is not (batch_size, state_dim)."
    batch_size, _ = obs.float_features.shape
    # Keep the enumerated actions on the same device as the observations.
    possible_actions = get_possible_actions_for_gym(batch_size, max_num_actions).to(
        obs.float_features.device
    )
    return obs.get_tiled_batch(max_num_actions), possible_actions
def parametric_dqn_scorer(max_num_actions: int, q_network: ModelBase) -> Scorer:
    """Scorer that evaluates a parametric Q-network on every candidate action
    and returns a (batch_size, max_num_actions) score tensor."""

    @torch.no_grad()
    def score(preprocessed_obs: rlt.FeatureData) -> torch.Tensor:
        tiled_state, possible_actions = get_parametric_input(
            max_num_actions, preprocessed_obs
        )

        # Score in eval mode, then restore train mode.
        q_network.eval()
        scores = q_network(tiled_state, possible_actions)
        q_network.train()

        return scores.view(-1, max_num_actions)

    return score
def parametric_dqn_serving_scorer(
    max_num_actions: int, q_network: torch.nn.Module
) -> Scorer:
    """Serving-side counterpart of ``parametric_dqn_scorer``; the traced module
    takes (value, presence) pairs and returns (action_names, q_values)."""

    @torch.no_grad()
    def score(value_presence: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        # Only the value tensor is used; presence is reconstructed as all-ones.
        obs = value_presence[0]
        tiled_state, possible_actions = get_parametric_input(
            max_num_actions, rlt.FeatureData(obs)
        )
        tiled_state_tensor = tiled_state.float_features
        possible_actions_tensor = possible_actions.float_features
        action_names, q_values = q_network(
            state_with_presence=(
                tiled_state_tensor,
                torch.ones_like(tiled_state_tensor),
            ),
            action_with_presence=(
                possible_actions_tensor,
                torch.ones_like(possible_actions_tensor),
            ),
        )
        return q_values.view(-1, max_num_actions)

    return score
| 3,740 | 32.106195 | 87 | py |
ReAgent | ReAgent-master/reagent/gym/policies/samplers/continuous_sampler.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
from reagent.gym.types import GaussianSamplerScore, Sampler
class GaussianSampler(Sampler):
    """Samples squashed (tanh) actions from the Gaussian produced by a SAC
    actor network; relies on the actor's own _log_prob/_squash_correction
    helpers."""

    def __init__(self, actor_network):
        self.actor_network = actor_network

    def _sample_action(self, loc: torch.Tensor, scale_log: torch.Tensor):
        """Reparameterized sample: action = tanh(loc + eps * exp(scale_log))."""
        r = torch.randn_like(scale_log, device=scale_log.device)
        # pyre-fixme[16]: `Tensor` has no attribute `exp`.
        action = torch.tanh(loc + r * scale_log.exp())
        # Since each dim are independent, log-prob is simply sum
        log_prob = self.actor_network._log_prob(r, scale_log)
        squash_correction = self.actor_network._squash_correction(action)
        log_prob = torch.sum(log_prob - squash_correction, dim=1)
        return action, log_prob

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def sample_action(self, scores: GaussianSamplerScore) -> rlt.ActorOutput:
        """Sample an action (and its log-prob) with the actor in eval mode."""
        self.actor_network.eval()
        unscaled_actions, log_prob = self._sample_action(scores.loc, scores.scale_log)
        self.actor_network.train()
        return rlt.ActorOutput(action=unscaled_actions, log_prob=log_prob)

    def _log_prob(
        self, loc: torch.Tensor, scale_log: torch.Tensor, squashed_action: torch.Tensor
    ):
        """Log-prob of an already-squashed action, with tanh correction."""
        # This is not getting exported; we can use it
        # pyre-fixme[16]: `Tensor` has no attribute `exp`.
        n = torch.distributions.Normal(loc, scale_log.exp())
        raw_action = self.actor_network._atanh(squashed_action)
        log_prob = n.log_prob(raw_action)
        squash_correction = self.actor_network._squash_correction(squashed_action)
        log_prob = torch.sum(log_prob - squash_correction, dim=1)
        return log_prob

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def log_prob(
        self, scores: GaussianSamplerScore, squashed_action: torch.Tensor
    ) -> torch.Tensor:
        """Log-prob of `squashed_action` under the current Gaussian scores.

        Fix: the previous code called _log_prob without `squashed_action`
        (noted by pyre-fixme[20]), which raised a TypeError at runtime.
        """
        self.actor_network.eval()
        log_prob = self._log_prob(scores.loc, scores.scale_log, squashed_action)
        self.actor_network.train()
        return log_prob
| 2,378 | 41.482143 | 87 | py |
ReAgent | ReAgent-master/reagent/gym/policies/samplers/top_k_sampler.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
from reagent.gym.types import Sampler
class TopKSampler(Sampler):
    """Deterministically selects the k highest-scoring items per row."""

    def __init__(self, k: int):
        self.k = k

    def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
        # torch.topk returns (values, indices); only the indices are needed.
        _, chosen = torch.topk(scores, self.k, dim=1)
        batch_size = chosen.shape[0]
        # Deterministic selection: log-prob is reported as zero.
        return rlt.ActorOutput(action=chosen, log_prob=torch.zeros(batch_size, 1))

    def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError
| 634 | 27.863636 | 83 | py |
ReAgent | ReAgent-master/reagent/gym/policies/samplers/discrete_sampler.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.gym.types import Sampler
from reagent.models.dqn import INVALID_ACTION_CONSTANT
class SoftmaxActionSampler(Sampler):
    """
    Softmax sampler.
    Equation: http://incompleteideas.net/book/first/ebook/node17.html
    The action scores are logits.

    Supports decaying the temperature over time.

    Args:
        temperature: A measure of how uniformly random the distribution looks.
            The higher the temperature, the more uniform the sampling.
        temperature_decay: A multiplier by which temperature is reduced at each .update() call
        minimum_temperature: Minimum temperature, below which the temperature is not decayed further
    """

    def __init__(
        self,
        temperature: float = 1.0,
        temperature_decay: float = 1.0,
        minimum_temperature: float = 0.1,
    ):
        assert temperature > 0, f"Invalid non-positive temperature {temperature}."
        self.temperature = temperature
        self.temperature_decay = temperature_decay
        self.minimum_temperature = minimum_temperature
        assert (
            temperature_decay <= 1.0
        ), f"Invalid temperature_decay>1: {temperature_decay}."
        assert (
            minimum_temperature <= temperature
        ), f"minimum_temperature ({minimum_temperature}) exceeds initial temperature ({temperature})"

    def _get_distribution(
        self, scores: torch.Tensor
    ) -> torch.distributions.Categorical:
        """Categorical over actions built from temperature-scaled logits."""
        return torch.distributions.Categorical(logits=scores / self.temperature)

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
        """Sample a one-hot action (and its log-prob) per batch row."""
        assert (
            scores.dim() == 2
        ), f"scores shape is {scores.shape}, not (batch_size, num_actions)"
        batch_size, num_actions = scores.shape
        m = self._get_distribution(scores)
        raw_action = m.sample()
        assert raw_action.shape == (
            batch_size,
        ), f"{raw_action.shape} != ({batch_size}, )"
        action = F.one_hot(raw_action, num_actions)
        assert action.ndim == 2
        log_prob = m.log_prob(raw_action)
        assert log_prob.ndim == 1
        return rlt.ActorOutput(action=action, log_prob=log_prob)

    def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Log-probability of the given one-hot `action` under the softmax."""
        assert len(scores.shape) == 2, f"{scores.shape}"
        assert scores.shape == action.shape, f"{scores.shape} != {action.shape}"
        m = self._get_distribution(scores)
        # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
        return m.log_prob(action.argmax(dim=1))

    def entropy(self, scores: torch.Tensor) -> torch.Tensor:
        """
        Returns average policy entropy. Simple unweighted average across the batch.
        """
        assert len(scores.shape) == 2, f"{scores.shape}"
        m = self._get_distribution(scores)
        return m.entropy().mean()

    def update(self) -> None:
        """Decay the temperature, never dropping below minimum_temperature."""
        self.temperature *= self.temperature_decay
        self.temperature = max(self.temperature, self.minimum_temperature)
class GreedyActionSampler(Sampler):
    """
    Return the highest scoring action.
    """

    def _get_greedy_indices(self, scores: torch.Tensor) -> torch.Tensor:
        """Argmax action index for each row of ``scores``."""
        assert (
            len(scores.shape) == 2
        ), f"scores shape is {scores.shape}, not (batchsize, num_actions)"
        # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
        return scores.argmax(dim=1)

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
        """One-hot of the greedy action; log-prob 0 (probability 1)."""
        batch_size, num_actions = scores.shape
        raw_action = self._get_greedy_indices(scores)
        action = F.one_hot(raw_action, num_actions)
        assert action.shape == (batch_size, num_actions)
        return rlt.ActorOutput(
            action=action, log_prob=torch.zeros_like(raw_action, dtype=torch.float)
        )

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Log-probability of `action` under the (deterministic) greedy policy.

        The greedy action has probability 1 (log-prob 0); every other action
        has probability 0 (log-prob -inf). Fix: the previous code assigned
        -inf to the *matching* actions, inverting the semantics.
        """
        greedy_indices = self._get_greedy_indices(scores)
        # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
        match = greedy_indices == action.argmax(-1)
        lp = torch.zeros(scores.shape[0]).float()
        # Non-greedy actions are impossible under a deterministic policy.
        lp[~match] = -float("inf")
        return lp
class EpsilonGreedyActionSampler(Sampler):
    """
    Epsilon-Greedy Policy

    With probability epsilon, a random action is sampled. Otherwise,
    the highest scoring (greedy) action is chosen.

    Call update() to decay the amount of exploration by lowering
    `epsilon` by a factor of `epsilon_decay` (<=1) until we reach
    `minimum_epsilon`
    """

    def __init__(
        self, epsilon: float, epsilon_decay: float = 1.0, minimum_epsilon: float = 0.0
    ):
        self.epsilon = float(epsilon)
        assert epsilon_decay <= 1
        self.epsilon_decay = epsilon_decay
        # Fix: the floor must be compared against the starting epsilon; the
        # previous check compared it against epsilon_decay.
        assert minimum_epsilon <= epsilon
        self.minimum_epsilon = minimum_epsilon

    def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
        """Sample a one-hot action: greedy w.p. 1-eps, else uniform over the
        valid actions (those scoring above INVALID_ACTION_CONSTANT)."""
        assert scores.dim() == 2, (
            "scores dim is %d" % scores.dim()
        )  # batch_size x num_actions
        batch_size, num_actions = scores.shape

        # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
        argmax = F.one_hot(scores.argmax(dim=1), num_actions).bool()

        valid_actions_ind = (scores > INVALID_ACTION_CONSTANT).bool()
        num_valid_actions = valid_actions_ind.float().sum(1, keepdim=True)

        rand_prob = self.epsilon / num_valid_actions
        # Fix: torch.full_like requires a scalar fill value; broadcast the
        # per-row probability instead.
        p = torch.ones_like(scores) * rand_prob

        greedy_prob = 1 - self.epsilon + rand_prob
        p[argmax] = greedy_prob.squeeze()

        p[~valid_actions_ind] = 0.0

        # Each row must be a valid distribution. Fix: the previous
        # torch.isclose call was malformed (single boolean-tensor argument).
        assert torch.allclose(p.sum(1), torch.ones(p.shape[0]))

        m = torch.distributions.Categorical(probs=p)
        raw_action = m.sample()
        action = F.one_hot(raw_action, num_actions)
        assert action.shape == (batch_size, num_actions)
        log_prob = m.log_prob(raw_action)
        assert log_prob.shape == (batch_size,)
        return rlt.ActorOutput(action=action, log_prob=log_prob)

    def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Log-probability of `action` under the epsilon-greedy policy.

        NOTE(review): does not account for invalid-action masking; assumes all
        actions are valid. Fixes over the previous code: uses the greedy index
        directly (sample_action returns an ActorOutput, not a tensor), divides
        epsilon by the number of actions rather than the batch size, and
        returns log-probabilities rather than probabilities.
        """
        num_actions = scores.shape[1]
        # pyre-fixme[16]: `Tensor` has no attribute `argmax`.
        is_greedy = scores.argmax(-1) == action.argmax(-1)
        probs = torch.full((scores.shape[0],), self.epsilon / num_actions)
        probs[is_greedy] = 1 - self.epsilon + self.epsilon / num_actions
        return probs.log()

    def update(self) -> None:
        """Decay epsilon, never dropping below minimum_epsilon."""
        self.epsilon *= self.epsilon_decay
        if self.minimum_epsilon is not None:
            self.epsilon = max(self.epsilon, self.minimum_epsilon)
| 7,223 | 37.425532 | 101 | py |
ReAgent | ReAgent-master/reagent/gym/datasets/replay_buffer_dataset.py | #!/usr/bin/env python3
import logging
from typing import Optional, Callable
import torch
from reagent.gym.agents.agent import Agent
from reagent.gym.envs import EnvWrapper
from reagent.gym.preprocessors import (
make_replay_buffer_inserter,
make_replay_buffer_trainer_preprocessor,
)
from reagent.gym.types import Transition, Trajectory
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
logger = logging.getLogger(__name__)
class ReplayBufferDataset(torch.utils.data.IterableDataset):
    """Iterable dataset that interleaves environment rollouts with training.

    Each env step produces a Transition that is inserted into the replay
    buffer; every `training_frequency` global steps, once the buffer holds at
    least `batch_size` transitions, a sampled (and optionally preprocessed)
    training batch is yielded.
    """

    def __init__(
        self,
        env: EnvWrapper,
        agent: Agent,
        replay_buffer: ReplayBuffer,
        batch_size: int,
        training_frequency: int = 1,
        num_episodes: Optional[int] = None,
        max_steps: Optional[int] = None,
        post_episode_callback: Optional[Callable] = None,
        trainer_preprocessor=None,
        replay_buffer_inserter=None,
    ):
        super().__init__()
        self._env = env
        self._agent = agent
        self._replay_buffer = replay_buffer
        self._batch_size = batch_size
        self._training_frequency = training_frequency
        # None means iterate forever.
        self._num_episodes = num_episodes
        self._max_steps = max_steps
        self._post_episode_callback = post_episode_callback
        self._trainer_preprocessor = trainer_preprocessor
        # The inserter is required; create_for_trainer derives one if needed.
        assert replay_buffer_inserter is not None
        self._replay_buffer_inserter = replay_buffer_inserter

    # TODO: Just use kwargs here?
    @classmethod
    def create_for_trainer(
        cls,
        trainer,
        env: EnvWrapper,
        agent: Agent,
        replay_buffer: ReplayBuffer,
        batch_size: int,
        training_frequency: int = 1,
        num_episodes: Optional[int] = None,
        max_steps: Optional[int] = None,
        post_episode_callback: Optional[Callable] = None,
        trainer_preprocessor=None,
        replay_buffer_inserter=None,
        device=None,
    ):
        """Build the dataset, deriving a trainer preprocessor and a replay
        buffer inserter from the trainer/env when not given."""
        device = device or torch.device("cpu")
        if trainer_preprocessor is None:
            trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
                trainer, device, env
            )

        if replay_buffer_inserter is None:
            replay_buffer_inserter = make_replay_buffer_inserter(env)

        return cls(
            env=env,
            agent=agent,
            replay_buffer=replay_buffer,
            batch_size=batch_size,
            training_frequency=training_frequency,
            num_episodes=num_episodes,
            max_steps=max_steps,
            post_episode_callback=post_episode_callback,
            trainer_preprocessor=trainer_preprocessor,
            replay_buffer_inserter=replay_buffer_inserter,
        )

    def __iter__(self):
        """Roll out episodes, inserting transitions and yielding train batches."""
        mdp_id = 0
        global_num_steps = 0
        rewards = []

        # TODO: We probably should put member vars into local vars to
        # reduce indirection, improving perf

        while self._num_episodes is None or mdp_id < self._num_episodes:
            obs = self._env.reset()
            possible_actions_mask = self._env.possible_actions_mask
            terminal = False
            num_steps = 0
            episode_reward_sum = 0
            trajectory = Trajectory()
            while not terminal:
                action, log_prob = self._agent.act(obs, possible_actions_mask)
                next_obs, reward, terminal, info = self._env.step(action)
                next_possible_actions_mask = self._env.possible_actions_mask
                # Truncate episodes that exceed max_steps.
                if self._max_steps is not None and num_steps >= self._max_steps:
                    terminal = True

                # Only partially filled. Agent can fill in more fields.
                transition = Transition(
                    mdp_id=mdp_id,
                    sequence_number=num_steps,
                    observation=obs,
                    action=action,
                    reward=float(reward),
                    terminal=bool(terminal),
                    log_prob=log_prob,
                    possible_actions_mask=possible_actions_mask,
                )
                trajectory.add_transition(transition)
                self._replay_buffer_inserter(self._replay_buffer, transition)
                episode_reward_sum += reward
                # Yield a training batch every `training_frequency` global
                # steps once the buffer can fill a full batch.
                if (
                    global_num_steps % self._training_frequency == 0
                    and self._replay_buffer.size >= self._batch_size
                ):
                    train_batch = self._replay_buffer.sample_transition_batch(
                        batch_size=self._batch_size
                    )
                    if self._trainer_preprocessor:
                        train_batch = self._trainer_preprocessor(train_batch)
                    yield train_batch
                obs = next_obs
                possible_actions_mask = next_possible_actions_mask
                num_steps += 1
                global_num_steps += 1
            if self._post_episode_callback:
                self._post_episode_callback(trajectory, info)

            rewards.append(episode_reward_sum)
            mdp_id += 1
            logger.info(
                f"Training episode: {mdp_id}, total episode reward = {episode_reward_sum}"
            )

        logger.info(f"Episode rewards during training: {rewards}")
class OfflineReplayBufferDataset(torch.utils.data.IterableDataset):
    """
    Simply sampling from the replay buffer
    """

    def __init__(
        self,
        env: EnvWrapper,
        replay_buffer: ReplayBuffer,
        batch_size: int,
        num_batches: int,
        trainer_preprocessor=None,
    ):
        super().__init__()
        self._env = env
        self._replay_buffer = replay_buffer
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._trainer_preprocessor = trainer_preprocessor

    # TODO: Just use kwargs here?
    @classmethod
    def create_for_trainer(
        cls,
        trainer,
        env: EnvWrapper,
        replay_buffer: ReplayBuffer,
        batch_size: int,
        num_batches: int,
        trainer_preprocessor=None,
        device=None,
    ):
        """Build the dataset, deriving a trainer preprocessor when not given."""
        device = device or torch.device("cpu")
        if trainer_preprocessor is None:
            trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
                trainer, device, env
            )

        return cls(
            env=env,
            replay_buffer=replay_buffer,
            batch_size=batch_size,
            num_batches=num_batches,
            trainer_preprocessor=trainer_preprocessor,
        )

    def __iter__(self):
        """Yield `num_batches` sampled (and optionally preprocessed) batches."""
        for _ in range(self._num_batches):
            train_batch = self._replay_buffer.sample_transition_batch(
                batch_size=self._batch_size
            )
            if self._trainer_preprocessor:
                train_batch = self._trainer_preprocessor(train_batch)
            yield train_batch
| 6,916 | 33.242574 | 90 | py |
ReAgent | ReAgent-master/reagent/gym/datasets/episodic_dataset.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Optional
import torch
from reagent.gym.agents.agent import Agent
from reagent.gym.envs.gym import Gym
from reagent.gym.runners.gymrunner import run_episode
logger = logging.getLogger(__name__)
class EpisodicDataset(torch.utils.data.IterableDataset):
    """Iterable dataset that yields one full episode (as a dict) per item."""

    def __init__(
        self,
        env: Gym,
        agent: Agent,
        num_episodes: int,
        seed: int = 0,
        max_steps: Optional[int] = None,
    ):
        self.env = env
        self.agent = agent
        self.num_episodes = num_episodes
        self.seed = seed
        self.max_steps = max_steps

    def __iter__(self):
        # Reset the environment before generating episodes.
        self.env.reset()
        for episode_idx in range(self.num_episodes):
            episode = run_episode(
                self.env, self.agent, max_steps=self.max_steps, mdp_id=episode_idx
            )
            yield episode.to_dict()

    def __len__(self):
        return self.num_episodes
| 1,020 | 23.902439 | 72 | py |
ReAgent | ReAgent-master/reagent/gym/agents/agent.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Optional, Tuple, Union
import numpy as np
import torch
from reagent.gym.envs.env_wrapper import EnvWrapper
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.types import PostEpisode, PostStep, Trajectory, Transition
def _id(x):
return x
class Agent:
def __init__(
self,
policy: Policy,
post_transition_callback: Optional[PostStep] = None,
post_episode_callback: Optional[PostEpisode] = None,
obs_preprocessor=_id,
action_extractor=_id,
device: Optional[torch.device] = None,
):
"""
The Agent orchestrates the interactions on our RL components, given
the interactions with the environment.
Args:
policy: Policy that acts given preprocessed input
action_preprocessor: preprocesses action for environment
post_step: called after env.step(action).
Default post_step is to do nothing.
"""
device = device or torch.device("cpu")
self.policy = policy
self.obs_preprocessor = obs_preprocessor
self.action_extractor = action_extractor
self.post_transition_callback = post_transition_callback
self.post_episode_callback = post_episode_callback
self.device = device
@classmethod
def create_for_env(
cls,
env: EnvWrapper,
policy: Optional[Policy],
*,
device: Union[str, torch.device] = "cpu",
obs_preprocessor=None,
action_extractor=None,
**kwargs,
):
"""
If `policy` is not given, we will try to create a random policy
"""
if isinstance(device, str):
device = torch.device(device)
if obs_preprocessor is None:
obs_preprocessor = env.get_obs_preprocessor(device=device)
if action_extractor is None:
action_extractor = env.get_action_extractor()
if policy is None:
policy = make_random_policy_for_env(env)
return cls(
policy,
obs_preprocessor=obs_preprocessor,
action_extractor=action_extractor,
device=device,
**kwargs,
)
@classmethod
def create_for_env_with_serving_policy(
cls,
env: EnvWrapper,
serving_policy: Policy,
*,
obs_preprocessor=None,
action_extractor=None,
**kwargs,
):
# device shouldn't be provided as serving is CPU only
if obs_preprocessor is None:
obs_preprocessor = env.get_serving_obs_preprocessor()
if action_extractor is None:
action_extractor = env.get_serving_action_extractor()
return cls(
serving_policy,
obs_preprocessor=obs_preprocessor,
action_extractor=action_extractor,
**kwargs,
)
def act(
self, obs: Any, possible_actions_mask: Optional[np.ndarray] = None
) -> Tuple[Any, Optional[float]]:
"""Act on a single observation"""
# preprocess and convert to batch data
preprocessed_obs = self.obs_preprocessor(obs)
if possible_actions_mask is not None:
possible_actions_mask = torch.tensor(
possible_actions_mask, device=self.device
)
# store intermediate actor output for post_step
actor_output = self.policy.act(preprocessed_obs, possible_actions_mask)
log_prob = actor_output.log_prob
if log_prob is not None:
log_prob = log_prob.cpu().squeeze(0).item()
return self.action_extractor(actor_output), log_prob
def post_step(self, transition: Transition):
"""to be called after step(action)"""
if self.post_transition_callback is not None:
self.post_transition_callback(transition)
    def post_episode(self, trajectory: Trajectory):
        """To be called once after the episode completes (not after every step);
        forwards the full trajectory to `post_episode_callback`, if provided."""
        if self.post_episode_callback is not None:
            self.post_episode_callback(trajectory)
| 4,248 | 31.435115 | 79 | py |
ReAgent | ReAgent-master/reagent/gym/tests/test_gym_offline.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import pprint
import unittest
import uuid
import numpy as np
import pytest
import pytorch_lightning as pl
import torch
from parameterized import parameterized
from reagent.gym.agents.agent import Agent
from reagent.gym.datasets.replay_buffer_dataset import OfflineReplayBufferDataset
from reagent.gym.envs import Gym
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.model_managers.union import ModelManager__Union
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
from reagent.workflow.types import RewardOptions
# for seeding the environment
SEED = 0
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
These are trained offline.
"""
GYM_TESTS = [
("CEM Cartpole", "configs/world_model/cem_cartpole_offline.yaml"),
(
"CEM Single World Model Linear Dynamics",
"configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml",
),
(
"CEM Many World Models Linear Dynamics",
"configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml",
),
]
curr_dir = os.path.dirname(__file__)
class TestGymOffline(HorizonTestBase):
    """Runs every offline gym config on CPU, and on GPU when available."""

    def _run_offline(self, name: str, config_path: str, use_gpu: bool):
        # Shared driver for the parameterized CPU/GPU test variants below.
        self.run_from_config(
            run_test=run_test_offline,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=use_gpu,
        )
        logger.info(f"{name} passes!")

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(GYM_TESTS)
    def test_gym_offline_cpu(self, name: str, config_path: str):
        self._run_offline(name, config_path, use_gpu=False)

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(GYM_TESTS)
    @pytest.mark.serial
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_gym_offline_gpu(self, name: str, config_path: str):
        self._run_offline(name, config_path, use_gpu=True)
def evaluate_cem(env, manager, trainer_module, num_eval_episodes: int):
    """Roll out a non-serving CEM policy for `num_eval_episodes` episodes."""
    # NOTE: for CEM, serving isn't implemented
    eval_agent = Agent.create_for_env(
        env, manager.create_policy(trainer_module, serving=False)
    )
    return evaluate_for_n_episodes(
        n=num_eval_episodes, env=env, agent=eval_agent, max_steps=env.max_steps
    )
def identity_collate(batch):
    """Collate fn that unwraps a one-element list and returns its sole element."""
    assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}"
    (item,) = batch
    return item
def run_test_offline(
    env_name: str,
    model: ModelManager__Union,
    replay_memory_size: int,
    num_batches_per_epoch: int,
    num_train_epochs: int,
    passing_score_bar: float,
    num_eval_episodes: int,
    minibatch_size: int,
    use_gpu: bool,
):
    """Train a model fully offline on random-policy data, then evaluate it.

    Fills a replay buffer to capacity with a random policy, trains the
    manager's trainer on batches sampled from that fixed buffer via a
    Lightning Trainer, then asserts the mean evaluation reward clears
    `passing_score_bar`.
    """
    env = Gym(env_name=env_name)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
    manager = model.value
    trainer = manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )
    # first fill the replay buffer to burn_in
    replay_buffer = ReplayBuffer(
        replay_capacity=replay_memory_size, batch_size=minibatch_size
    )
    # always fill full RB
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(
        env=env,
        replay_buffer=replay_buffer,
        desired_size=replay_memory_size,
        agent=agent,
    )
    # NOTE(review): device stays None on CPU here (unlike other runners that
    # pass torch.device("cpu")) — presumably the dataset treats None as CPU.
    device = torch.device("cuda") if use_gpu else None
    dataset = OfflineReplayBufferDataset.create_for_trainer(
        trainer,
        env,
        replay_buffer,
        batch_size=minibatch_size,
        num_batches=num_batches_per_epoch,
        device=device,
    )
    data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate)
    pl_trainer = pl.Trainer(
        max_epochs=num_train_epochs,
        gpus=int(use_gpu),
        deterministic=True,
        default_root_dir=f"lightning_log_{str(uuid.uuid4())}",
    )
    pl_trainer.fit(trainer, data_loader)
    logger.info(f"Evaluating after training for {num_train_epochs} epochs: ")
    eval_rewards = evaluate_cem(env, manager, trainer, num_eval_episodes)
    mean_rewards = np.mean(eval_rewards)
    assert (
        mean_rewards >= passing_score_bar
    ), f"{mean_rewards} doesn't pass the bar {passing_score_bar}."
if __name__ == "__main__":
unittest.main()
| 4,868 | 30.412903 | 83 | py |
ReAgent | ReAgent-master/reagent/gym/tests/test_gym.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import pprint
import unittest
import uuid
from typing import Optional
import numpy as np
import pytest
import pytorch_lightning as pl
import torch
from parameterized import parameterized
from reagent.gym.agents.agent import Agent
from reagent.gym.datasets.episodic_dataset import (
EpisodicDataset,
)
from reagent.gym.datasets.replay_buffer_dataset import ReplayBufferDataset
from reagent.gym.envs import Env__Union
from reagent.gym.envs.env_wrapper import EnvWrapper
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.model_managers.union import ModelManager__Union
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
# for seeding the environment
SEED = 0
# exponential moving average parameter for tracking reward progress
REWARD_DECAY = 0.8
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Put on-policy gym tests here in the format (test name, path to yaml config).
Format path to be: "configs/<env_name>/<model_name>_<env_name>_online.yaml."
NOTE: These tests should ideally finish quickly (within 10 minutes) since they are
unit tests which are run many times.
"""
REPLAY_BUFFER_GYM_TESTS_1 = [
("Discrete CRR Cartpole", "configs/cartpole/discrete_crr_cartpole_online.yaml"),
("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"),
("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"),
("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"),
(
"Discrete DQN Open Gridworld",
"configs/open_gridworld/discrete_dqn_open_gridworld.yaml",
),
("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"),
("Continuous CRR Pendulum", "configs/pendulum/continuous_crr_pendulum_online.yaml"),
("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"),
]
REPLAY_BUFFER_GYM_TESTS_2 = [
("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"),
(
"Parametric SARSA Cartpole",
"configs/cartpole/parametric_sarsa_cartpole_online.yaml",
),
# Disabled for now because flaky.
# (
# "Sparse DQN Changing Arms",
# "configs/sparse/discrete_dqn_changing_arms_online.yaml",
# ),
("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"),
(
"SlateQ RecSim with Discount Scaled by Time Diff",
"configs/recsim/slate_q_recsim_online_with_time_scale.yaml",
),
(
"SlateQ RecSim multi selection",
"configs/recsim/slate_q_recsim_online_multi_selection.yaml",
),
(
"SlateQ RecSim multi selection average by current slate size",
"configs/recsim/slate_q_recsim_online_multi_selection_avg_curr.yaml",
),
("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"),
]
ONLINE_EPISODE_GYM_TESTS = [
(
"REINFORCE Cartpole online",
"configs/cartpole/discrete_reinforce_cartpole_online.yaml",
),
(
"PPO Cartpole online",
"configs/cartpole/discrete_ppo_cartpole_online.yaml",
),
]
curr_dir = os.path.dirname(__file__)
class TestGym(HorizonTestBase):
    """On-policy gym integration tests, parameterized by the config lists above."""

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_1)
    def test_replay_buffer_gym_cpu_1(self, name: str, config_path: str):
        self._test_replay_buffer_gym_cpu(name, config_path)

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_2)
    def test_replay_buffer_gym_cpu_2(self, name: str, config_path: str):
        self._test_replay_buffer_gym_cpu(name, config_path)

    def _test_replay_buffer_gym_cpu(self, name: str, config_path: str):
        # Shared CPU driver for both parameterized batches.
        logger.info(f"Starting {name} on CPU")
        self.run_from_config(
            run_test=run_test_replay_buffer,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=False,
        )
        logger.info(f"{name} passes!")

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_1)
    @pytest.mark.serial
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_replay_buffer_gym_gpu_1(self, name: str, config_path: str):
        self._test_replay_buffer_gym_gpu(name, config_path)

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_2)
    @pytest.mark.serial
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_replay_buffer_gym_gpu_2(self, name: str, config_path: str):
        self._test_replay_buffer_gym_gpu(name, config_path)

    def _test_replay_buffer_gym_gpu(self, name: str, config_path: str):
        # Shared GPU driver for both parameterized batches.
        logger.info(f"Starting {name} on GPU")
        self.run_from_config(
            run_test=run_test_replay_buffer,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=True,
        )
        logger.info(f"{name} passes!")

    # pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
    @parameterized.expand(ONLINE_EPISODE_GYM_TESTS)
    def test_online_episode_gym_cpu(self, name: str, config_path: str):
        # Episode-based (no replay buffer) variants run on CPU only.
        logger.info(f"Starting {name} on CPU")
        self.run_from_config(
            run_test=run_test_online_episode,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=False,
        )
        logger.info(f"{name} passes!")
def eval_policy(
    env: EnvWrapper,
    serving_policy: Policy,
    num_eval_episodes: int,
    serving: bool = True,
) -> np.ndarray:
    """Evaluate a policy for `num_eval_episodes` episodes; returns per-episode rewards."""
    if serving:
        eval_agent = Agent.create_for_env_with_serving_policy(env, serving_policy)
    else:
        eval_agent = Agent.create_for_env(env, serving_policy)
    eval_rewards = evaluate_for_n_episodes(
        n=num_eval_episodes,
        env=env,
        agent=eval_agent,
        max_steps=env.max_steps,
        num_processes=1,
    ).squeeze(1)
    logger.info("============Eval rewards==============")
    logger.info(eval_rewards)
    mean_eval = np.mean(eval_rewards)
    logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}")
    return np.array(eval_rewards)
def identity_collate(batch):
    """Pass-through collate fn: expects a one-item list and returns that item."""
    assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}"
    (first,) = batch
    return first
def run_test_replay_buffer(
    env: Env__Union,
    model: ModelManager__Union,
    replay_memory_size: int,
    train_every_ts: int,
    train_after_ts: int,
    num_train_episodes: int,
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
    minibatch_size: Optional[int] = None,
):
    """
    Run an online learning test with a replay buffer. The replay buffer is pre-filled, then the training starts.
    Each transition is added to the replay buffer immediately after it takes place.
    """
    env = env.value
    pl.seed_everything(SEED)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
    manager = model.value
    trainer = manager.build_trainer(
        use_gpu=use_gpu,
        normalization_data_map=normalization,
    )
    training_policy = manager.create_policy(trainer, serving=False)
    # Legacy (non-Lightning) trainers carry their own minibatch size; it is
    # used as the default and must agree with an explicitly passed one.
    if not isinstance(trainer, pl.LightningModule):
        if minibatch_size is None:
            minibatch_size = trainer.minibatch_size
        assert minibatch_size == trainer.minibatch_size
    assert minibatch_size is not None
    replay_buffer = ReplayBuffer(
        replay_capacity=replay_memory_size, batch_size=minibatch_size
    )
    device = torch.device("cuda") if use_gpu else torch.device("cpu")
    # first fill the replay buffer using random policy
    train_after_ts = max(train_after_ts, minibatch_size)
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(
        env=env,
        replay_buffer=replay_buffer,
        desired_size=train_after_ts,
        agent=agent,
    )
    # Switch to the learned policy for the actual training rollouts.
    agent = Agent.create_for_env(env, policy=training_policy, device=device)
    # TODO: Simplify this setup by creating LightningDataModule
    dataset = ReplayBufferDataset.create_for_trainer(
        trainer,
        env,
        agent,
        replay_buffer,
        batch_size=minibatch_size,
        training_frequency=train_every_ts,
        num_episodes=num_train_episodes,
        max_steps=200,
        device=device,
    )
    data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate)
    pl_trainer = pl.Trainer(
        max_epochs=1,
        gpus=int(use_gpu),
        deterministic=True,
        default_root_dir=f"lightning_log_{str(uuid.uuid4())}",
    )
    # Note: the fit() function below also evaluates the agent along the way
    # and adds the new transitions to the replay buffer, so it is training
    # on incrementally larger and larger buffers.
    pl_trainer.fit(trainer, data_loader)
    # TODO: Also check train_reward
    serving_policy = manager.create_policy(
        trainer, serving=True, normalization_data_map=normalization
    )
    eval_rewards = eval_policy(env, serving_policy, num_eval_episodes, serving=True)
    assert (
        eval_rewards.mean() >= passing_score_bar
    ), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n"
def run_test_online_episode(
    env: Env__Union,
    model: ModelManager__Union,
    num_train_episodes: int,
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
):
    """
    Run an online learning test. At the end of each episode training is run on the trajectory.
    """
    env = env.value
    pl.seed_everything(SEED)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
    manager = model.value
    trainer = manager.build_trainer(
        use_gpu=use_gpu,
        normalization_data_map=normalization,
    )
    policy = manager.create_policy(trainer, serving=False)
    device = torch.device("cuda") if use_gpu else torch.device("cpu")
    agent = Agent.create_for_env(env, policy, device=device)
    pl_trainer = pl.Trainer(
        max_epochs=1,
        gpus=int(use_gpu),
        deterministic=True,
        default_root_dir=f"lightning_log_{str(uuid.uuid4())}",
    )
    # EpisodicDataset yields whole trajectories collected by `agent`, so each
    # training step consumes one complete episode.
    dataset = EpisodicDataset(
        env=env, agent=agent, num_episodes=num_train_episodes, seed=SEED
    )
    data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate)
    pl_trainer.fit(trainer, data_loader)
    # Evaluation reuses the (non-serving) training agent here, unlike
    # run_test_replay_buffer which evaluates a serving policy.
    eval_rewards = evaluate_for_n_episodes(
        n=num_eval_episodes,
        env=env,
        agent=agent,
        max_steps=env.max_steps,
        num_processes=1,
    ).squeeze(1)
    assert (
        eval_rewards.mean() >= passing_score_bar
    ), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n"
if __name__ == "__main__":
unittest.main()
| 11,439 | 33.251497 | 112 | py |
ReAgent | ReAgent-master/reagent/gym/tests/test_world_model.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import unittest
from typing import Dict, List, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.evaluation.world_model_evaluator import (
FeatureImportanceEvaluator,
FeatureSensitivityEvaluator,
)
from reagent.gym.agents.agent import Agent
from reagent.gym.envs import EnvWrapper, Gym
from reagent.gym.envs.pomdp.state_embed_env import StateEmbedEnvironment
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.preprocessors import make_replay_buffer_trainer_preprocessor
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.model_managers.union import ModelManager__Union
from reagent.models.world_model import MemoryNetwork
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
from reagent.workflow.types import RewardOptions
from tqdm import tqdm
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
curr_dir = os.path.dirname(__file__)
SEED = 0
def calculate_feature_importance(
    env: gym.Env,
    trainer: MDNRNNTrainer,
    use_gpu: bool,
    test_batch: rlt.MemoryNetworkInput,
):
    """Score each action/state feature by the loss increase it causes when masked."""
    assert isinstance(env.action_space, gym.spaces.Discrete)
    assert isinstance(env.observation_space, gym.spaces.Box)
    assert len(env.observation_space.shape) == 1
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    evaluator = FeatureImportanceEvaluator(
        trainer,
        discrete_action=True,
        state_feature_num=state_dim,
        action_feature_num=action_dim,
        sorted_state_feature_start_indices=list(range(state_dim)),
        sorted_action_feature_start_indices=list(range(action_dim)),
    )
    loss_increase = evaluator.evaluate(test_batch)["feature_loss_increase"]
    importance_map = {}
    # Action features occupy the first `action_dim` slots of the loss vector.
    for i in range(action_dim):
        score = loss_increase[i].item()
        print("action {}, feature importance: {}".format(i, score))
        importance_map[f"action{i}"] = score
    # State features follow, offset by `action_dim`.
    for i in range(state_dim):
        score = loss_increase[i + action_dim].item()
        print("state {}, feature importance: {}".format(i, score))
        importance_map[f"state{i}"] = score
    return importance_map
def calculate_feature_sensitivity(
    env: EnvWrapper,
    trainer: MDNRNNTrainer,
    use_gpu: bool,
    test_batch: rlt.MemoryNetworkInput,
):
    """Score each state feature's sensitivity under the trained world model."""
    assert isinstance(env.action_space, gym.spaces.Discrete)
    assert isinstance(env.observation_space, gym.spaces.Box)
    assert len(env.observation_space.shape) == 1
    state_dim = env.observation_space.shape[0]
    evaluator = FeatureSensitivityEvaluator(
        trainer,
        state_feature_num=state_dim,
        sorted_state_feature_start_indices=list(range(state_dim)),
    )
    sensitivity_vector = evaluator.evaluate(test_batch)["feature_sensitivity"]
    sensitivity_map = {}
    for i in range(state_dim):
        score = sensitivity_vector[i].item()
        sensitivity_map["state" + str(i)] = score
        print("state {}, feature sensitivity: {}".format(i, score))
    return sensitivity_map
def train_mdnrnn(
    env: EnvWrapper,
    trainer: MDNRNNTrainer,
    trainer_preprocessor,
    num_train_transitions: int,
    seq_len: int,
    batch_size: int,
    num_train_epochs: int,
    # for optional validation
    test_replay_buffer=None,
):
    """Train the MDNRNN world model on random-policy transitions.

    Fills a stacked replay buffer (stack_size=seq_len) with
    `num_train_transitions` transitions collected by a random policy, then
    runs `num_train_epochs` of manual (non-Lightning) gradient steps.
    Returns the trainer.
    """
    train_replay_buffer = ReplayBuffer(
        replay_capacity=num_train_transitions,
        batch_size=batch_size,
        stack_size=seq_len,
        return_everything_as_stack=True,
    )
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(env, train_replay_buffer, num_train_transitions, agent)
    num_batch_per_epoch = train_replay_buffer.size // batch_size
    logger.info("Made RBs, starting to train now!")
    optimizer = trainer.configure_optimizers()[0]
    for _ in range(num_train_epochs):
        for i in range(num_batch_per_epoch):
            batch = train_replay_buffer.sample_transition_batch(batch_size=batch_size)
            preprocessed_batch = trainer_preprocessor(batch)
            # train_step_gen is a generator; only its first yielded loss is used.
            loss = next(trainer.train_step_gen(preprocessed_batch, i))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # validation
        if test_replay_buffer is not None:
            with torch.no_grad():
                trainer.memory_network.mdnrnn.eval()
                test_batch = test_replay_buffer.sample_transition_batch(
                    batch_size=batch_size
                )
                preprocessed_test_batch = trainer_preprocessor(test_batch)
                # NOTE(review): valid_losses is computed but never logged or
                # returned — consider logging it or dropping the computation.
                valid_losses = trainer.get_loss(preprocessed_test_batch)
                trainer.memory_network.mdnrnn.train()
    return trainer
def train_mdnrnn_and_compute_feature_stats(
    env_name: str,
    model: ModelManager__Union,
    num_train_transitions: int,
    num_test_transitions: int,
    seq_len: int,
    batch_size: int,
    num_train_epochs: int,
    use_gpu: bool,
    saved_mdnrnn_path: Optional[str] = None,
):
    """Train MDNRNN Memory Network and compute feature importance/sensitivity."""
    env: gym.Env = Gym(env_name=env_name)
    env.seed(SEED)
    manager = model.value
    trainer = manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(env),
    )
    device = "cuda" if use_gpu else "cpu"
    # pyre-fixme[6]: Expected `device` for 2nd param but got `str`.
    trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env)
    # Held-out transitions; used for validation during training and for the
    # final importance/sensitivity evaluation below.
    test_replay_buffer = ReplayBuffer(
        replay_capacity=num_test_transitions,
        batch_size=batch_size,
        stack_size=seq_len,
        return_everything_as_stack=True,
    )
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(env, test_replay_buffer, num_test_transitions, agent)
    if saved_mdnrnn_path is None:
        # train from scratch
        trainer = train_mdnrnn(
            env=env,
            trainer=trainer,
            trainer_preprocessor=trainer_preprocessor,
            num_train_transitions=num_train_transitions,
            seq_len=seq_len,
            batch_size=batch_size,
            num_train_epochs=num_train_epochs,
            test_replay_buffer=test_replay_buffer,
        )
    else:
        # load a pretrained model, and just evaluate it
        trainer.memory_network.mdnrnn.load_state_dict(torch.load(saved_mdnrnn_path))
    # Evaluate in eval mode with gradients disabled, on the full test buffer.
    with torch.no_grad():
        trainer.memory_network.mdnrnn.eval()
        test_batch = test_replay_buffer.sample_transition_batch(
            batch_size=test_replay_buffer.size
        )
        preprocessed_test_batch = trainer_preprocessor(test_batch)
        feature_importance = calculate_feature_importance(
            env=env,
            trainer=trainer,
            use_gpu=use_gpu,
            test_batch=preprocessed_test_batch,
        )
        feature_sensitivity = calculate_feature_sensitivity(
            env=env,
            trainer=trainer,
            use_gpu=use_gpu,
            test_batch=preprocessed_test_batch,
        )
        # Restore training mode before handing the network back.
        trainer.memory_network.mdnrnn.train()
    return feature_importance, feature_sensitivity
def create_embed_rl_dataset(
    env: EnvWrapper,
    memory_network: MemoryNetwork,
    num_state_embed_transitions: int,
    batch_size: int,
    seq_len: int,
    hidden_dim: int,
    use_gpu: bool,
):
    """Fill a replay buffer with transitions in MDNRNN-embedded state space.

    Returns the filled buffer plus the min/max values seen across embedded
    states and next-states.

    NOTE(review): `hidden_dim` and `use_gpu` are currently unused here.
    """
    assert isinstance(env.action_space, gym.spaces.Discrete)
    assert isinstance(env.observation_space, gym.spaces.Box)
    assert len(env.observation_space.shape) == 1
    logger.info("Starting to create embedded RL Dataset!")
    # seqlen+1 because MDNRNN embeds the first seq_len steps and then
    # the embedded state will be concatenated with the last step
    # Ie.. (o1,o2,...,on) -> RNN -> h1,h2,...,hn
    # and we set s_{n+1} = [o_{n+1}, h_n]
    embed_env = StateEmbedEnvironment(
        gym_env=env, mdnrnn=memory_network, max_embed_seq_len=seq_len + 1
    )
    # now create a filled replay buffer of embeddings
    # new obs shape dim = state_dim + hidden_dim
    embed_rb = ReplayBuffer(
        replay_capacity=num_state_embed_transitions, batch_size=batch_size, stack_size=1
    )
    random_policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=random_policy)
    fill_replay_buffer(
        env=embed_env,
        replay_buffer=embed_rb,
        desired_size=num_state_embed_transitions,
        agent=agent,
    )
    # Sample everything back out once just to compute the state value range.
    batch = embed_rb.sample_transition_batch(batch_size=num_state_embed_transitions)
    state_min = min(batch.state.min(), batch.next_state.min()).item()
    state_max = max(batch.state.max(), batch.next_state.max()).item()
    logger.info(
        f"Finished making embed dataset with size {embed_rb.size}, "
        f"min {state_min}, max {state_max}"
    )
    return embed_rb, state_min, state_max
def train_mdnrnn_and_train_on_embedded_env(
    env_name: str,
    embedding_model: ModelManager__Union,
    num_embedding_train_transitions: int,
    seq_len: int,
    batch_size: int,
    num_embedding_train_epochs: int,
    train_model: ModelManager__Union,
    num_state_embed_transitions: int,
    num_agent_train_epochs: int,
    num_agent_eval_epochs: int,
    use_gpu: bool,
    passing_score_bar: float,
    # pyre-fixme[9]: saved_mdnrnn_path has type `str`; used as `None`.
    saved_mdnrnn_path: str = None,
):
    """Train an agent on embedded states by the MDNRNN.

    Pipeline: (1) train or load the MDNRNN world model, (2) build a replay
    buffer of MDNRNN-embedded transitions, (3) train `train_model`'s agent on
    that buffer, (4) evaluate on the embedded env and assert the reward bar.
    """
    env = Gym(env_name=env_name)
    env.seed(SEED)
    embedding_manager = embedding_model.value
    embedding_trainer = embedding_manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(env),
    )
    device = "cuda" if use_gpu else "cpu"
    embedding_trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
        embedding_trainer,
        # pyre-fixme[6]: Expected `device` for 2nd param but got `str`.
        device,
        env,
    )
    if saved_mdnrnn_path is None:
        # train from scratch
        embedding_trainer = train_mdnrnn(
            env=env,
            trainer=embedding_trainer,
            trainer_preprocessor=embedding_trainer_preprocessor,
            num_train_transitions=num_embedding_train_transitions,
            seq_len=seq_len,
            batch_size=batch_size,
            num_train_epochs=num_embedding_train_epochs,
        )
    else:
        # load a pretrained model, and just evaluate it
        embedding_trainer.memory_network.mdnrnn.load_state_dict(
            torch.load(saved_mdnrnn_path)
        )
    # create embedding dataset
    embed_rb, state_min, state_max = create_embed_rl_dataset(
        env=env,
        memory_network=embedding_trainer.memory_network,
        num_state_embed_transitions=num_state_embed_transitions,
        batch_size=batch_size,
        seq_len=seq_len,
        hidden_dim=embedding_trainer.params.hidden_size,
        use_gpu=use_gpu,
    )
    # The agent trains/evaluates inside the embedded-observation environment,
    # bounded by the state range observed while building the dataset.
    embed_env = StateEmbedEnvironment(
        gym_env=env,
        mdnrnn=embedding_trainer.memory_network,
        max_embed_seq_len=seq_len,
        state_min_value=state_min,
        state_max_value=state_max,
    )
    agent_manager = train_model.value
    agent_trainer = agent_manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        # pyre-fixme[6]: Expected `EnvWrapper` for 1st param but got
        # `StateEmbedEnvironment`.
        normalization_data_map=build_normalizer(embed_env),
    )
    device = "cuda" if use_gpu else "cpu"
    agent_trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
        agent_trainer,
        # pyre-fixme[6]: Expected `device` for 2nd param but got `str`.
        device,
        env,
    )
    num_batch_per_epoch = embed_rb.size // batch_size
    # FIXME: This has to be wrapped in dataloader
    for epoch in range(num_agent_train_epochs):
        for _ in tqdm(range(num_batch_per_epoch), desc=f"epoch {epoch}"):
            batch = embed_rb.sample_transition_batch(batch_size=batch_size)
            preprocessed_batch = agent_trainer_preprocessor(batch)
            # FIXME: This should be fitted with Lightning's trainer
            agent_trainer.train(preprocessed_batch)
    # evaluate model
    # NOTE(review): this empty-list assignment is dead — `rewards` is
    # immediately overwritten by evaluate_for_n_episodes below.
    rewards = []
    policy = agent_manager.create_policy(agent_trainer, serving=False)
    # pyre-fixme[6]: Expected `EnvWrapper` for 1st param but got
    # `StateEmbedEnvironment`.
    agent = Agent.create_for_env(embed_env, policy=policy, device=device)
    # num_processes=1 needed to avoid workers from dying on CircleCI tests
    rewards = evaluate_for_n_episodes(
        n=num_agent_eval_epochs,
        # pyre-fixme[6]: Expected `EnvWrapper` for 2nd param but got
        # `StateEmbedEnvironment`.
        env=embed_env,
        agent=agent,
        num_processes=1,
    )
    assert (
        np.mean(rewards) >= passing_score_bar
    ), f"average reward doesn't pass our bar {passing_score_bar}"
    return rewards
class TestWorldModel(HorizonTestBase):
    """MDNRNN world-model integration tests."""

    @staticmethod
    def verify_result(result_dict: Dict[str, float], expected_top_features: List[str]):
        """Assert that the highest-scoring feature is one of the expected ones."""
        top_feature = max(result_dict, key=result_dict.get)
        assert (
            top_feature in expected_top_features
        ), f"top_feature: {top_feature}, expected_top_features: {expected_top_features}"

    def test_mdnrnn(self):
        """Test MDNRNN feature importance and feature sensitivity."""
        config_path = "configs/world_model/cartpole_features.yaml"
        feature_importance, feature_sensitivity = self.run_from_config(
            run_test=train_mdnrnn_and_compute_feature_stats,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=False,
        )
        TestWorldModel.verify_result(feature_importance, ["state1", "state3"])
        TestWorldModel.verify_result(feature_sensitivity, ["state3"])
        logger.info("MDNRNN feature test passes!")

    @unittest.skip("This test has to be migrated to Lightning")
    def test_world_model(self):
        """Train DQN on POMDP given features from world model."""
        config_path = "configs/world_model/discrete_dqn_string.yaml"
        HorizonTestBase.run_from_config(
            run_test=train_mdnrnn_and_train_on_embedded_env,
            config_path=os.path.join(curr_dir, config_path),
            use_gpu=False,
        )
        logger.info("World model test passes!")
if __name__ == "__main__":
unittest.main()
| 15,345 | 35.451306 | 88 | py |
ReAgent | ReAgent-master/reagent/gym/tests/preprocessors/test_default_preprocessors.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import gym
import numpy.testing as npt
import torch
import torch.nn.functional as F
from reagent.gym.envs import Gym
try:
from reagent.gym.envs import RecSim
HAS_RECSIM = True
except ModuleNotFoundError:
HAS_RECSIM = False
class TestMakeDefaultObsPreprocessor(unittest.TestCase):
    """Checks the observation preprocessors produced by the env wrappers."""

    def test_box(self):
        """A Box observation becomes a (1, obs_dim) float32 tensor on CPU."""
        env = Gym(env_name="CartPole-v0")
        obs_preprocessor = env.get_obs_preprocessor()
        obs = env.reset()
        state = obs_preprocessor(obs)
        self.assertTrue(state.has_float_features_only)
        self.assertEqual(state.float_features.shape, (1, obs.shape[0]))
        self.assertEqual(state.float_features.dtype, torch.float32)
        self.assertEqual(state.float_features.device, torch.device("cpu"))
        npt.assert_array_almost_equal(obs, state.float_features.squeeze(0))

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_box_cuda(self):
        """Same as test_box, but the tensor must land on the CUDA device."""
        env = Gym(env_name="CartPole-v0")
        device = torch.device("cuda")
        obs_preprocessor = env.get_obs_preprocessor(device=device)
        obs = env.reset()
        state = obs_preprocessor(obs)
        self.assertTrue(state.has_float_features_only)
        self.assertEqual(state.float_features.shape, (1, obs.shape[0]))
        self.assertEqual(state.float_features.dtype, torch.float32)
        # `device` doesn't have index. So we need this.
        x = torch.zeros(1, device=device)
        self.assertEqual(state.float_features.device, x.device)
        npt.assert_array_almost_equal(obs, state.float_features.cpu().squeeze(0))

    @unittest.skipIf(not HAS_RECSIM, "Recsim is not installed")
    def test_recsim_interest_evolution(self):
        """RecSim obs splits into user float features and per-candidate doc features."""
        num_candidate = 10
        env = RecSim(
            num_candidates=num_candidate, slate_size=3, resample_documents=False
        )
        obs_preprocessor = env.get_obs_preprocessor()
        obs = env.reset()
        state = obs_preprocessor(obs)
        self.assertFalse(state.has_float_features_only)
        self.assertEqual(state.float_features.shape, (1, obs["user"].shape[0]))
        self.assertEqual(state.float_features.dtype, torch.float32)
        self.assertEqual(state.float_features.device, torch.device("cpu"))
        npt.assert_array_almost_equal(obs["user"], state.float_features.squeeze(0))
        doc_float_features = state.candidate_docs.float_features
        self.assertIsNotNone(doc_float_features)
        self.assertEqual(
            doc_float_features.shape, (1, num_candidate, obs["doc"]["0"].shape[0])
        )
        self.assertEqual(doc_float_features.dtype, torch.float32)
        self.assertEqual(doc_float_features.device, torch.device("cpu"))
        # Candidate docs must appear in the same order as in the obs dict.
        for i, v in enumerate(obs["doc"].values()):
            npt.assert_array_almost_equal(v, doc_float_features[0, i])

    @unittest.skipIf(not HAS_RECSIM, "Recsim is not installed")
    def test_recsim_interest_exploration(self):
        """Doc features are one-hot cluster_id concatenated with the scalar quality."""
        num_candidate = 10
        env = RecSim(
            num_candidates=num_candidate,
            slate_size=3,
            resample_documents=False,
            is_interest_exploration=True,
        )
        obs_preprocessor = env.get_obs_preprocessor()
        obs = env.reset()
        state = obs_preprocessor(obs)
        self.assertFalse(state.has_float_features_only)
        self.assertEqual(state.float_features.shape, (1, obs["user"].shape[0]))
        self.assertEqual(state.float_features.dtype, torch.float32)
        self.assertEqual(state.float_features.device, torch.device("cpu"))
        npt.assert_array_almost_equal(obs["user"], state.float_features.squeeze(0))
        doc_float_features = state.candidate_docs.float_features
        self.assertIsNotNone(doc_float_features)
        quality_len = 1
        expected_doc_feature_length = (
            env.observation_space["doc"]["0"]["cluster_id"].n + quality_len
        )
        self.assertEqual(
            doc_float_features.shape, (1, num_candidate, expected_doc_feature_length)
        )
        self.assertEqual(doc_float_features.dtype, torch.float32)
        self.assertEqual(doc_float_features.device, torch.device("cpu"))
        for i, v in enumerate(obs["doc"].values()):
            expected_doc_feature = torch.cat(
                [
                    F.one_hot(torch.tensor(v["cluster_id"]), 2).float(),
                    # This needs unsqueeze because it's a scalar
                    torch.tensor(v["quality"]).unsqueeze(0).float(),
                ],
                dim=0,
            )
            npt.assert_array_almost_equal(
                expected_doc_feature, doc_float_features[0, i]
            )
| 4,748 | 40.657895 | 85 | py |
ReAgent | ReAgent-master/reagent/gym/tests/preprocessors/test_replay_buffer_inserters.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import gym
import numpy as np
import numpy.testing as npt
import torch
from reagent.gym.envs import EnvWrapper
from reagent.gym.preprocessors import make_replay_buffer_inserter
from reagent.gym.types import Transition
from reagent.replay_memory import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
logger = logging.getLogger(__name__)
try:
from reagent.gym.envs import RecSim
HAS_RECSIM = True
except ModuleNotFoundError:
HAS_RECSIM = False
def _create_replay_buffer_and_insert(env: EnvWrapper):
    """Roll out at most 5 steps in ``env``, inserting every transition into a
    small ReplayBuffer; return the buffer plus the raw inserted records."""
    env.seed(1)
    buffer = ReplayBuffer(replay_capacity=6, batch_size=1)
    inserter = make_replay_buffer_inserter(env)
    obs = env.reset()
    inserted = []
    done = False
    i = 0
    while not done and i < 5:
        logger.info(f"Iteration: {i}")
        action = env.action_space.sample()
        next_obs, reward, done, _ = env.step(action)
        inserted.append(
            {
                "observation": obs,
                "action": action,
                "reward": reward,
                "terminal": done,
            }
        )
        inserter(
            buffer,
            Transition(
                mdp_id=0,
                sequence_number=i,
                observation=obs,
                action=action,
                reward=reward,
                terminal=done,
                log_prob=0.0,
            ),
        )
        obs = next_obs
        i += 1
    return buffer, inserted
class TestBasicReplayBufferInserter(HorizonTestBase):
    """Sanity-check the generic replay-buffer inserter on a flat Gym env."""

    def test_cartpole(self):
        env = gym.make("CartPole-v0")
        buffer, inserted = _create_replay_buffer_and_insert(env)
        batch = buffer.sample_transition_batch(indices=torch.tensor([0]))
        # The transition sampled at index 0 must reproduce step 0
        # (state/action) and step 1 (next_state/next_action) exactly.
        npt.assert_array_almost_equal(
            inserted[0]["observation"], batch.state.squeeze(0)
        )
        npt.assert_array_almost_equal(
            inserted[1]["observation"], batch.next_state.squeeze(0)
        )
        npt.assert_array_equal(inserted[0]["action"], batch.action.squeeze(0))
        npt.assert_array_equal(inserted[1]["action"], batch.next_action.squeeze(0))
class TestRecSimReplayBufferInserter(HorizonTestBase):
    """Checks that RecSim transitions round-trip through the replay buffer:
    inserted observations, actions, and responses must come back unchanged
    from ``sample_transition_batch``."""

    @unittest.skipIf(not HAS_RECSIM, "RecSim not installed")
    def test_recsim_interest_evolution(self):
        num_candidate = 10
        slate_size = 3
        env = RecSim(
            num_candidates=num_candidate,
            slate_size=slate_size,
            resample_documents=False,
        )
        replay_buffer, inserted = _create_replay_buffer_and_insert(env)
        # Deterministically sample the very first transition.
        batch = replay_buffer.sample_transition_batch(indices=torch.tensor([0]))
        npt.assert_array_almost_equal(
            inserted[0]["observation"]["user"], batch.state.squeeze(0)
        )
        npt.assert_array_almost_equal(
            inserted[1]["observation"]["user"], batch.next_state.squeeze(0)
        )
        # In interest-evolution mode each doc is a plain feature array.
        docs = list(inserted[0]["observation"]["doc"].values())
        next_docs = list(inserted[1]["observation"]["doc"].values())
        for i in range(num_candidate):
            npt.assert_array_equal(docs[i], batch.doc.squeeze(0)[i])
            npt.assert_array_equal(next_docs[i], batch.next_doc.squeeze(0)[i])
        npt.assert_array_equal(inserted[0]["action"], batch.action.squeeze(0))
        npt.assert_array_equal(inserted[1]["action"], batch.next_action.squeeze(0))
        # The first transition's response_* fields are expected to be all
        # zeros (one entry per slate position).
        npt.assert_array_equal([0, 0, 0], batch.response_click.squeeze(0))
        npt.assert_array_equal([0, 0, 0], batch.response_cluster_id.squeeze(0))
        npt.assert_array_equal([0, 0, 0], batch.response_liked.squeeze(0))
        npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_quality.squeeze(0))
        npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_watch_time.squeeze(0))
        # The next-step response must match what the env actually returned.
        resp = inserted[1]["observation"]["response"]
        for i in range(slate_size):
            npt.assert_array_equal(
                resp[i]["click"], batch.next_response_click.squeeze(0)[i]
            )
            npt.assert_array_equal(
                resp[i]["cluster_id"], batch.next_response_cluster_id.squeeze(0)[i]
            )
            npt.assert_array_equal(
                resp[i]["liked"], batch.next_response_liked.squeeze(0)[i]
            )
            npt.assert_array_almost_equal(
                resp[i]["quality"], batch.next_response_quality.squeeze(0)[i]
            )
            npt.assert_array_almost_equal(
                resp[i]["watch_time"], batch.next_response_watch_time.squeeze(0)[i]
            )

    @unittest.skipIf(not HAS_RECSIM, "RecSim not installed")
    def test_recsim_interest_exploration(self):
        num_candidate = 10
        slate_size = 3
        env = RecSim(
            num_candidates=num_candidate,
            slate_size=slate_size,
            resample_documents=False,
            is_interest_exploration=True,
        )
        replay_buffer, inserted = _create_replay_buffer_and_insert(env)
        batch = replay_buffer.sample_transition_batch(indices=torch.tensor([0]))
        npt.assert_array_almost_equal(
            inserted[0]["observation"]["user"].astype(np.float32),
            batch.state.squeeze(0),
        )
        npt.assert_array_almost_equal(
            inserted[1]["observation"]["user"], batch.next_state.squeeze(0)
        )
        # In interest-exploration mode each doc is a dict; only the scalar
        # "quality" entry is compared against the buffer's doc_quality field.
        docs = list(inserted[0]["observation"]["doc"].values())
        next_docs = list(inserted[1]["observation"]["doc"].values())
        for i in range(num_candidate):
            npt.assert_array_almost_equal(
                docs[i]["quality"], batch.doc_quality.squeeze(0)[i]
            )
            npt.assert_array_almost_equal(
                next_docs[i]["quality"], batch.next_doc_quality.squeeze(0)[i]
            )
        npt.assert_array_equal(inserted[0]["action"], batch.action.squeeze(0))
        npt.assert_array_equal(inserted[1]["action"], batch.next_action.squeeze(0))
        # First transition's response fields are expected to be all zeros.
        npt.assert_array_equal([0, 0, 0], batch.response_click.squeeze(0))
        npt.assert_array_equal([0, 0, 0], batch.response_cluster_id.squeeze(0))
        npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_quality.squeeze(0))
        resp = inserted[1]["observation"]["response"]
        for i in range(slate_size):
            npt.assert_array_equal(
                resp[i]["click"], batch.next_response_click.squeeze(0)[i]
            )
            npt.assert_array_equal(
                resp[i]["cluster_id"], batch.next_response_cluster_id.squeeze(0)[i]
            )
            npt.assert_array_almost_equal(
                resp[i]["quality"].astype(np.float32),
                batch.next_response_quality.squeeze(0)[i],
            )
ReAgent | ReAgent-master/reagent/gym/runners/gymrunner.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import pickle
from typing import Optional, Sequence
import numpy as np
import torch.multiprocessing as mp
from reagent.core.multiprocess_utils import (
unwrap_function_outputs,
wrap_function_arguments,
)
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.gym.agents.agent import Agent
from reagent.gym.envs import EnvWrapper
from reagent.gym.types import Trajectory, Transition
logger = logging.getLogger(__name__)
def run_episode(
    env: EnvWrapper, agent: Agent, mdp_id: int = 0, max_steps: Optional[int] = None
) -> Trajectory:
    """
    Roll out one episode of ``env`` under ``agent`` and return the Trajectory.

    The episode ends when the environment reports terminal, or after
    ``max_steps`` steps, in which case the final transition is marked
    terminal. ``mdp_id`` is stamped onto every transition.
    """
    trajectory = Trajectory()
    obs = env.reset()
    mask = env.possible_actions_mask
    done = False
    step = 0
    while not done:
        action, log_prob = agent.act(obs, mask)
        next_obs, reward, done, _ = env.step(action)
        next_mask = env.possible_actions_mask
        # Treat exhausting the step budget as reaching a terminal state.
        if max_steps is not None and step >= (max_steps - 1):
            done = True
        # Only partially filled; the agent may fill in more fields.
        transition = Transition(
            mdp_id=mdp_id,
            sequence_number=step,
            observation=obs,
            action=action,
            reward=float(reward),
            terminal=bool(done),
            log_prob=log_prob,
            possible_actions_mask=mask,
        )
        agent.post_step(transition)
        trajectory.add_transition(transition)
        SummaryWriterContext.increase_global_step()
        obs, mask = next_obs, next_mask
        step += 1
    agent.post_episode(trajectory)
    return trajectory
def evaluate_for_n_episodes(
    n: int,
    env: EnvWrapper,
    agent: Agent,
    max_steps: Optional[int] = None,
    gammas: Sequence[float] = (1.0,),
    num_processes: int = 0,
) -> np.ndarray:
    """Return an np array A of shape n x len(gammas)
    where A[i, j] = ith episode evaluated with gamma=gammas[j].
    Runs environments on num_processes, via multiprocessing.Pool.

    :param n: number of episodes to run.
    :param max_steps: per-episode step cap, forwarded to run_episode.
    :param gammas: discount factors each episode is scored under.
    :param num_processes: pool size; values <= 1 run serially in-process.
    """
    # Never spawn more workers than there are episodes.
    num_processes = min(num_processes, n)

    def evaluate_one_episode(
        mdp_id: int,
        env: EnvWrapper,
        agent: Agent,
        max_steps: Optional[int],
        gammas: Sequence[float],
    ) -> np.ndarray:
        # Run a single episode and score its cumulative reward under
        # every requested gamma.
        rewards = np.empty((len(gammas),))
        trajectory = run_episode(
            env=env, agent=agent, mdp_id=mdp_id, max_steps=max_steps
        )
        for i_gamma, gamma in enumerate(gammas):
            rewards[i_gamma] = trajectory.calculate_cumulative_reward(gamma)
        return rewards

    rewards = None
    if num_processes > 1:
        try:
            with mp.Pool(num_processes) as pool:
                rewards = unwrap_function_outputs(
                    pool.map(
                        wrap_function_arguments(
                            evaluate_one_episode,
                            env=env,
                            agent=agent,
                            max_steps=max_steps,
                            gammas=gammas,
                        ),
                        range(n),
                    )
                )
        except pickle.PickleError as e:
            # NOTE: Probably tried to perform mixed serialization of ScriptModule
            # and non-script modules. This isn't supported right now.
            logger.info(e)
            logger.info(
                "This is probably from trying to serialize a TorchScript module, "
                "wrapped in a non-script module. Mixed serialization is not supported."
            )
    # if we didn't run multiprocessing, or it failed, try single-processing instead.
    if rewards is None:
        rewards = []
        for i in range(n):
            rewards.append(
                evaluate_one_episode(
                    mdp_id=i, env=env, agent=agent, max_steps=max_steps, gammas=gammas
                )
            )
        rewards = np.array(rewards)
    # Log a per-gamma summary of the collected rewards.
    for i, gamma in enumerate(gammas):
        gamma_rewards = rewards[:, i]
        logger.info(
            f"For gamma={gamma}, average reward is {gamma_rewards.mean()}\n"
            f"Rewards list: {gamma_rewards}"
        )
    return rewards
| 4,541 | 32.397059 | 87 | py |
ReAgent | ReAgent-master/reagent/evaluation/evaluation_data_page.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import annotations
import logging
import math
from dataclasses import dataclass, fields
from typing import TYPE_CHECKING, Optional, cast
import numpy as np
import torch
import torch.nn as nn
from reagent.core import types as rlt
from reagent.core.torch_utils import masked_softmax
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.models.seq2slate import Seq2SlateTransformerNet
if TYPE_CHECKING:
from reagent.training import ParametricDQNTrainer, ReAgentLightningModule
from reagent.training.dqn_trainer import DQNTrainer
logger = logging.getLogger(__name__)
@dataclass
class EvaluationDataPage(rlt.TensorDataClass):
    """Row-aligned tensors used for counterfactual policy evaluation (CPE).

    ``logged_*`` fields hold quantities from the logged data; ``model_*``
    fields hold the trained networks' predictions (see the
    ``create_from_tensors_*`` factory classmethods). ``validate()`` checks
    that every per-row tensor shares the same minibatch dimension.
    """

    mdp_id: Optional[torch.Tensor]
    sequence_number: Optional[torch.Tensor]
    logged_propensities: torch.Tensor
    logged_rewards: torch.Tensor
    # One-hot (or 0/1) mask marking the logged action among candidates.
    action_mask: torch.Tensor
    model_propensities: torch.Tensor
    model_rewards: torch.Tensor
    model_rewards_for_logged_action: torch.Tensor
    model_values: Optional[torch.Tensor] = None
    possible_actions_mask: Optional[torch.Tensor] = None
    optimal_q_values: Optional[torch.Tensor] = None
    eval_action_idxs: Optional[torch.Tensor] = None
    # Filled in later by compute_values() (discounted returns).
    logged_values: Optional[torch.Tensor] = None
    logged_metrics: Optional[torch.Tensor] = None
    logged_metrics_values: Optional[torch.Tensor] = None
    model_metrics: Optional[torch.Tensor] = None
    model_metrics_for_logged_action: Optional[torch.Tensor] = None
    model_metrics_values: Optional[torch.Tensor] = None
    model_metrics_values_for_logged_action: Optional[torch.Tensor] = None
    possible_actions_state_concat: Optional[torch.Tensor] = None
    contexts: Optional[torch.Tensor] = None
@classmethod
def create_from_training_batch(
cls,
tdb: rlt.PreprocessedRankingInput,
trainer: ReAgentLightningModule,
reward_network: Optional[nn.Module] = None,
):
if isinstance(tdb, rlt.DiscreteDqnInput):
discrete_training_input = cast(rlt.DiscreteDqnInput, tdb)
return EvaluationDataPage.create_from_tensors_dqn(
trainer,
tdb.extras.mdp_id,
tdb.extras.sequence_number,
discrete_training_input.state,
discrete_training_input.action,
tdb.extras.action_probability,
discrete_training_input.reward,
discrete_training_input.possible_actions_mask,
metrics=tdb.extras.metrics,
)
elif isinstance(tdb, rlt.ParametricDqnInput):
return EvaluationDataPage.create_from_tensors_parametric_dqn(
trainer,
tdb.extras.mdp_id,
tdb.extras.sequence_number,
tdb.state,
tdb.action,
tdb.extras.action_probability,
tdb.reward,
tdb.possible_actions_mask,
tdb.possible_actions,
tdb.extras.max_num_actions,
metrics=tdb.extras.metrics,
)
else:
raise NotImplementedError(f"training_input type: {type(tdb)}")
    @classmethod
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def create_from_tensors_seq2slate(
        cls,
        seq2slate_net: Seq2SlateTransformerNet,
        reward_network: nn.Module,
        training_input: rlt.PreprocessedRankingInput,
        eval_greedy: bool,
        mdp_ids: Optional[torch.Tensor] = None,
        sequence_numbers: Optional[torch.Tensor] = None,
    ):
        """
        Build an EvaluationDataPage for a seq2slate ranking model.

        :param eval_greedy: If True, evaluate the greedy policy which
        always picks the most probable output sequence. If False, evaluate
        the stochastic ranking policy.
        """
        assert (
            training_input.slate_reward is not None
            and training_input.tgt_out_probs is not None
            and training_input.tgt_out_idx is not None
            and training_input.tgt_out_seq is not None
        )
        (
            batch_size,
            tgt_seq_len,
            candidate_dim,
        ) = training_input.tgt_out_seq.float_features.shape
        device = training_input.state.float_features.device
        # Greedy rollout of the ranking model; used for the action mask in
        # the greedy case and for scoring the model-generated slate below.
        rank_output = seq2slate_net(
            training_input, Seq2SlateMode.RANK_MODE, greedy=True
        )
        assert rank_output.ranked_tgt_out_idx is not None
        if eval_greedy:
            # The greedy policy is deterministic, so its propensity is 1; the
            # action mask marks rows whose logged slate equals the greedily
            # ranked slate.
            model_propensities = torch.ones(batch_size, 1, device=device)
            action_mask = torch.all(
                # pyre-fixme[58]: `-` is not supported for operand types
                # `Optional[torch.Tensor]` and `int`.
                (training_input.tgt_out_idx - 2)
                == (rank_output.ranked_tgt_out_idx - 2),
                dim=1,
                keepdim=True,
            ).float()
        else:
            # Fully evaluating a non-greedy ranking model is too expensive because
            # we would need to compute propensities of all possible output sequences.
            # Here we only compute the propensity of the output sequences in the data.
            # As a result, we can still get a true IPS estimation but not correct
            # direct method / doubly-robust.
            model_propensities = torch.exp(
                seq2slate_net(
                    training_input, Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
                ).log_probs
            )
            action_mask = torch.ones(batch_size, 1, device=device).float()
        # Predicted reward for the slate that was actually logged.
        model_rewards_for_logged_action = reward_network(
            training_input.state.float_features,
            training_input.src_seq.float_features,
            # pyre-fixme[16]: `Optional` has no attribute `float_features`.
            training_input.tgt_out_seq.float_features,
            training_input.src_src_mask,
            training_input.tgt_out_idx,
        ).reshape(-1, 1)
        # Gather candidate features in the model's ranked order. The `- 2`
        # converts target-vocab indices into source positions (the first two
        # indices are presumably reserved symbols — confirm against
        # seq2slate_utils).
        ranked_tgt_out_seq = training_input.src_seq.float_features[
            torch.arange(batch_size, device=device).repeat_interleave(tgt_seq_len),
            rank_output.ranked_tgt_out_idx.flatten() - 2,
        ].reshape(batch_size, tgt_seq_len, candidate_dim)
        # model_rewards refers to predicted rewards for the slate generated
        # greedily by the ranking model. It would be too expensive to
        # compute model_rewards for all possible slates
        model_rewards = reward_network(
            training_input.state.float_features,
            training_input.src_seq.float_features,
            ranked_tgt_out_seq,
            training_input.src_src_mask,
            rank_output.ranked_tgt_out_idx,
        ).reshape(-1, 1)
        # pyre-fixme[16]: `Optional` has no attribute `reshape`.
        logged_rewards = training_input.slate_reward.reshape(-1, 1)
        logged_propensities = training_input.tgt_out_probs.reshape(-1, 1)
        return cls(
            mdp_id=mdp_ids,
            sequence_number=sequence_numbers,
            model_propensities=model_propensities,
            model_rewards=model_rewards,
            action_mask=action_mask,
            logged_rewards=logged_rewards,
            model_rewards_for_logged_action=model_rewards_for_logged_action,
            logged_propensities=logged_propensities,
        )
    @classmethod
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def create_from_tensors_parametric_dqn(
        cls,
        trainer: ParametricDQNTrainer,
        mdp_ids: torch.Tensor,
        sequence_numbers: torch.Tensor,
        states: rlt.FeatureData,
        actions: rlt.FeatureData,
        propensities: torch.Tensor,
        rewards: torch.Tensor,
        possible_actions_mask: torch.Tensor,
        possible_actions: rlt.FeatureData,
        max_num_actions: int,
        metrics: Optional[torch.Tensor] = None,
    ):
        """
        Build an EvaluationDataPage for a parametric-action DQN by scoring
        every candidate action for every state. The Q and reward networks are
        put in eval mode for the duration of the call and restored afterwards.
        """
        old_q_train_state = trainer.q_network.training
        old_reward_train_state = trainer.reward_network.training
        trainer.q_network.train(False)
        trainer.reward_network.train(False)
        # Tile each state once per candidate action so all (state, action)
        # pairs can be scored in a single batched forward pass.
        tiled_state = states.float_features.repeat(1, max_num_actions).reshape(
            -1, states.float_features.shape[1]
        )
        assert possible_actions is not None
        # Get Q-value of action taken
        possible_actions_state_concat = (rlt.FeatureData(tiled_state), possible_actions)
        # FIXME: model_values and model_metrics_values should be
        # calculated using q_network_cpe (as in discrete dqn).
        # q_network_cpe has not been added in parametric dqn yet.
        model_values = trainer.q_network(*possible_actions_state_concat)
        optimal_q_values, _ = trainer.get_detached_model_outputs(
            *possible_actions_state_concat
        )
        eval_action_idxs = None
        assert (
            model_values.shape[1] == 1
            and model_values.shape[0]
            == possible_actions_mask.shape[0] * possible_actions_mask.shape[1]
        ), (
            "Invalid shapes: "
            + str(model_values.shape)
            + " != "
            + str(possible_actions_mask.shape)
        )
        model_values = model_values.reshape(possible_actions_mask.shape)
        optimal_q_values = optimal_q_values.reshape(possible_actions_mask.shape)
        # Softmax over valid actions yields the model's propensities.
        model_propensities = masked_softmax(
            optimal_q_values, possible_actions_mask, trainer.rl_temperature
        )
        rewards_and_metric_rewards = trainer.reward_network(
            *possible_actions_state_concat
        )
        # First output column is the reward; remaining columns are metrics.
        model_rewards = rewards_and_metric_rewards[:, :1]
        assert (
            model_rewards.shape[0] * model_rewards.shape[1]
            == possible_actions_mask.shape[0] * possible_actions_mask.shape[1]
        ), (
            "Invalid shapes: "
            + str(model_rewards.shape)
            + " != "
            + str(possible_actions_mask.shape)
        )
        model_rewards = model_rewards.reshape(possible_actions_mask.shape)
        model_metrics = rewards_and_metric_rewards[:, 1:]
        model_metrics = model_metrics.reshape(possible_actions_mask.shape[0], -1)
        model_rewards_and_metrics_for_logged_action = trainer.reward_network(
            states, actions
        )
        model_rewards_for_logged_action = model_rewards_and_metrics_for_logged_action[
            :, :1
        ]
        # One-hot mask marking which candidate equals the logged action; each
        # row must match exactly one candidate.
        action_dim = possible_actions.float_features.shape[1]
        action_mask = torch.all(
            possible_actions.float_features.view(-1, max_num_actions, action_dim)
            == actions.float_features.unsqueeze(dim=1),
            dim=2,
        ).float()
        assert torch.all(action_mask.sum(dim=1) == 1)
        num_metrics = model_metrics.shape[1] // max_num_actions
        model_metrics_values = None
        model_metrics_for_logged_action = None
        model_metrics_values_for_logged_action = None
        if num_metrics > 0:
            # FIXME: calculate model_metrics_values when q_network_cpe is added
            # to parametric dqn
            model_metrics_values = model_values.repeat(1, num_metrics)
        # Restore the networks' original training modes.
        trainer.q_network.train(old_q_train_state)
        trainer.reward_network.train(old_reward_train_state)
        return cls(
            mdp_id=mdp_ids,
            sequence_number=sequence_numbers,
            logged_propensities=propensities,
            logged_rewards=rewards,
            action_mask=action_mask,
            model_rewards=model_rewards,
            model_rewards_for_logged_action=model_rewards_for_logged_action,
            model_values=model_values,
            model_metrics_values=model_metrics_values,
            model_metrics_values_for_logged_action=model_metrics_values_for_logged_action,
            model_propensities=model_propensities,
            logged_metrics=metrics,
            model_metrics=model_metrics,
            model_metrics_for_logged_action=model_metrics_for_logged_action,
            # Will compute later
            logged_values=None,
            logged_metrics_values=None,
            possible_actions_mask=possible_actions_mask,
            optimal_q_values=optimal_q_values,
            eval_action_idxs=eval_action_idxs,
        )
@classmethod
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def create_from_tensors_dqn(
cls,
trainer: DQNTrainer,
mdp_ids: torch.Tensor,
sequence_numbers: torch.Tensor,
states: rlt.FeatureData,
actions: rlt.FeatureData,
propensities: torch.Tensor,
rewards: torch.Tensor,
possible_actions_mask: torch.Tensor,
metrics: Optional[torch.Tensor] = None,
):
old_q_train_state = trainer.q_network.training
# pyre-fixme[16]: `DQNTrainer` has no attribute `reward_network`.
old_reward_train_state = trainer.reward_network.training
old_q_cpe_train_state = trainer.q_network_cpe.training
trainer.q_network.train(False)
# pyre-fixme[16]: `Tensor` has no attribute `train`.
trainer.reward_network.train(False)
trainer.q_network_cpe.train(False)
num_actions = trainer.num_actions
action_mask = actions.float()
rewards = trainer.boost_rewards(rewards, actions)
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
model_values = trainer.q_network_cpe(states)[:, 0:num_actions]
# TODO: make generic get_action_idxs for each trainer class
# Note: model_outputs are obtained from the q_network for DQN algorithms
# and from the actor_network for CRR.
model_outputs, _ = trainer.get_detached_model_outputs(states)
# Note: eval_action_idxs is used in evaluate_post_training() function in evaluator.py
eval_action_idxs = trainer.get_max_q_values(
model_outputs, possible_actions_mask
)[1]
model_propensities = masked_softmax(
model_outputs, possible_actions_mask, trainer.rl_temperature
)
assert model_values.shape == actions.shape, (
"Invalid shape: " + str(model_values.shape) + " != " + str(actions.shape)
)
assert model_values.shape == possible_actions_mask.shape, (
"Invalid shape: "
+ str(model_values.shape)
+ " != "
+ str(possible_actions_mask.shape)
)
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
rewards_and_metric_rewards = trainer.reward_network(states)
# In case we reuse the modular for Q-network
if hasattr(rewards_and_metric_rewards, "q_values"):
rewards_and_metric_rewards = rewards_and_metric_rewards
model_rewards = rewards_and_metric_rewards[:, 0:num_actions]
assert model_rewards.shape == actions.shape, (
"Invalid shape: " + str(model_rewards.shape) + " != " + str(actions.shape)
)
model_rewards_for_logged_action = torch.sum(
model_rewards * action_mask, dim=1, keepdim=True
)
model_metrics = rewards_and_metric_rewards[:, num_actions:]
assert model_metrics.shape[1] % num_actions == 0, (
"Invalid metrics shape: "
+ str(model_metrics.shape)
+ " "
+ str(num_actions)
)
num_metrics = model_metrics.shape[1] // num_actions
if num_metrics == 0:
model_metrics_values = None
model_metrics_for_logged_action = None
model_metrics_values_for_logged_action = None
else:
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
model_metrics_values = trainer.q_network_cpe(states)
# Backward compatility
if hasattr(model_metrics_values, "q_values"):
model_metrics_values = model_metrics_values
model_metrics_values = model_metrics_values[:, num_actions:]
assert model_metrics_values.shape[1] == num_actions * num_metrics, (
"Invalid shape: "
+ str(model_metrics_values.shape[1])
+ " != "
+ str(actions.shape[1] * num_metrics)
)
model_metrics_for_logged_action_list = []
model_metrics_values_for_logged_action_list = []
for metric_index in range(num_metrics):
metric_start = metric_index * num_actions
metric_end = (metric_index + 1) * num_actions
model_metrics_for_logged_action_list.append(
torch.sum(
model_metrics[:, metric_start:metric_end] * action_mask,
dim=1,
keepdim=True,
)
)
model_metrics_values_for_logged_action_list.append(
torch.sum(
model_metrics_values[:, metric_start:metric_end] * action_mask,
dim=1,
keepdim=True,
)
)
model_metrics_for_logged_action = torch.cat(
model_metrics_for_logged_action_list, dim=1
)
model_metrics_values_for_logged_action = torch.cat(
model_metrics_values_for_logged_action_list, dim=1
)
trainer.q_network_cpe.train(old_q_cpe_train_state)
trainer.q_network.train(old_q_train_state)
trainer.reward_network.train(old_reward_train_state)
return cls(
mdp_id=mdp_ids,
sequence_number=sequence_numbers,
logged_propensities=propensities,
logged_rewards=rewards,
action_mask=action_mask,
model_rewards=model_rewards,
model_rewards_for_logged_action=model_rewards_for_logged_action,
model_values=model_values,
model_metrics_values=model_metrics_values,
model_metrics_values_for_logged_action=model_metrics_values_for_logged_action,
model_propensities=model_propensities,
logged_metrics=metrics,
model_metrics=model_metrics,
model_metrics_for_logged_action=model_metrics_for_logged_action,
# Will compute later
logged_values=None,
logged_metrics_values=None,
possible_actions_mask=possible_actions_mask,
optimal_q_values=model_outputs,
eval_action_idxs=eval_action_idxs,
)
def append(self, edp):
new_edp = {}
for x in fields(EvaluationDataPage):
t = getattr(self, x.name)
other_t = getattr(edp, x.name)
assert int(t is not None) + int(other_t is not None) != 1, (
"Tried to append when a tensor existed in one training page but not the other: "
+ x.name
)
if other_t is not None:
if isinstance(t, torch.Tensor):
new_edp[x.name] = torch.cat((t, other_t), dim=0)
elif isinstance(t, np.ndarray):
new_edp[x.name] = np.concatenate((t, other_t), axis=0)
else:
raise Exception("Invalid type in training data page")
else:
new_edp[x.name] = None
return EvaluationDataPage(**new_edp)
def sort(self):
idxs = []
for i, (mdp_id, seq_num) in enumerate(zip(self.mdp_id, self.sequence_number)):
idxs.append((mdp_id, int(seq_num), i))
sorted_idxs = [i for _mdp_id, _seq_num, i in sorted(idxs)]
new_edp = {}
for x in fields(EvaluationDataPage):
t = getattr(self, x.name)
new_edp[x.name] = t[sorted_idxs] if t is not None else None
return EvaluationDataPage(**new_edp)
def compute_values(self, gamma: float):
assert self.mdp_id is not None and self.sequence_number is not None
logged_values = EvaluationDataPage.compute_values_for_mdps(
self.logged_rewards,
self.mdp_id,
self.sequence_number,
gamma,
)
if self.logged_metrics is not None:
logged_metrics_values: Optional[
torch.Tensor
] = EvaluationDataPage.compute_values_for_mdps(
self.logged_metrics,
# pyre-fixme[6]: Expected `Tensor` for 2nd param but got
# `Optional[torch.Tensor]`.
self.mdp_id,
# pyre-fixme[6]: Expected `Tensor` for 3rd param but got
# `Optional[torch.Tensor]`.
self.sequence_number,
gamma,
)
else:
logged_metrics_values = None
return self._replace(
logged_values=logged_values, logged_metrics_values=logged_metrics_values
)
@staticmethod
def compute_values_for_mdps(
rewards: torch.Tensor,
mdp_ids: torch.Tensor,
sequence_numbers: torch.Tensor,
gamma: float,
) -> torch.Tensor:
values = rewards.clone()
for x in range(len(values) - 2, -1, -1):
if mdp_ids[x] != mdp_ids[x + 1]:
# Value = reward for new mdp_id
continue
values[x, 0] += values[x + 1, 0] * math.pow(
gamma, float(sequence_numbers[x + 1, 0] - sequence_numbers[x, 0])
)
return values
    def validate(self):
        """Sanity-check the page: shapes, column counts, batch alignment,
        and that rows are grouped by mdp_id with strictly increasing
        sequence numbers inside each MDP."""
        assert len(self.logged_propensities.shape) == 2
        assert len(self.logged_rewards.shape) == 2
        assert len(self.logged_values.shape) == 2
        assert len(self.model_propensities.shape) == 2
        assert len(self.model_rewards.shape) == 2
        assert len(self.model_values.shape) == 2
        # Logged quantities are single-column; model quantities have one
        # column per action.
        assert self.logged_propensities.shape[1] == 1
        assert self.logged_rewards.shape[1] == 1
        assert self.logged_values.shape[1] == 1
        num_actions = self.model_propensities.shape[1]
        assert self.model_rewards.shape[1] == num_actions
        assert self.model_values.shape[1] == num_actions
        assert self.action_mask.shape == self.model_propensities.shape
        if self.logged_metrics is not None:
            assert len(self.logged_metrics.shape) == 2
            assert len(self.logged_metrics_values.shape) == 2
            assert len(self.model_metrics.shape) == 2
            assert len(self.model_metrics_values.shape) == 2
            num_metrics = self.logged_metrics.shape[1]
            assert self.logged_metrics_values.shape[1] == num_metrics, (
                "Invalid shape: "
                + str(self.logged_metrics_values.shape)
                + " != "
                + str(num_metrics)
            )
            assert self.model_metrics.shape[1] == num_metrics * num_actions, (
                "Invalid shape: "
                + str(self.model_metrics.shape)
                + " != "
                + str(num_metrics * num_actions)
            )
            assert self.model_metrics_values.shape[1] == num_metrics * num_actions
        # All per-row tensors must share the same minibatch size.
        minibatch_size = self.logged_propensities.shape[0]
        logger.info("EvaluationDataPage data size: {}".format(minibatch_size))
        assert minibatch_size == self.logged_rewards.shape[0]
        assert minibatch_size == self.logged_values.shape[0]
        assert minibatch_size == self.model_propensities.shape[0]
        assert minibatch_size == self.model_rewards.shape[0]
        assert minibatch_size == self.model_values.shape[0]
        if self.logged_metrics is not None:
            assert minibatch_size == self.logged_metrics.shape[0]
            assert minibatch_size == self.logged_metrics_values.shape[0]
            assert minibatch_size == self.model_metrics.shape[0]
            assert minibatch_size == self.model_metrics_values.shape[0]
        logger.info("Average logged reward = %s", self.logged_rewards.mean())
        logger.info(
            "Average model propensity for action 0 = %s",
            self.model_propensities[:, 0].mean(),
        )
        logger.info(
            "Average model propensity for action 1 = %s",
            self.model_propensities[:, 1].mean(),
        )
        logger.info(
            "Average logged propensity = %s",
            self.logged_propensities.mean(),
        )
        # Verify rows are grouped by mdp_id (no MDP split across groups) and
        # sequence numbers are strictly increasing within each MDP.
        flatten_mdp_id = self.mdp_id.reshape(-1)
        unique_mdp_ids = set(flatten_mdp_id.tolist())
        prev_mdp_id, prev_seq_num = None, None
        mdp_count = 0
        for mdp_id, seq_num in zip(flatten_mdp_id, self.sequence_number):
            if prev_mdp_id is None or mdp_id != prev_mdp_id:
                mdp_count += 1
                prev_mdp_id = mdp_id
            else:
                assert seq_num > prev_seq_num, (
                    f"For mdp_id {mdp_id}, got {seq_num} <= {prev_seq_num}."
                    f"Sequence number must be in increasing order.\n"
                    f"Zip(mdp_id, seq_num): "
                    f"{list(zip(flatten_mdp_id, self.sequence_number))}"
                )
            prev_seq_num = seq_num
        assert len(unique_mdp_ids) == mdp_count, "MDPs are broken up. {} vs {}".format(
            len(unique_mdp_ids), mdp_count
        )
def set_metric_as_reward(self, i: int, num_actions: int):
assert self.logged_metrics is not None, "metrics must not be none"
assert self.logged_metrics_values is not None, "metrics must not be none"
assert self.model_metrics is not None, "metrics must not be none"
assert self.model_metrics_values is not None, "metrics must not be none"
return self._replace(
logged_rewards=self.logged_metrics[:, i : i + 1],
# pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
logged_values=self.logged_metrics_values[:, i : i + 1],
model_rewards=self.model_metrics[
:, i * num_actions : (i + 1) * num_actions
],
model_values=self.model_metrics_values[
:, i * num_actions : (i + 1) * num_actions
],
logged_metrics=None,
logged_metrics_values=None,
model_metrics=None,
model_metrics_values=None,
)
| 26,496 | 40.401563 | 96 | py |
ReAgent | ReAgent-master/reagent/evaluation/world_model_evaluator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List
import torch
from reagent.core.types import FeatureData, MemoryNetworkInput
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
logger = logging.getLogger(__name__)
class LossEvaluator(object):
    """Evaluate MDN-RNN losses on evaluation data pages."""

    def __init__(self, trainer: MDNRNNTrainer, state_dim: int) -> None:
        self.trainer = trainer
        self.state_dim = state_dim

    def evaluate(self, tdp: MemoryNetworkInput) -> Dict[str, float]:
        """Compute the trainer's losses on ``tdp`` in eval mode and return
        them as detached Python floats."""
        mdnrnn = self.trainer.memory_network.mdnrnn
        mdnrnn.eval()
        losses = self.trainer.get_loss(tdp, state_dim=self.state_dim)
        detached = {
            key: losses[key].cpu().detach().item()
            for key in ("loss", "gmm", "bce", "mse")
        }
        del losses
        # Put the network back into training mode before returning.
        mdnrnn.train()
        return detached
class FeatureImportanceEvaluator(object):
    """Evaluate feature importance weights on data pages"""
    def __init__(
        self,
        trainer: MDNRNNTrainer,
        discrete_action: bool,
        state_feature_num: int,
        action_feature_num: int,
        sorted_action_feature_start_indices: List[int],
        sorted_state_feature_start_indices: List[int],
    ) -> None:
        """
        :param trainer: MDN-RNN trainer whose loss is perturbed per feature
        :param discrete_action: whether actions are one-hot discrete choices
        :param state_feature_num: number of distinct state features
        :param action_feature_num: number of distinct action features
        :param sorted_action_feature_start_indices: the starting index of each
        action feature in the action vector (need this because some features
        (e.g., one-hot encoding enum) may take multiple components)
        :param sorted_state_feature_start_indices: the starting index of each
        state feature in the state vector
        """
        self.trainer = trainer
        self.discrete_action = discrete_action
        self.state_feature_num = state_feature_num
        self.action_feature_num = action_feature_num
        self.sorted_action_feature_start_indices = sorted_action_feature_start_indices
        self.sorted_state_feature_start_indices = sorted_state_feature_start_indices
def evaluate(self, batch: MemoryNetworkInput):
"""Calculate feature importance: setting each state/action feature to
the mean value and observe loss increase."""
self.trainer.memory_network.mdnrnn.eval()
state_features = batch.state.float_features
action_features = batch.action
seq_len, batch_size, state_dim = state_features.size()
action_dim = action_features.size()[2]
action_feature_num = self.action_feature_num
state_feature_num = self.state_feature_num
feature_importance = torch.zeros(action_feature_num + state_feature_num)
orig_losses = self.trainer.get_loss(batch, state_dim=state_dim)
orig_loss = orig_losses["loss"].cpu().detach().item()
del orig_losses
action_feature_boundaries = self.sorted_action_feature_start_indices + [
action_dim
]
state_feature_boundaries = self.sorted_state_feature_start_indices + [state_dim]
for i in range(action_feature_num):
action_features = batch.action.reshape(
(batch_size * seq_len, action_dim)
).data.clone()
# if actions are discrete, an action's feature importance is the loss
# increase due to setting all actions to this action
if self.discrete_action:
assert action_dim == action_feature_num
action_vec = torch.zeros(action_dim)
action_vec[i] = 1
action_features[:] = action_vec
# if actions are continuous, an action's feature importance is the loss
# increase due to masking this action feature to its mean value
else:
boundary_start, boundary_end = (
action_feature_boundaries[i],
action_feature_boundaries[i + 1],
)
action_features[
:, boundary_start:boundary_end
] = self.compute_median_feature_value(
action_features[:, boundary_start:boundary_end]
)
action_features = action_features.reshape((seq_len, batch_size, action_dim))
new_batch = MemoryNetworkInput(
state=batch.state,
action=action_features,
next_state=batch.next_state,
reward=batch.reward,
time_diff=torch.ones_like(batch.reward).float(),
not_terminal=batch.not_terminal,
step=None,
)
losses = self.trainer.get_loss(new_batch, state_dim=state_dim)
feature_importance[i] = losses["loss"].cpu().detach().item() - orig_loss
del losses
for i in range(state_feature_num):
state_features = batch.state.float_features.reshape(
(batch_size * seq_len, state_dim)
).data.clone()
boundary_start, boundary_end = (
state_feature_boundaries[i],
state_feature_boundaries[i + 1],
)
state_features[
:, boundary_start:boundary_end
] = self.compute_median_feature_value(
state_features[:, boundary_start:boundary_end]
)
state_features = state_features.reshape((seq_len, batch_size, state_dim))
new_batch = MemoryNetworkInput(
state=FeatureData(float_features=state_features),
action=batch.action,
next_state=batch.next_state,
reward=batch.reward,
time_diff=torch.ones_like(batch.reward).float(),
not_terminal=batch.not_terminal,
step=None,
)
losses = self.trainer.get_loss(new_batch, state_dim=state_dim)
feature_importance[i + action_feature_num] = (
losses["loss"].cpu().detach().item() - orig_loss
)
del losses
self.trainer.memory_network.mdnrnn.train()
logger.info(
"**** Debug tool feature importance ****: {}".format(feature_importance)
)
return {"feature_loss_increase": feature_importance.numpy()}
def compute_median_feature_value(self, features):
# enum type
if features.shape[1] > 1:
feature_counts = torch.sum(features, dim=0)
median_feature_counts = torch.median(feature_counts)
# no similar method as numpy.where in torch
for i in range(features.shape[1]):
if feature_counts[i] == median_feature_counts:
break
median_feature = torch.zeros(features.shape[1])
median_feature[i] = 1
# other types
else:
median_feature = features.mean(dim=0)
return median_feature
class FeatureSensitivityEvaluator(object):
    """Evaluate state feature sensitivity caused by varying actions.

    Sensitivity of a next-state feature is the mean absolute change in the
    world model's predicted next-state means when the batch's actions are
    randomly permuted across the batch dimension.
    """
    def __init__(
        self,
        trainer: MDNRNNTrainer,
        state_feature_num: int,
        sorted_state_feature_start_indices: List[int],
    ) -> None:
        self.trainer = trainer
        self.state_feature_num = state_feature_num
        # Starting index of each state feature in the state vector (a feature
        # may occupy multiple components, e.g. one-hot enums).
        self.sorted_state_feature_start_indices = sorted_state_feature_start_indices
    def evaluate(self, batch: MemoryNetworkInput):
        """Calculate state feature sensitivity due to actions:
        randomly permutating actions and see how much the prediction of next
        state feature deviates.

        Returns a dict with key ``"feature_sensitivity"`` mapping to a numpy
        array of length state_feature_num.
        """
        assert isinstance(batch, MemoryNetworkInput)
        self.trainer.memory_network.mdnrnn.eval()
        seq_len, batch_size, state_dim = batch.next_state.float_features.size()
        state_feature_num = self.state_feature_num
        feature_sensitivity = torch.zeros(state_feature_num)
        # the input of world_model has seq-len as the first dimension
        mdnrnn_output = self.trainer.memory_network(
            batch.state, FeatureData(batch.action)
        )
        predicted_next_state_means = mdnrnn_output.mus
        shuffled_mdnrnn_output = self.trainer.memory_network(
            batch.state,
            # shuffle the actions
            FeatureData(batch.action[:, torch.randperm(batch_size), :]),
        )
        shuffled_predicted_next_state_means = shuffled_mdnrnn_output.mus
        # mus are (seq_len, batch, num_gaussians, state_dim)
        assert (
            predicted_next_state_means.size()
            == shuffled_predicted_next_state_means.size()
            == (seq_len, batch_size, self.trainer.params.num_gaussians, state_dim)
        )
        # Append the end index so boundaries[i], boundaries[i+1] bracket
        # the components of feature i.
        state_feature_boundaries = self.sorted_state_feature_start_indices + [state_dim]
        for i in range(state_feature_num):
            boundary_start, boundary_end = (
                state_feature_boundaries[i],
                state_feature_boundaries[i + 1],
            )
            # L1 deviation summed over the feature's components, averaged
            # over sequence, batch and mixture components.
            abs_diff = torch.mean(
                torch.sum(
                    torch.abs(
                        shuffled_predicted_next_state_means[
                            :, :, :, boundary_start:boundary_end
                        ]
                        - predicted_next_state_means[
                            :, :, :, boundary_start:boundary_end
                        ]
                    ),
                    dim=3,
                )
            )
            feature_sensitivity[i] = abs_diff.cpu().detach().item()
        self.trainer.memory_network.mdnrnn.train()
        logger.info(
            "**** Debug tool feature sensitivity ****: {}".format(feature_sensitivity)
        )
        return {"feature_sensitivity": feature_sensitivity.numpy()}
| 9,841 | 39.336066 | 88 | py |
ReAgent | ReAgent-master/reagent/evaluation/sequential_doubly_robust_estimator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List
import numpy as np
import torch
from reagent.evaluation.cpe import CpeEstimate, bootstrapped_std_error_of_mean
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
class SequentialDoublyRobustEstimator:
    """Sequential doubly-robust off-policy estimator for RL trajectories.

    Implements the per-episode backward DR recursion of Jiang & Li (2016),
    https://arxiv.org/pdf/1511.03722.pdf.
    """
    def __init__(self, gamma):
        # Discount factor applied per step within an episode.
        self.gamma = gamma
    def estimate(self, edp: EvaluationDataPage) -> CpeEstimate:
        # For details, visit https://arxiv.org/pdf/1511.03722.pdf
        logged_rewards = edp.logged_rewards.squeeze()
        logged_propensities = edp.logged_propensities.squeeze()
        num_examples = edp.logged_rewards.shape[0]
        # V(s) under the target policy: propensity-weighted Q-values.
        estimated_state_values = torch.sum(
            edp.model_propensities * edp.model_values, dim=1
        )
        # Q(s, a_logged): Q-value of the action actually taken.
        estimated_q_values_for_logged_action = torch.sum(
            edp.model_values * edp.action_mask, dim=1
        )
        # pi_target(a_logged | s).
        target_propensity_for_action = torch.sum(
            edp.model_propensities * edp.action_mask, dim=1
        )
        assert target_propensity_for_action.shape == logged_propensities.shape, (
            "Invalid shape: "
            + str(target_propensity_for_action.shape)
            + " != "
            + str(logged_propensities.shape)
        )
        assert (
            target_propensity_for_action.shape
            == estimated_q_values_for_logged_action.shape
        ), (
            "Invalid shape: "
            + str(target_propensity_for_action.shape)
            + " != "
            + str(estimated_q_values_for_logged_action.shape)
        )
        assert target_propensity_for_action.shape == logged_rewards.shape, (
            "Invalid shape: "
            + str(target_propensity_for_action.shape)
            + " != "
            + str(logged_rewards.shape)
        )
        # Per-step importance ratio pi_target / pi_logged.
        importance_weight = target_propensity_for_action / logged_propensities
        doubly_robusts: List[float] = []
        episode_values: List[float] = []
        assert edp.mdp_id is not None
        i = 0
        last_episode_end = -1
        # Rows are grouped by mdp_id; an episode ends where mdp_id changes
        # (or at the final row).
        while i < num_examples:
            # calculate the doubly-robust Q-value for one episode
            # pyre-ignore [16]: Optional type has no attribute `__getitem__`
            if i == num_examples - 1 or edp.mdp_id[i] != edp.mdp_id[i + 1]:
                episode_end = i
                episode_value = 0.0
                doubly_robust = 0.0
                # Backward recursion from the last step of the episode:
                # DR_t = V(s_t) + w_t * (r_t + gamma * DR_{t+1} - Q(s_t, a_t))
                for j in range(episode_end, last_episode_end, -1):
                    doubly_robust = estimated_state_values[j] + importance_weight[j] * (
                        logged_rewards[j]
                        + self.gamma * doubly_robust
                        - estimated_q_values_for_logged_action[j]
                    )
                    episode_value *= self.gamma
                    episode_value += logged_rewards[j]
                doubly_robusts.append(float(doubly_robust))
                episode_values.append(float(episode_value))
                last_episode_end = episode_end
            i += 1
        if len(doubly_robusts) == 0:
            # Dump the raw inputs so the bad data can be inspected.
            torch.set_printoptions(profile="full")
            zipped_data = list(
                zip(
                    *map(
                        lambda x: x.tolist(),
                        [
                            edp.mdp_id,
                            logged_rewards,
                            estimated_state_values,
                            estimated_q_values_for_logged_action,
                            importance_weight,
                        ],
                    )
                )
            )
            raise RuntimeError(
                f"No valid doubly robusts data is generated.\n"
                f"mdp_ids x logged_rewards x estimated_state_values x "
                f"estimated_q_values_for_logged_action x importance_weight:\n"
                f"{zipped_data};\n"
                f"gamma={self.gamma};\n"
                f"Did you specify wrong metric names?"
            )
        # pyre-fixme[9]: doubly_robusts has type `List[float]`; used as `ndarray`.
        doubly_robusts = np.array(doubly_robusts)
        dr_score = float(np.mean(doubly_robusts))
        dr_score_std_error = bootstrapped_std_error_of_mean(doubly_robusts)
        # pyre-fixme[9]: episode_values has type `List[float]`; used as `ndarray`.
        episode_values = np.array(episode_values)
        logged_policy_score = np.mean(episode_values)
        # Normalization divides by the logged policy's mean episode value;
        # skip it when that value is tiny/negative to avoid a misleading ratio.
        if logged_policy_score < 1e-6:
            logger.warning(
                "Can't normalize SDR-CPE because of small"
                f" or negative logged_policy_score ({logged_policy_score})."
                f"Episode values: {episode_values}."
            )
            return CpeEstimate(
                raw=dr_score,
                normalized=0.0,
                raw_std_error=dr_score_std_error,
                normalized_std_error=0.0,
            )
        return CpeEstimate(
            raw=dr_score,
            normalized=dr_score / logged_policy_score,
            raw_std_error=dr_score_std_error,
            normalized_std_error=dr_score_std_error / logged_policy_score,
        )
| 5,254 | 36.535714 | 88 | py |
ReAgent | ReAgent-master/reagent/evaluation/doubly_robust_estimator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
from dataclasses import dataclass
from typing import Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
import torch
from reagent.evaluation.cpe import CpeEstimate, bootstrapped_std_error_of_mean
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from torch import Tensor
logger = logging.getLogger(__name__)
# Default fractions of the data used for the train and validation splits in
# _split_data; the remainder (1 - train - valid) is kept for evaluation.
DEFAULT_FRAC_TRAIN = 0.4
DEFAULT_FRAC_VALID = 0.1
class DoublyRobustHP(NamedTuple):
    """Hyper-parameters for the doubly-robust estimator."""

    # Fractions of data used for the train/valid splits (rest is eval).
    frac_train: float = DEFAULT_FRAC_TRAIN
    frac_valid: float = DEFAULT_FRAC_VALID
    # Bootstrap settings used when computing std errors of the estimates.
    bootstrap_num_samples: int = 1000
    bootstrap_sample_percent: float = 0.25
    # Optional XGBoost parameters — presumably for a learned propensity
    # model; not consumed in the code visible here (TODO confirm at callers).
    xgb_params: Optional[Dict[str, Union[float, int, str]]] = None
    bope_mode: Optional[str] = None
    bope_num_samples: Optional[int] = None
class TrainValidEvalData(NamedTuple):
    """Output of DoublyRobustEstimator._split_data.

    Each field maps the split names "train", "valid" and "eval" to the
    corresponding slice of the evaluation data page.
    """

    contexts_dict: Dict[str, Tensor]
    model_propensities_dict: Dict[str, Tensor]
    actions_logged_dict: Dict[str, Tensor]
    action_mask_dict: Dict[str, Tensor]
    logged_rewards_dict: Dict[str, Tensor]
    model_rewards_dict: Dict[str, Tensor]
    model_rewards_for_logged_action_dict: Dict[str, Tensor]
    logged_propensities_dict: Dict[str, Tensor]
    # Number of rows assigned to each split.
    num_examples_dict: Dict[str, int]
@dataclass
class EstimationData:
    """Data consumed by the importance-sampling stage of the DR estimator.

    The train/valid fields are optional and unused by the plain DR path
    (``_prepare_data`` leaves them None); only the ``*_eval`` tensors are
    required, and their shapes are validated in ``__post_init__``.
    """

    contexts_actions_train: Optional[Tensor]
    policy_indicators_train: Optional[Tensor]
    weights_train: Optional[Tensor]
    contexts_actions_valid: Optional[Tensor]
    policy_indicators_valid: Optional[Tensor]
    weights_valid: Optional[Tensor]
    contexts_actions_eval: Optional[Tensor]
    contexts_train: Optional[Tensor]
    actions_logged_train: Optional[Tensor]
    contexts_valid: Optional[Tensor]
    actions_logged_valid: Optional[Tensor]
    contexts_eval: Optional[Tensor]
    actions_logged_eval: Optional[Tensor]
    model_propensities_eval: Tensor
    model_rewards_eval: Tensor
    action_mask_eval: Tensor
    logged_rewards_eval: Tensor
    model_rewards_for_logged_action_eval: Tensor
    logged_propensities_eval: Tensor
    def __post_init__(self):
        # Per-action tensors must all be 2-D (N, num_actions) and agree.
        assert (
            self.model_propensities_eval.shape
            == self.model_rewards_eval.shape
            == self.action_mask_eval.shape
        ) and len(self.model_propensities_eval.shape) == 2, (
            f"{self.model_propensities_eval.shape} "
            f"{self.model_rewards_eval.shape} "
            f"{self.action_mask_eval.shape}"
        )
        # Per-example scalars must all be column vectors of shape (N, 1).
        assert (
            (
                self.logged_rewards_eval.shape
                == self.model_rewards_for_logged_action_eval.shape
                == self.logged_propensities_eval.shape
            )
            and len(self.logged_rewards_eval.shape) == 2
            and self.logged_rewards_eval.shape[1] == 1
        ), (
            f"{self.logged_rewards_eval.shape} "
            f"{self.model_rewards_for_logged_action_eval.shape} "
            f"{self.logged_propensities_eval.shape}"
        )
class ImportanceSamplingData(NamedTuple):
    """Inputs for computing DM/IPS/DR estimates.

    All tensors are aligned row-wise over the evaluation examples.
    """

    # pi_target(a_logged|s) / pi_logged(a_logged|s), shape (N, 1).
    importance_weight: Tensor
    logged_rewards: Tensor
    # Predicted reward for every action at each context, shape (N, num_actions).
    model_rewards: Tensor
    model_rewards_for_logged_action: Tensor
    model_propensities: Tensor
class DoublyRobustEstimator:
    """
    Doubly-robust off-policy estimator for one-step (bandit) problems.

    Produces Direct Method, IPS and Doubly Robust estimates from an
    EvaluationDataPage. For details, visit https://arxiv.org/pdf/1612.01205.pdf
    """
    def _split_data(
        self,
        edp: EvaluationDataPage,
        frac_train: float = DEFAULT_FRAC_TRAIN,
        frac_valid: float = DEFAULT_FRAC_VALID,
    ) -> TrainValidEvalData:
        """
        Split the data into training, validation and evaluation parts.
        Training and validation and used for model training to estimate
        the importance weights.
        Only evaluation data is used for policy estimation.
        This function is used for BOP-E and Estimated Propensity Score methods,
        but not for the standard Doubly Robust estimator.
        """
        num_examples = edp.model_propensities.shape[0]
        # split data into training, validation and eval
        # (random permutation so the three splits are i.i.d. samples)
        indices = np.random.permutation(num_examples)
        idx_train = indices[0 : int(frac_train * num_examples)]
        idx_valid = indices[
            int(frac_train * num_examples) : int(
                (frac_train + frac_valid) * num_examples
            )
        ]
        idx_eval = indices[int((frac_train + frac_valid) * num_examples) :]
        if edp.contexts is None:
            raise ValueError("contexts not provided in input")
        contexts_dict = {
            "train": edp.contexts[idx_train],
            # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
            "valid": edp.contexts[idx_valid],
            "eval": edp.contexts[idx_eval],
        }
        model_propensities_dict = {
            "train": edp.model_propensities[idx_train],
            "valid": edp.model_propensities[idx_valid],
            "eval": edp.model_propensities[idx_eval],
        }
        # edp.action_mask is N*N_actions tensor of indicators of which actions
        # were actually taken by the logged algo
        # argmax over the one-hot mask recovers the logged action index.
        actions_logged = torch.max(edp.action_mask, dim=1, keepdim=True)[1].float()
        actions_logged_dict = {
            "train": actions_logged[idx_train],
            "valid": actions_logged[idx_valid],
            "eval": actions_logged[idx_eval],
        }
        action_mask_dict = {
            "train": edp.action_mask[idx_train],
            "valid": edp.action_mask[idx_valid],
            "eval": edp.action_mask[idx_eval],
        }
        logged_rewards_dict = {
            "train": edp.logged_rewards[idx_train],
            "valid": edp.logged_rewards[idx_valid],
            "eval": edp.logged_rewards[idx_eval],
        }
        model_rewards_dict = {
            "train": edp.model_rewards[idx_train],
            "valid": edp.model_rewards[idx_valid],
            "eval": edp.model_rewards[idx_eval],
        }
        model_rewards_for_logged_action_dict = {
            "train": edp.model_rewards_for_logged_action[idx_train],
            "valid": edp.model_rewards_for_logged_action[idx_valid],
            "eval": edp.model_rewards_for_logged_action[idx_eval],
        }
        logged_propensities_dict = {
            "train": edp.logged_propensities[idx_train],
            "valid": edp.logged_propensities[idx_valid],
            "eval": edp.logged_propensities[idx_eval],
        }
        num_examples_dict = {
            "train": int(frac_train * num_examples),
            "valid": int((frac_train + frac_valid) * num_examples)
            - int(frac_train * num_examples),
            "eval": num_examples - int((frac_train + frac_valid) * num_examples),
        }
        return TrainValidEvalData(
            contexts_dict=contexts_dict,
            model_propensities_dict=model_propensities_dict,
            actions_logged_dict=actions_logged_dict,
            action_mask_dict=action_mask_dict,
            logged_rewards_dict=logged_rewards_dict,
            model_rewards_dict=model_rewards_dict,
            model_rewards_for_logged_action_dict=model_rewards_for_logged_action_dict,
            logged_propensities_dict=logged_propensities_dict,
            num_examples_dict=num_examples_dict,
        )
    def _prepare_data(self, edp: EvaluationDataPage) -> EstimationData:
        """Package the full EDP as evaluation-only EstimationData.

        The standard DR estimator does not train a propensity model, so all
        train/valid fields are left as None.
        """
        ed = EstimationData(
            contexts_actions_train=None,
            policy_indicators_train=None,
            weights_train=None,
            contexts_actions_valid=None,
            policy_indicators_valid=None,
            weights_valid=None,
            contexts_actions_eval=None,
            contexts_train=None,
            actions_logged_train=None,
            contexts_valid=None,
            actions_logged_valid=None,
            contexts_eval=edp.contexts,
            actions_logged_eval=None,
            model_propensities_eval=edp.model_propensities,
            model_rewards_eval=edp.model_rewards,
            action_mask_eval=edp.action_mask,
            logged_rewards_eval=edp.logged_rewards,
            model_rewards_for_logged_action_eval=edp.model_rewards_for_logged_action,
            logged_propensities_eval=edp.logged_propensities,
        )
        return ed
    def _get_importance_sampling_inputs(
        self, ed: EstimationData
    ) -> ImportanceSamplingData:
        """Compute per-example importance weights from the propensities."""
        target_propensity_for_action = torch.sum(
            ed.model_propensities_eval * ed.action_mask_eval, dim=1, keepdim=True
        )
        # target_propensity_for_action is N*1 tensor of target algo propensities
        # for historical actions
        # logged_propensities_eval is N*1 tensor of propensity scores for historical
        # actions by the prod algorithm at each context
        importance_weights = (
            target_propensity_for_action / ed.logged_propensities_eval
        ).float()
        logger.info(f"Mean IPS weight on the eval dataset: {importance_weights.mean()}")
        return ImportanceSamplingData(
            importance_weight=importance_weights,
            logged_rewards=ed.logged_rewards_eval,
            model_rewards=ed.model_rewards_eval,
            model_rewards_for_logged_action=ed.model_rewards_for_logged_action_eval,
            model_propensities=ed.model_propensities_eval,
        )
    def _get_importance_sampling_estimates(
        self, isd: ImportanceSamplingData, hp: DoublyRobustHP
    ) -> Tuple[CpeEstimate, CpeEstimate, CpeEstimate]:
        """Compute (direct method, IPS, doubly robust) CPE estimates.

        Normalized scores divide by the logged policy's mean reward; std
        errors are bootstrapped per the hyper-parameters in ``hp``.
        """
        # The score we would get if we evaluate the logged policy against itself
        logged_policy_score = float(
            torch.mean(isd.logged_rewards)
        )  # logged_rewards is N*1 tensor of historical rewards
        if logged_policy_score < 1e-6:
            logger.warning(
                "Can't normalize DR-CPE because of small or negative "
                + "logged_policy_score"
            )
            normalizer = 0.0
        else:
            normalizer = 1.0 / logged_policy_score
        if isd.model_rewards is None:
            # Fill with zero, equivalent to just doing IPS
            direct_method_values = torch.zeros(
                [isd.model_propensities.shape[0], 1], dtype=torch.float32
            )
        else:
            # model rewards is (N_samples)*N_actions tensor of predicted
            # counterfactual rewards for each possible action at each
            # historical context
            direct_method_values = torch.sum(
                isd.model_propensities * isd.model_rewards, dim=1, keepdim=True
            )
        direct_method_score = float(torch.mean(direct_method_values))
        logger.info(
            f"Normalized Direct method score = {direct_method_score * normalizer}"
        )
        direct_method_std_error = bootstrapped_std_error_of_mean(
            direct_method_values.squeeze(),
            sample_percent=hp.bootstrap_sample_percent,
            num_samples=hp.bootstrap_num_samples,
        )
        direct_method_estimate = CpeEstimate(
            raw=direct_method_score,
            normalized=direct_method_score * normalizer,
            raw_std_error=direct_method_std_error,
            normalized_std_error=direct_method_std_error * normalizer,
        )
        ips = isd.importance_weight * isd.logged_rewards  # N*1
        # DR = DM baseline + importance-weighted residual of the model's
        # reward prediction for the logged action.
        doubly_robust = (
            isd.importance_weight
            * (isd.logged_rewards - isd.model_rewards_for_logged_action)
        ) + direct_method_values
        # model_rewards_for_logged_action is N*1 of estimated rewards for target
        # policy
        ips_score = float(torch.mean(ips))
        logger.info(f"Normalized IPS score = {ips_score * normalizer}")
        ips_score_std_error = bootstrapped_std_error_of_mean(
            ips.squeeze(),
            sample_percent=hp.bootstrap_sample_percent,
            num_samples=hp.bootstrap_num_samples,
        )
        inverse_propensity_estimate = CpeEstimate(
            raw=ips_score,
            normalized=ips_score * normalizer,
            raw_std_error=ips_score_std_error,
            normalized_std_error=ips_score_std_error * normalizer,
        )
        dr_score = float(torch.mean(doubly_robust))
        dr_score_std_error = bootstrapped_std_error_of_mean(
            doubly_robust.squeeze(),
            sample_percent=hp.bootstrap_sample_percent,
            num_samples=hp.bootstrap_num_samples,
        )
        doubly_robust_estimate = CpeEstimate(
            raw=dr_score,
            normalized=dr_score * normalizer,
            raw_std_error=dr_score_std_error,
            normalized_std_error=dr_score_std_error * normalizer,
        )
        return (
            direct_method_estimate,
            inverse_propensity_estimate,
            doubly_robust_estimate,
        )
    def estimate(
        self, edp: EvaluationDataPage, hp: Optional[DoublyRobustHP] = None
    ) -> Tuple[CpeEstimate, CpeEstimate, CpeEstimate]:
        """Return (direct method, IPS, doubly robust) estimates for ``edp``."""
        hp = hp or DoublyRobustHP()
        ed = self._prepare_data(edp)
        isd = self._get_importance_sampling_inputs(ed)
        return self._get_importance_sampling_estimates(isd, hp=hp)
| 13,063 | 37.997015 | 88 | py |
ReAgent | ReAgent-master/reagent/evaluation/evaluator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import Counter
from typing import Dict, List, Optional
import torch
import torch.nn.functional as F
from reagent.core.tracker import observable
from reagent.evaluation.cpe import CpeDetails, CpeEstimateSet
from reagent.evaluation.doubly_robust_estimator import DoublyRobustEstimator
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.sequential_doubly_robust_estimator import (
SequentialDoublyRobustEstimator,
)
from reagent.evaluation.weighted_sequential_doubly_robust_estimator import (
WeightedSequentialDoublyRobustEstimator,
)
logger = logging.getLogger(__name__)
def get_tensor(x, dtype=None):
    """Convert a list/sequence to a torch Tensor.

    :param x: list or sequence (or an existing Tensor, returned as-is)
    :param dtype: optional target dtype for the elements; inferred when omitted

    Returns None when the input is None. When ``dtype`` is provided, the
    result is cast to that type.
    """
    if x is None:
        return None
    result = x if isinstance(x, torch.Tensor) else torch.tensor(x)
    return result if dtype is None else result.type(dtype)
def get_metrics_to_score(metric_reward_values: Optional[Dict[str, float]]) -> List[str]:
    """Return the metric names in sorted order, or [] when no mapping is given."""
    if metric_reward_values is None:
        return []
    return sorted(metric_reward_values.keys())
@observable(cpe_details=CpeDetails)
class Evaluator:
    """Runs the full counterfactual policy evaluation (CPE) suite on an
    EvaluationDataPage: direct method, IPS, DR, sequential DR, weighted DR
    and MAGIC estimators, plus Q-value / action-distribution summaries."""

    # Number of j-steps passed to the weighted sequential DR estimator when
    # computing the MAGIC estimate.
    NUM_J_STEPS_FOR_MAGIC_ESTIMATOR = 25
    def __init__(self, action_names, gamma, model, metrics_to_score=None) -> None:
        self.action_names = action_names
        self.metrics_to_score = metrics_to_score
        self.gamma = gamma
        self.model = model
        self.doubly_robust_estimator = DoublyRobustEstimator()
        self.sequential_doubly_robust_estimator = SequentialDoublyRobustEstimator(gamma)
        self.weighted_sequential_doubly_robust_estimator = (
            WeightedSequentialDoublyRobustEstimator(gamma)
        )
    def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails:
        """Score the reward and every configured metric on ``edp``; also
        summarize Q-values and the evaluated action distribution when
        available. Notifies observers with the resulting CpeDetails."""
        cpe_details = CpeDetails()
        cpe_details.reward_estimates = self.score_cpe("Reward", edp)
        if (
            self.metrics_to_score is not None
            and edp.logged_metrics is not None
            and self.action_names is not None
        ):
            for i, metric in enumerate(self.metrics_to_score):
                logger.info(
                    "--------- Running CPE on metric: {} ---------".format(metric)
                )
                # Re-run the same estimators with metric i substituted for reward.
                metric_reward_edp = edp.set_metric_as_reward(i, len(self.action_names))
                cpe_details.metric_estimates[metric] = self.score_cpe(
                    metric, metric_reward_edp
                )
        if self.action_names is not None:
            if edp.optimal_q_values is not None:
                # Per-action mean/std of the optimal Q-values across the page.
                value_means = edp.optimal_q_values.mean(dim=0)
                cpe_details.q_value_means = {
                    action: float(value_means[i])
                    for i, action in enumerate(self.action_names)
                }
                # pyre-ignore [16]: `Optional` has no attribute `std`
                value_stds = edp.optimal_q_values.std(dim=0)
                cpe_details.q_value_stds = {
                    action: float(value_stds[i])
                    for i, action in enumerate(self.action_names)
                }
            if edp.eval_action_idxs is not None:
                # Fraction of evaluation rows on which each action was chosen.
                cpe_details.action_distribution = {
                    # pyre-ignore [16]: `bool` has no attribute `sum`
                    action: float((edp.eval_action_idxs == i).sum())
                    # pyre-ignore [16]: `Optional` has no attribute `shape`
                    / edp.eval_action_idxs.shape[0]
                    for i, action in enumerate(self.action_names)
                }
        # pyre-fixme[16]: `Evaluator` has no attribute `notify_observers`.
        self.notify_observers(cpe_details=cpe_details)
        return cpe_details
    def score_cpe(self, metric_name, edp: EvaluationDataPage):
        """Run every estimator on ``edp`` and bundle the results."""
        (
            direct_method,
            inverse_propensity,
            doubly_robust,
        ) = self.doubly_robust_estimator.estimate(edp)
        sequential_doubly_robust = self.sequential_doubly_robust_estimator.estimate(edp)
        # num_j_steps=1 yields the ordinary weighted DR estimate.
        weighted_doubly_robust = (
            self.weighted_sequential_doubly_robust_estimator.estimate(
                edp, num_j_steps=1, whether_self_normalize_importance_weights=True
            )
        )
        magic = self.weighted_sequential_doubly_robust_estimator.estimate(
            edp,
            num_j_steps=Evaluator.NUM_J_STEPS_FOR_MAGIC_ESTIMATOR,
            whether_self_normalize_importance_weights=True,
        )
        return CpeEstimateSet(
            direct_method=direct_method,
            inverse_propensity=inverse_propensity,
            doubly_robust=doubly_robust,
            sequential_doubly_robust=sequential_doubly_robust,
            weighted_doubly_robust=weighted_doubly_robust,
            magic=magic,
        )
    def _get_batch_logged_actions(self, arr):
        """Return (fraction, count) dicts of logged actions per action name,
        aggregated over an iterable of one-hot action batches."""
        action_counter = Counter()
        for actions in arr:
            # torch.max() returns the element and the index.
            # The latter is the argmax equivalent
            _, argmax = torch.max(actions, dim=1)
            # Counter object does not work well with Tensors, hence casting back to numpy
            action_counter.update(Counter(argmax.numpy()))
        total_actions = 1.0 * sum(action_counter.values())
        return (
            {
                action_name: (action_counter[i] / total_actions)
                for i, action_name in enumerate(self.action_names)
            },
            {
                action_name: action_counter[i]
                for i, action_name in enumerate(self.action_names)
            },
        )
    def get_target_distribution_error(
        self, actions, target_distribution, actual_distribution
    ):
        """Calculate MSE between actual and target action distribution."""
        if not target_distribution:
            return None
        error = 0
        for i, action in enumerate(actions):
            error += (target_distribution[i] - actual_distribution[action]) ** 2
        return error / len(actions)
    @staticmethod
    def huberLoss(label, output):
        # Huber loss with delta=1: quadratic near zero, linear beyond |1|.
        if abs(label - output) > 1:
            return abs(label - output) - 0.5
        else:
            return 0.5 * (label - output) * (label - output)
| 6,718 | 35.516304 | 89 | py |
ReAgent | ReAgent-master/reagent/evaluation/cpe.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import Dict, NamedTuple, Optional
import numpy as np
import torch
from reagent.core.tensorboardX import SummaryWriterContext
logger = logging.getLogger(__name__)
class CpeEstimate(NamedTuple):
    """One counterfactual policy estimate with bootstrap std errors.

    ``normalized`` values are the raw estimate divided by the logged
    policy's score (see the estimator modules that construct these)."""

    raw: float
    normalized: float
    raw_std_error: float
    normalized_std_error: float
class CpeEstimateSet(NamedTuple):
    """Bundle of CPE estimates produced by the different estimators.

    All fields default to None; ``check_estimates_exist`` asserts that the
    six core estimators have been filled in before logging.
    """

    direct_method: Optional[CpeEstimate] = None
    inverse_propensity: Optional[CpeEstimate] = None
    doubly_robust: Optional[CpeEstimate] = None
    sequential_doubly_robust: Optional[CpeEstimate] = None
    weighted_doubly_robust: Optional[CpeEstimate] = None
    magic: Optional[CpeEstimate] = None
    switch: Optional[CpeEstimate] = None
    switch_dr: Optional[CpeEstimate] = None

    def check_estimates_exist(self):
        """Assert the six core estimates are present (switch* may be None)."""
        assert self.direct_method is not None
        assert self.inverse_propensity is not None
        assert self.doubly_robust is not None
        assert self.sequential_doubly_robust is not None
        assert self.weighted_doubly_robust is not None
        assert self.magic is not None

    def log(self):
        """Log each estimate as 'normalized value +/- SE raw value +/- SE'.

        BUGFIX: the format strings previously repeated positional indices
        ({0} and {1} each twice), so the normalized and raw values were each
        printed twice and the std-error arguments were silently ignored.
        Distinct indices {0}..{3} now consume all four arguments.
        """
        self.check_estimates_exist()
        logger.info(
            "Reward Inverse Propensity Score : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.inverse_propensity.normalized,
                self.inverse_propensity.normalized_std_error,
                self.inverse_propensity.raw,
                self.inverse_propensity.raw_std_error,
            )
        )
        logger.info(
            "Reward Direct Method : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.direct_method.normalized,
                self.direct_method.normalized_std_error,
                self.direct_method.raw,
                self.direct_method.raw_std_error,
            )
        )
        logger.info(
            "Reward Doubly Robust P.E. : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.doubly_robust.normalized,
                self.doubly_robust.normalized_std_error,
                self.doubly_robust.raw,
                self.doubly_robust.raw_std_error,
            )
        )
        logger.info(
            "Value Weighted Doubly Robust P.E. : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.weighted_doubly_robust.normalized,
                self.weighted_doubly_robust.normalized_std_error,
                self.weighted_doubly_robust.raw,
                self.weighted_doubly_robust.raw_std_error,
            )
        )
        logger.info(
            "Value Sequential Doubly Robust P.E. : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.sequential_doubly_robust.normalized,
                self.sequential_doubly_robust.normalized_std_error,
                self.sequential_doubly_robust.raw,
                self.sequential_doubly_robust.raw_std_error,
            )
        )
        logger.info(
            "Value Magic Doubly Robust P.E. : normalized {0:.3f} +/- {1:.3f} raw {2:.3f} +/- {3:.3f}".format(
                self.magic.normalized,
                self.magic.normalized_std_error,
                self.magic.raw,
                self.magic.raw_std_error,
            )
        )

    def log_to_tensorboard(self, metric_name: str) -> None:
        """Write the normalized core estimates as tensorboard scalars
        under ``CPE/<metric_name>/...``; None/NaN values are logged as 0."""
        self.check_estimates_exist()

        def none_to_zero(x: Optional[float]) -> float:
            if x is None or math.isnan(x):
                return 0.0
            return x

        for name, value in [
            (
                "CPE/{}/Direct_Method_Reward".format(metric_name),
                # pyre-fixme[16]: `Optional` has no attribute `normalized`.
                self.direct_method.normalized,
            ),
            (
                "CPE/{}/IPS_Reward".format(metric_name),
                self.inverse_propensity.normalized,
            ),
            (
                "CPE/{}/Doubly_Robust_Reward".format(metric_name),
                self.doubly_robust.normalized,
            ),
            (
                "CPE/{}/Sequential_Doubly_Robust".format(metric_name),
                self.sequential_doubly_robust.normalized,
            ),
            (
                "CPE/{}/Weighted_Sequential_Doubly_Robust".format(metric_name),
                self.weighted_doubly_robust.normalized,
            ),
            ("CPE/{}/MAGIC".format(metric_name), self.magic.normalized),
        ]:
            SummaryWriterContext.add_scalar(name, none_to_zero(value))

    def fill_empty_with_zero(self):
        """Return a copy where every None field is an all-zero CpeEstimate."""
        retval = self
        for name, value in self._asdict().items():
            if value is None:
                retval = retval._replace(
                    **{
                        name: CpeEstimate(
                            raw=0.0,
                            normalized=0.0,
                            raw_std_error=0.0,
                            normalized_std_error=0.0,
                        )
                    }
                )
        return retval
class CpeDetails:
    """Container for all CPE results: reward estimates, per-metric
    estimates, Q-value summaries and the evaluated action distribution."""

    def __init__(self):
        self.reward_estimates: CpeEstimateSet = CpeEstimateSet()
        self.metric_estimates: Dict[str, CpeEstimateSet] = {}
        self.q_value_means: Optional[Dict[str, float]] = None
        self.q_value_stds: Optional[Dict[str, float]] = None
        self.action_distribution: Optional[Dict[str, float]] = None

    def log(self):
        """Log the reward estimates followed by each metric's estimates."""
        logger.info("Reward Estimates:")
        logger.info("-----------------")
        self.reward_estimates.log()
        logger.info("-----------------")
        for name, estimate_set in self.metric_estimates.items():
            logger.info(name + " Estimates:")
            logger.info("-----------------")
            estimate_set.log()
            logger.info("-----------------")

    def log_to_tensorboard(self) -> None:
        """Emit reward and per-metric estimates as tensorboard scalars."""
        self.reward_estimates.log_to_tensorboard("Reward")
        for name, estimate_set in self.metric_estimates.items():
            estimate_set.log_to_tensorboard(name)
def bootstrapped_std_error_of_mean(data, sample_percent=0.25, num_samples=1000):
    """
    Compute bootstrapped standard error of mean of input data.

    :param data: Input data (1D torch tensor or numpy array).
    :param sample_percent: Size of sample to use to calculate bootstrap statistic.
    :param num_samples: Number of times to sample.
    """
    if isinstance(data, torch.Tensor):
        data = data.cpu().numpy()
    resample_size = int(sample_percent * len(data))
    # Draw num_samples bootstrap resamples (with replacement) and record the
    # mean of each; the std of those means estimates the SE of the mean.
    resample_means = []
    for _ in range(num_samples):
        resample = np.random.choice(data, size=resample_size, replace=True)
        resample_means.append(np.mean(resample))
    return np.std(resample_means)
| 6,820 | 34.9 | 114 | py |
ReAgent | ReAgent-master/reagent/evaluation/weighted_sequential_doubly_robust_estimator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from reagent.evaluation.cpe import CpeEstimate
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
class WeightedSequentialDoublyRobustEstimator:
    """Weighted sequential doubly-robust off-policy estimator.

    Blends several j-step doubly-robust return estimates into a single value
    by solving a small constrained optimization over their estimated
    bias/variance (https://arxiv.org/pdf/1604.00923.pdf, Sections 5, 7, 8).
    """

    # Number of trajectory subsets used to estimate confidence bounds.
    NUM_SUBSETS_FOR_CB_ESTIMATES = 25
    # Two-sided confidence level used when bounding the j-step bias.
    CONFIDENCE_INTERVAL = 0.9
    # Bootstrap settings for the standard-error estimate.
    NUM_BOOTSTRAP_SAMPLES = 50
    BOOTSTRAP_SAMPLE_PCT = 0.5

    def __init__(self, gamma):
        # Per-timestep discount factor.
        self.gamma = gamma

    def estimate(
        self,
        edp: EvaluationDataPage,
        num_j_steps,
        whether_self_normalize_importance_weights,
    ) -> CpeEstimate:
        """Estimate the target policy's value from logged evaluation data.

        :param edp: evaluation data page holding logged and model quantities.
        :param num_j_steps: number of j-step return estimators to blend.
        :param whether_self_normalize_importance_weights: if True, importance
            weights are self-normalized per timestep (weighted IS).
        :return: CpeEstimate with raw and (when possible) normalized values.
        """
        # For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
        assert edp.model_values is not None
        (
            actions,
            rewards,
            logged_propensities,
            target_propensities,
            estimated_q_values,
        ) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
            edp.mdp_id,
            edp.action_mask.cpu().numpy(),
            edp.logged_rewards.cpu().numpy().flatten(),
            edp.logged_propensities.cpu().numpy().flatten(),
            edp.model_propensities.cpu().numpy(),
            # pyre-ignore [16]: Optional type has no attribute `cpu`
            edp.model_values.cpu().numpy(),
        )

        num_trajectories = actions.shape[0]
        trajectory_length = actions.shape[1]

        # j-step list always starts with the full-horizon ("inf") estimator;
        # -1 adds the purely model-based estimate, and remaining steps are
        # spread evenly over the horizon.
        j_steps = [float("inf")]
        if num_j_steps > 1:
            j_steps.append(-1)
        if num_j_steps > 2:
            interval = trajectory_length // (num_j_steps - 1)
            j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])

        # Per-step propensity / Q-value of the action actually logged
        # (actions is a one-hot mask over the action axis).
        target_propensity_for_logged_action = np.sum(
            np.multiply(target_propensities, actions), axis=2
        )
        estimated_q_values_for_logged_action = np.sum(
            np.multiply(estimated_q_values, actions), axis=2
        )
        # State value under the target policy: sum_a pi(a|s) * Q(s, a).
        estimated_state_values = np.sum(
            np.multiply(target_propensities, estimated_q_values), axis=2
        )

        # Cumulative per-step importance weights of the target vs. logged policy.
        importance_weights = target_propensity_for_logged_action / logged_propensities
        importance_weights = np.cumprod(importance_weights, axis=1)
        importance_weights = (
            WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
                importance_weights, whether_self_normalize_importance_weights
            )
        )

        # Weights shifted one step right; step 0 uses the uniform 1/N weight.
        importance_weights_one_earlier = (
            np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
        )
        importance_weights_one_earlier = np.hstack(
            [importance_weights_one_earlier, importance_weights[:, :-1]]
        )

        # discounts[t] = gamma ** t.
        discounts = np.logspace(
            start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
        )

        j_step_return_trajectories = []
        for j_step in j_steps:
            j_step_return_trajectories.append(
                WeightedSequentialDoublyRobustEstimator.calculate_step_return(
                    rewards,
                    discounts,
                    importance_weights,
                    importance_weights_one_earlier,
                    estimated_state_values,
                    estimated_q_values_for_logged_action,
                    j_step,
                )
            )
        j_step_return_trajectories = np.array(j_step_return_trajectories)

        # One scalar return estimate per j-step (summed over trajectories).
        j_step_returns = np.sum(j_step_return_trajectories, axis=1)

        if len(j_step_returns) == 1:
            weighted_doubly_robust = j_step_returns[0]
            weighted_doubly_robust_std_error = 0.0
        else:
            # break trajectories into several subsets to estimate confidence bounds
            infinite_step_returns = []
            num_subsets = int(
                min(
                    num_trajectories / 2,
                    WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
                )
            )
            interval = num_trajectories / num_subsets
            for i in range(num_subsets):
                # Recompute the full-horizon estimator on each subset of
                # trajectories; these per-subset estimates drive the
                # confidence bounds and bootstrap below.
                trajectory_subset = np.arange(
                    int(i * interval), int((i + 1) * interval)
                )

                importance_weights = (
                    target_propensity_for_logged_action[trajectory_subset]
                    / logged_propensities[trajectory_subset]
                )
                importance_weights = np.cumprod(importance_weights, axis=1)
                importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
                    importance_weights, whether_self_normalize_importance_weights
                )
                importance_weights_one_earlier = (
                    np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
                )
                importance_weights_one_earlier = np.hstack(
                    [importance_weights_one_earlier, importance_weights[:, :-1]]
                )
                infinite_step_return = np.sum(
                    WeightedSequentialDoublyRobustEstimator.calculate_step_return(
                        rewards[trajectory_subset],
                        discounts,
                        importance_weights,
                        importance_weights_one_earlier,
                        estimated_state_values[trajectory_subset],
                        estimated_q_values_for_logged_action[trajectory_subset],
                        float("inf"),
                    )
                )
                infinite_step_returns.append(infinite_step_return)

            # Compute weighted_doubly_robust mean point estimate using all data
            weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
                j_steps,
                num_j_steps,
                j_step_returns,
                infinite_step_returns,
                j_step_return_trajectories,
            )

            # Use bootstrapping to compute weighted_doubly_robust standard error
            bootstrapped_means = []
            sample_size = int(
                WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
                * num_subsets
            )
            for _ in range(
                WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
            ):
                # NOTE(review): sample_size is derived from num_subsets but
                # indices are drawn from range(num_j_steps); if sample_size
                # exceeds num_j_steps, np.random.choice(replace=False) raises.
                # Confirm this coupling is intended.
                random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
                random_idxs.sort()
                wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
                    j_steps=[j_steps[i] for i in random_idxs],
                    num_j_steps=sample_size,
                    j_step_returns=j_step_returns[random_idxs],
                    infinite_step_returns=infinite_step_returns,
                    j_step_return_trajectories=j_step_return_trajectories[random_idxs],
                )
                bootstrapped_means.append(wdr_estimate)
            weighted_doubly_robust_std_error = np.std(bootstrapped_means)

        # Average discounted logged return, used to normalize the estimate.
        episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
        logged_policy_score = np.nanmean(episode_values)
        if logged_policy_score < 1e-6:
            logger.warning(
                "Can't normalize WSDR-CPE because of small or negative logged_policy_score"
            )
            return CpeEstimate(
                raw=weighted_doubly_robust,
                normalized=0.0,
                raw_std_error=weighted_doubly_robust_std_error,
                normalized_std_error=0.0,
            )
        return CpeEstimate(
            raw=weighted_doubly_robust,
            normalized=weighted_doubly_robust / logged_policy_score,
            raw_std_error=weighted_doubly_robust_std_error,
            normalized_std_error=weighted_doubly_robust_std_error / logged_policy_score,
        )

    def compute_weighted_doubly_robust_point_estimate(
        self,
        j_steps,
        num_j_steps,
        j_step_returns,
        infinite_step_returns,
        j_step_return_trajectories,
    ):
        """Blend the j-step returns into one estimate by minimizing MSE.

        Weights over the j-step estimators are found by minimizing a
        bias-plus-covariance objective subject to summing to 1.
        """
        (
            low_bound,
            high_bound,
        ) = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
            infinite_step_returns,
            WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
        )
        # decompose error into bias + variance
        j_step_bias = np.zeros([num_j_steps])
        where_lower = np.where(j_step_returns < low_bound)[0]
        j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
        where_higher = np.where(j_step_returns > high_bound)[0]
        j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound

        covariance = np.cov(j_step_return_trajectories)
        # NOTE(review): for a 1-D array, `.T` is a no-op, so this adds the
        # elementwise-squared bias vector broadcast over the covariance rows
        # rather than an outer product — confirm that is the intent.
        error = covariance + j_step_bias.T * j_step_bias

        # minimize mse error
        constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
        x = np.zeros([len(j_steps)])
        res = sp.optimize.minimize(
            mse_loss,
            x,
            args=error,
            constraints=constraint,
            bounds=[(0, 1) for _ in range(x.shape[0])],
        )
        x = np.array(res.x)
        return float(np.dot(x, j_step_returns))

    @staticmethod
    def transform_to_equal_length_trajectories(
        mdp_ids,
        actions,
        rewards,
        logged_propensities,
        target_propensities,
        estimated_q_values,
    ):
        """
        Take in samples (action, rewards, propensities, etc.) and output lists
        of equal-length trajectories (episodes) according to terminals.
        As the raw trajectories are of various lengths, the shorter ones are
        filled with zeros(ones) at the end.
        """
        num_actions = len(target_propensities[0])

        # A step is terminal when the next row belongs to a different mdp_id
        # (or it is the last row overall).
        terminals = np.zeros(mdp_ids.shape[0])
        for x in range(0, mdp_ids.shape[0]):
            if x + 1 == mdp_ids.shape[0] or mdp_ids[x, 0] != mdp_ids[x + 1, 0]:
                terminals[x] = 1

        trajectories = []
        episode_start = 0
        episode_ends = np.nonzero(terminals)[0]
        if len(terminals) - 1 not in episode_ends:
            episode_ends = np.append(episode_ends, len(terminals) - 1)
        for episode_end in episode_ends:
            trajectories.append(np.arange(episode_start, episode_end + 1))
            episode_start = episode_end + 1

        action_trajectories = []
        reward_trajectories = []
        logged_propensity_trajectories = []
        target_propensity_trajectories = []
        Q_value_trajectories = []
        for trajectory in trajectories:
            action_trajectories.append(actions[trajectory])
            reward_trajectories.append(rewards[trajectory])
            logged_propensity_trajectories.append(logged_propensities[trajectory])
            target_propensity_trajectories.append(target_propensities[trajectory])
            Q_value_trajectories.append(estimated_q_values[trajectory])

        def to_equal_length(x, fill_value):
            # Pad ragged trajectories to a common length with fill_value;
            # zip_longest transposes, so swap axes back to (episode, step).
            x_equal_length = np.array(
                list(itertools.zip_longest(*x, fillvalue=fill_value))
            ).swapaxes(0, 1)
            return x_equal_length

        action_trajectories = to_equal_length(
            action_trajectories, np.zeros([num_actions])
        )
        reward_trajectories = to_equal_length(reward_trajectories, 0)
        # Padding logged propensities with 1 keeps cumulative importance
        # weights unchanged past an episode's true end.
        logged_propensity_trajectories = to_equal_length(
            logged_propensity_trajectories, 1
        )
        target_propensity_trajectories = to_equal_length(
            target_propensity_trajectories, np.zeros([num_actions])
        )
        Q_value_trajectories = to_equal_length(
            Q_value_trajectories, np.zeros([num_actions])
        )

        return (
            action_trajectories,
            reward_trajectories,
            logged_propensity_trajectories,
            target_propensity_trajectories,
            Q_value_trajectories,
        )

    @staticmethod
    def normalize_importance_weights(
        importance_weights, whether_self_normalize_importance_weights
    ):
        """Normalize cumulative importance weights in place.

        Self-normalized mode divides each timestep's column by its sum
        (columns summing to 0 fall back to uniform weights); otherwise
        weights are divided by the number of trajectories.
        """
        if whether_self_normalize_importance_weights:
            sum_importance_weights = np.sum(importance_weights, axis=0)
            where_zeros = np.where(sum_importance_weights == 0.0)[0]
            sum_importance_weights[where_zeros] = len(importance_weights)
            importance_weights[:, where_zeros] = 1.0
            importance_weights /= sum_importance_weights
            return importance_weights
        else:
            importance_weights /= importance_weights.shape[0]
            return importance_weights

    @staticmethod
    def calculate_step_return(
        rewards,
        discounts,
        importance_weights,
        importance_weights_one_earlier,
        estimated_state_values,
        estimated_q_values,
        j_step,
    ):
        """Per-trajectory j-step doubly-robust return.

        Importance-sampled rewards up to j_step, plus the model value of the
        state after j_step, minus a control variate built from Q/V estimates.
        j_step of inf means the full horizon (clipped to trajectory length).
        """
        trajectory_length = len(rewards[0])
        num_trajectories = len(rewards)
        j_step = int(min(j_step, trajectory_length - 1))

        weighted_discounts = np.multiply(discounts, importance_weights)
        weighted_discounts_one_earlier = np.multiply(
            discounts, importance_weights_one_earlier
        )

        importance_sampled_cumulative_reward = np.sum(
            np.multiply(weighted_discounts[:, : j_step + 1], rewards[:, : j_step + 1]),
            axis=1,
        )

        if j_step < trajectory_length - 1:
            direct_method_value = (
                weighted_discounts_one_earlier[:, j_step + 1]
                * estimated_state_values[:, j_step + 1]
            )
        else:
            direct_method_value = np.zeros([num_trajectories])

        control_variate = np.sum(
            np.multiply(
                weighted_discounts[:, : j_step + 1], estimated_q_values[:, : j_step + 1]
            )
            - np.multiply(
                weighted_discounts_one_earlier[:, : j_step + 1],
                estimated_state_values[:, : j_step + 1],
            ),
            axis=1,
        )

        j_step_return = (
            importance_sampled_cumulative_reward + direct_method_value - control_variate
        )

        return j_step_return

    @staticmethod
    def confidence_bounds(x, confidence):
        """Two-sided Student-t confidence interval around the mean of x."""
        n = len(x)
        m, se = np.mean(x), sp.stats.sem(x)
        # NOTE(review): relies on the private `sp.stats.t._ppf`; the public
        # `sp.stats.t.ppf` provides the same quantile — confirm before changing.
        h = se * sp.stats.t._ppf((1 + confidence) / 2.0, n - 1)
        return m - h, m + h
def mse_loss(x, error):
    """Quadratic-form objective x @ error @ x.T minimized by the estimator."""
    weighted = np.dot(x, error)
    return np.dot(weighted, x.T)
| 14,541 | 36.576227 | 106 | py |
ReAgent | ReAgent-master/reagent/evaluation/ope_adapter.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import torch
from reagent.evaluation.cpe import (
CpeEstimate,
CpeEstimateSet,
bootstrapped_std_error_of_mean,
)
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.evaluator import Evaluator
from reagent.evaluation.weighted_sequential_doubly_robust_estimator import (
WeightedSequentialDoublyRobustEstimator,
)
from reagent.ope.estimators.contextual_bandits_estimators import (
BanditsEstimatorInput,
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
LogSample,
ModelOutputs,
)
from reagent.ope.estimators.estimator import (
Estimator,
EstimatorResult,
EstimatorResults,
)
from reagent.ope.estimators.sequential_estimators import (
Action,
ActionDistribution,
DoublyRobustEstimator as SeqDREstimator,
MAGICEstimator,
RLEstimator,
RLEstimatorInput,
RLPolicy,
State,
Transition,
ValueFunction,
)
from reagent.ope.estimators.types import ActionSpace
logger = logging.getLogger(__name__)
class OPEstimatorAdapter:
    """Adapts a `reagent.ope` contextual-bandit Estimator to the CpeEstimate API."""

    def __init__(self, ope_estimator: Estimator, device=None):
        self._ope_estimator = ope_estimator
        self._device = device

    @staticmethod
    def edp_to_contextual_bandit_log(
        edp: EvaluationDataPage, device=None
    ) -> BanditsEstimatorInput:
        """Convert an EvaluationDataPage into a BanditsEstimatorInput.

        Each row becomes a LogSample; the logged action is read off the
        one-hot action mask, and a row whose mask is all zeros yields a
        None action with zero logged propensities.
        """
        log = []
        n = edp.model_rewards.shape[0]
        for idx in range(n):
            # Action is only 1 if tgt policy and log policy took same action?
            action = torch.argmax(edp.action_mask[idx]).item()
            if edp.action_mask[idx][action] == 0.0:
                action = None
            logged_propensities = torch.zeros(
                edp.model_propensities[idx].shape, device=device
            )
            if action is not None:
                logged_propensities[action] = edp.logged_propensities[idx]
            log.append(
                LogSample(
                    context=None if edp.contexts is None else edp.contexts[idx],
                    log_action=Action(action),
                    log_reward=edp.logged_rewards[idx],
                    log_action_probabilities=ActionDistribution(logged_propensities),
                    tgt_action_probabilities=ActionDistribution(
                        edp.model_propensities[idx]
                    ),
                    tgt_action=Action(action),
                    model_outputs=ModelOutputs(
                        tgt_reward_from_log_action=edp.model_rewards_for_logged_action[
                            idx
                        ],
                        tgt_rewards=edp.model_rewards[idx],
                    )
                    # item features not specified as edp came from trained reward model
                )
            )
        return BanditsEstimatorInput(ActionSpace(edp.action_mask.shape[1]), log, True)

    @staticmethod
    def estimator_result_to_cpe_estimate(result: EstimatorResult) -> CpeEstimate:
        """Repackage an EstimatorResult as a CpeEstimate.

        :raises AssertionError: if any required field of `result` is None.
        """
        # Fix: the first assert previously duplicated the `normalized` check;
        # the raw estimated reward must be validated as well since it is used
        # below.
        assert result.estimated_reward is not None
        assert result.estimated_reward_normalized is not None
        assert result.estimated_reward_std_error is not None
        assert result.estimated_reward_normalized_std_error is not None
        return CpeEstimate(
            raw=result.estimated_reward,
            normalized=result.estimated_reward_normalized,
            raw_std_error=result.estimated_reward_std_error,
            normalized_std_error=result.estimated_reward_normalized_std_error,
        )

    def estimate(self, edp: EvaluationDataPage, **kwargs) -> CpeEstimate:
        """Run the wrapped OPE estimator on `edp` and return a CpeEstimate."""
        result = self._ope_estimator.evaluate(
            OPEstimatorAdapter.edp_to_contextual_bandit_log(edp), **kwargs
        )
        assert isinstance(result, EstimatorResult)
        # Use the module-level logger (was `logging.info`) for consistency
        # with the rest of this module.
        logger.info(f"Got estimator result {result}, turning into cpe estimate")
        return OPEstimatorAdapter.estimator_result_to_cpe_estimate(result)
class SequentialOPEstimatorAdapter:
    """Adapts a `reagent.ope` sequential RLEstimator to the CpeEstimate API."""

    def __init__(self, seq_ope_estimator: RLEstimator, gamma: float, device=None):
        self.seq_ope_estimator = seq_ope_estimator
        # Per-step discount factor forwarded to the RLEstimatorInput.
        self.gamma = gamma
        self._device = device

    class EDPSeqPolicy(RLPolicy):
        """Target policy backed by the EDP's model propensities."""

        def __init__(
            self, num_actions: int, model_propensities: torch.Tensor, device=None
        ):
            super().__init__(ActionSpace(num_actions), device)
            self.model_propensities = model_propensities

        def action_dist(self, state: State) -> ActionDistribution:
            # "state" is (trajectory, step)
            return self.model_propensities[state.value]

    class EDPValueFunc(ValueFunction):
        """Value function backed by the EDP's model Q-values and target propensities."""

        def __init__(
            self, model_values: torch.Tensor, target_propensities: torch.Tensor
        ):
            self.model_values = model_values
            self.target_propensities = target_propensities

        def state_action_value(self, state: State, action: Action) -> float:
            # Q(s, a) read directly from the model values tensor.
            return self.model_values[state.value][action].item()

        def state_value(self, state: State) -> float:
            # V(s) = sum_a pi(a|s) * Q(s, a) under the target policy.
            return torch.dot(
                self.model_values[state.value], self.target_propensities[state.value]
            ).item()

        def reset(self):
            # Stateless: nothing to reset.
            pass

    @staticmethod
    def edp_to_rl_input(
        edp: EvaluationDataPage, gamma, device=None
    ) -> RLEstimatorInput:
        """Convert an EvaluationDataPage into an RLEstimatorInput.

        Trajectories are first padded to equal length, then each timestep
        becomes a Transition whose states are (trajectory, step) tuples.
        Padded steps (all-zero action mask) are dropped.
        """
        assert edp.model_values is not None
        eq_len = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
            edp.mdp_id,
            edp.action_mask.cpu().numpy(),
            edp.logged_rewards.cpu().numpy().flatten(),
            edp.logged_propensities.cpu().numpy().flatten(),
            edp.model_propensities.cpu().numpy(),
            # pyre-ignore [16]: Optional type has no attribute `cpu`
            edp.model_values.cpu().numpy(),
        )

        (
            actions,
            rewards,
            logged_propensities,
            target_propensities,
            estimated_q_values,
        ) = (
            torch.tensor(x, dtype=torch.double, device=device, requires_grad=True)
            for x in eq_len
        )

        num_examples = logged_propensities.shape[0]
        horizon = logged_propensities.shape[1]

        log = []
        for traj in range(num_examples):
            log.append(
                [
                    Transition(
                        last_state=State((traj, i)),
                        action=torch.argmax(actions[traj, i]).item(),
                        action_prob=logged_propensities[traj, i].item(),
                        state=State((traj, i + 1)),
                        reward=rewards[traj, i].item(),
                    )
                    for i in range(horizon - 1)
                    # Skip padding steps: their one-hot action mask is all zero.
                    if actions[traj, i][torch.argmax(actions[traj, i]).item()] != 0.0
                ]
            )

        return RLEstimatorInput(
            gamma=gamma,
            log=log,
            target_policy=SequentialOPEstimatorAdapter.EDPSeqPolicy(
                actions.shape[2], target_propensities
            ),
            value_function=SequentialOPEstimatorAdapter.EDPValueFunc(
                estimated_q_values, target_propensities
            ),
            ground_truth=None,
            horizon=horizon,
        )

    @staticmethod
    def estimator_results_to_cpe_estimate(
        estimator_results: EstimatorResults,
    ) -> CpeEstimate:
        """Average per-result rewards into a CpeEstimate.

        Normalization divides by the mean logged reward; it is skipped (set
        to 0) when that mean is too small or negative.
        """
        scores = torch.tensor(
            [r.estimated_reward for r in estimator_results.results], dtype=torch.double
        )
        log_scores = torch.tensor(
            [r.log_reward for r in estimator_results.results], dtype=torch.double
        )

        dr_score = float(torch.mean(scores).item())
        dr_score_std_error = bootstrapped_std_error_of_mean(scores)

        log_score = float(torch.mean(log_scores).item())
        if log_score < 1e-6:
            logger.warning(
                "Can't normalize SDR-CPE because of small"
                f" or negative logged_policy_score ({log_score})."
                f"Episode values: {log_scores}."
            )
            return CpeEstimate(
                raw=dr_score,
                normalized=0.0,
                raw_std_error=dr_score_std_error,
                normalized_std_error=0.0,
            )
        return CpeEstimate(
            raw=dr_score,
            normalized=dr_score / log_score,
            raw_std_error=dr_score_std_error,
            normalized_std_error=dr_score_std_error / log_score,
        )

    def estimate(self, edp: EvaluationDataPage) -> CpeEstimate:
        """Run the wrapped sequential estimator on `edp` and return a CpeEstimate."""
        estimator_results = self.seq_ope_estimator.evaluate(
            SequentialOPEstimatorAdapter.edp_to_rl_input(edp, self.gamma, self._device)
        )
        assert isinstance(estimator_results, EstimatorResults)
        return SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(
            estimator_results
        )
class OPEvaluator(Evaluator):
    """Evaluator that scores CPE with the `reagent.ope` estimator implementations."""

    def __init__(
        self, action_names, gamma, model, metrics_to_score=None, device=None
    ) -> None:
        super().__init__(action_names, gamma, model, metrics_to_score)
        self._device = device
        # Contextual-bandit estimators.
        self.ope_dm_estimator = OPEstimatorAdapter(DMEstimator(device=self._device))
        self.ope_ips_estimator = OPEstimatorAdapter(IPSEstimator(device=self._device))
        self.ope_dr_estimator = OPEstimatorAdapter(
            DoublyRobustEstimator(device=self._device)
        )
        # Sequential estimators (unweighted, weighted, and MAGIC).
        self.ope_seq_dr_estimator = SequentialOPEstimatorAdapter(
            SeqDREstimator(device=self._device), gamma, device=self._device
        )
        self.ope_seq_weighted_dr_estimator = SequentialOPEstimatorAdapter(
            SeqDREstimator(weighted=True, device=self._device),
            gamma,
            device=self._device,
        )
        self.ope_seq_magic_estimator = SequentialOPEstimatorAdapter(
            MAGICEstimator(device=self._device), gamma
        )

    def score_cpe(self, metric_name, edp: EvaluationDataPage):
        """Run every configured estimator on `edp` and bundle the results."""
        logger.info("Using OPE adapter")
        direct_method = self.ope_dm_estimator.estimate(edp)
        inverse_propensity = self.ope_ips_estimator.estimate(edp)
        doubly_robust = self.ope_dr_estimator.estimate(edp)
        sequential_doubly_robust = self.ope_seq_dr_estimator.estimate(edp)
        weighted_doubly_robust = self.ope_seq_weighted_dr_estimator.estimate(edp)
        magic = self.ope_seq_magic_estimator.estimate(edp)
        return CpeEstimateSet(
            direct_method=direct_method,
            inverse_propensity=inverse_propensity,
            doubly_robust=doubly_robust,
            sequential_doubly_robust=sequential_doubly_robust,
            weighted_doubly_robust=weighted_doubly_robust,
            magic=magic,
        )
| 10,869 | 36.353952 | 96 | py |
ReAgent | ReAgent-master/reagent/evaluation/feature_importance/feature_importance_perturbation.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
from collections import defaultdict
from typing import Callable, Any, Optional
import pandas as pd
import torch
import torch.nn as nn
from reagent.core.dataclasses import dataclass
from reagent.evaluation.feature_importance.feature_importance_base import (
FeatureImportanceBase,
)
logger = logging.getLogger(__name__)
@dataclass
class FeatureImportancePerturbation(FeatureImportanceBase):
    """Permutation-based feature importance.

    Each feature column is perturbed in isolation per batch; the mean
    absolute change in the model's prediction is the feature's importance.
    """

    data_loader: Any

    # Consume model (first arg) and data (second arg) to make model predictions
    # Expected to return a tensor of shape (batch_size, 1)
    pred_fn: Callable[[nn.Module, Any], torch.Tensor]

    # Perturb data (first arg) on a specific feature id (second arg)
    perturb_fn: Callable[[Any, int], Any]

    # How many rounds of perturbations for collecting feature importance for each batch
    # The higher it is, the less variance the result will have
    repeat: int = 1

    def compute_feature_importance(self) -> pd.DataFrame:
        """Return a DataFrame of per-feature importance, most important first."""
        per_feature_deltas = defaultdict(list)
        for batch_idx, data in enumerate(self.data_loader):
            for r in range(self.repeat):
                baseline = self.pred_fn(self.model, data)
                for feature_idx, feature_id in enumerate(self.sorted_feature_ids):
                    # Deep-copy so perturbing one feature never leaks into
                    # the next feature's baseline.
                    perturbed = self.perturb_fn(copy.deepcopy(data), feature_idx)
                    prediction = self.pred_fn(self.model, perturbed)
                    delta = torch.mean(torch.abs(prediction - baseline)).detach()
                    per_feature_deltas[feature_id].append(delta)
                logger.info(f"Processed {batch_idx} batches {r}-th time")

        averaged = {
            feature_id: torch.mean(torch.stack(deltas)).item()
            for feature_id, deltas in per_feature_deltas.items()
        }
        result_df = pd.DataFrame.from_dict(
            averaged, orient="index", columns=["feature_importance"]
        ).sort_values(by=["feature_importance"], ascending=False)
        # Fblearner UI can't show row names (index). So manually add names as a column
        result_df.insert(0, "feature_id", result_df.index)
        return result_df
def create_default_perturb_fn(key: str):
    """Build a perturb_fn that shuffles one feature column across the batch.

    The returned function applies a single shared random permutation to the
    selected feature's values and its presence mask under ``data[key]``,
    leaving every other feature untouched, and returns the mutated data.
    """

    def default_perturb_fn(data, feature_idx):
        values, presence = data[key]
        permutation = torch.randperm(values.shape[0])
        values[:, feature_idx] = values[permutation, feature_idx]
        presence[:, feature_idx] = presence[permutation, feature_idx]
        return data

    return default_perturb_fn
| 2,857 | 37.106667 | 87 | py |
ReAgent | ReAgent-master/reagent/evaluation/feature_importance/feature_importance_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import pandas as pd
import torch.nn as nn
from reagent.core.dataclasses import dataclass
@dataclass
class FeatureImportanceBase:
    """Base class for feature-importance computations over a trained model."""

    # Model whose input features are scored.
    model: nn.Module
    # Feature ids in sorted order; position i corresponds to feature index i.
    sorted_feature_ids: List[int]

    def compute_feature_importance(self) -> pd.DataFrame:
        """Return per-feature importance scores; implemented by subclasses."""
        raise NotImplementedError()
| 401 | 22.647059 | 71 | py |
ReAgent | ReAgent-master/reagent/core/aggregators.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import deque
from typing import Callable, Deque, Dict, List, Optional, Any
import numpy as np
import torch
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.core.tracker import Aggregator
logger = logging.getLogger(__name__)
class TensorAggregator(Aggregator):
    """Aggregator that concatenates a batch of tensors before aggregating."""

    def __call__(self, key: str, values):
        # Ensure that tensor is on cpu before aggregation.
        merged = torch.cat(values, dim=0).cpu()
        return super().__call__(key, merged)
def _log_histogram_and_mean(log_key, val):
    """Log a histogram of `val` and its mean to TensorBoard under `log_key`.

    Re-raises the ValueError TensorBoard emits for un-histogrammable input
    (e.g. NaNs) after logging a warning with the offending value.
    """
    try:
        SummaryWriterContext.add_histogram(log_key, val)
        SummaryWriterContext.add_scalar(f"{log_key}/mean", val.mean())
    except ValueError:
        logger.warning(
            f"Cannot create histogram for key: {log_key}; "
            "this is likely because you have NULL value in your input; "
            f"value: {val}"
        )
        raise
class TensorBoardHistogramAndMeanAggregator(TensorAggregator):
    """Logs each aggregated batch as a TensorBoard histogram plus its mean."""

    def __init__(self, key: str, log_key: str):
        super().__init__(key)
        self.log_key = log_key

    def aggregate(self, values):
        # Accept 1-D tensors or 2-D column vectors of shape (N, 1).
        is_flat = len(values.shape) == 1
        is_column = len(values.shape) == 2 and values.shape[1] == 1
        assert is_flat or is_column, f"Unexpected shape for {self.key}: {values.shape}"
        _log_histogram_and_mean(self.log_key, values)
class TensorBoardActionHistogramAndMeanAggregator(TensorAggregator):
    """Per-action histogram + mean logging, grouped under one TensorBoard chart."""

    def __init__(
        self,
        key: str,
        category: str,
        title: str,
        actions: List[str],
        log_key_prefix: Optional[str] = None,
    ):
        super().__init__(key)
        self.log_key_prefix = log_key_prefix or f"{category}/{title}"
        self.actions = actions
        # Register one multi-line chart showing every action's mean together.
        SummaryWriterContext.add_custom_scalars_multilinechart(
            [f"{self.log_key_prefix}/{action_name}/mean" for action_name in actions],
            category=category,
            title=title,
        )

    def aggregate(self, values):
        """Log one histogram + mean per action column.

        :param values: tensor of shape (N, num_actions).
        :raises ValueError: if the column count doesn't match `actions`.
        """
        if not (len(values.shape) == 2 and values.shape[1] == len(self.actions)):
            raise ValueError(
                "Unexpected shape for {}: {}; actions: {}".format(
                    self.key, values.shape, self.actions
                )
            )
        for i, action in enumerate(self.actions):
            _log_histogram_and_mean(f"{self.log_key_prefix}/{action}", values[:, i])
class TensorBoardActionCountAggregator(TensorAggregator):
    """Logs how many times each action index appears in every aggregated batch."""

    def __init__(self, key: str, title: str, actions: List[str]):
        super().__init__(key)
        self.log_key = f"actions/{title}"
        self.actions = actions
        # Register one multi-line chart showing all action counts together.
        SummaryWriterContext.add_custom_scalars_multilinechart(
            [f"{self.log_key}/{action_name}" for action_name in actions],
            category="actions",
            title=title,
        )

    def aggregate(self, values):
        # `values` holds action indices; count occurrences of each index.
        for i, action in enumerate(self.actions):
            SummaryWriterContext.add_scalar(
                f"{self.log_key}/{action}", (values == i).sum().item()
            )
class MeanAggregator(TensorAggregator):
    """Records the mean of every aggregated batch."""

    def __init__(self, key: str):
        super().__init__(key)
        self.values: List[float] = []

    def aggregate(self, values):
        batch_mean = values.mean().item()
        logger.info(f"{self.key}: {batch_mean}")
        self.values.append(batch_mean)
class ListAggregator(Aggregator):
    """Accumulates every observed value into one flat list."""

    def __init__(self, key: str):
        super().__init__(key)
        self.values: Optional[Any] = []

    def aggregate(self, values):
        self.values += values
class EpochListAggregator(TensorAggregator):
    """Collects flattened values across an epoch; `flush` snapshots and resets."""

    def __init__(self, key: str):
        super().__init__(key)
        self.values: List = []
        self.epoch_values: List = []

    def aggregate(self, values):
        self.values.extend(torch.flatten(values).tolist())

    def flush(self):
        """Move the accumulated values into `epoch_values` and start fresh."""
        if not self.values:
            return
        self.epoch_values = self.values
        self.values = []
class FunctionsByActionAggregator(TensorAggregator):
    """
    Aggregating the input by action, using the given functions. The input is
    assumed to be an `N x D` tensor, where each column is an action, and
    each row is an example. This takes a dictionary of functions so that the
    values only need to be concatenated once.

    Example:

        agg = FunctionByActionAggregator(
            "model_values", ["A", "B], {"mean": torch.mean, "std": torch.std}
        )

        input = torch.tensor([
            [0.9626, 0.7142],
            [0.7216, 0.5426],
            [0.4225, 0.9485],
        ])
        agg(input)
        input2 = torch.tensor([
            [0.0103, 0.0306],
            [0.9846, 0.8373],
            [0.4614, 0.0174],
        ])
        agg(input2)
        print(agg.values)

        {
            "mean": {
                "A": [0.7022, 0.4854],
                "B": [0.7351, 0.2951],
            },
            "std": {
                "A": [0.2706, 0.4876],
                "B": [0.2038, 0.4696],
            }
        }
    """

    def __init__(self, key: str, actions: List[str], fns: Dict[str, Callable]):
        super().__init__(key)
        self.actions = actions
        # values[fn_name][action] is the history of that function's output
        # for that action, one entry per aggregate() call.
        self.values: Dict[str, Dict[str, List[float]]] = {
            fn: {action: [] for action in self.actions} for fn in fns
        }
        self.fns = fns

    def aggregate(self, values):
        # Apply each function column-wise (dim=0 reduces over examples),
        # yielding one scalar per action.
        for name, func in self.fns.items():
            aggregated_values = func(values, dim=0)
            for action, value in zip(self.actions, aggregated_values):
                value = value.item()
                self.values[name][action].append(value)
            latest_values = {
                action: values[-1] for action, values in self.values[name].items()
            }
            logger.info(f"{name} {self.key} {latest_values}")
class ActionCountAggregator(TensorAggregator):
    """
    Counting the frequency of each action. Actions are indexed from `0` to
    `len(actions) - 1`. The input is assumed to contain action index.
    """

    def __init__(self, key: str, actions: List[str]):
        super().__init__(key)
        self.actions = actions
        # values[action] is the per-aggregate()-call count history.
        self.values: Dict[str, List[int]] = {action: [] for action in actions}

    def aggregate(self, values):
        for i, action in enumerate(self.actions):
            self.values[action].append((values == i).sum().item())
        latest_count = {action: counts[-1] for action, counts in self.values.items()}
        logger.info(f"{self.key} {latest_count}")

    def get_distributions(self) -> Dict[str, List[float]]:
        """
        Returns the action disributions in each aggregating step
        """
        # totals[t] is the number of actions observed in step t across all actions.
        totals = np.array([sum(counts) for counts in zip(*self.values.values())])
        return {
            action: (np.array(counts) / totals).tolist()
            for action, counts in self.values.items()
        }

    def get_cumulative_distributions(self) -> Dict[str, float]:
        """
        Returns the cumulative distributions in each aggregating step
        """
        totals = sum(sum(counts) for counts in zip(*self.values.values()))
        return {action: sum(counts) / totals for action, counts in self.values.items()}
# Default window size (1M entries) for RecentValuesAggregator's bounded deque.
_RECENT_DEFAULT_SIZE = int(1e6)
class RecentValuesAggregator(TensorAggregator):
    """Keeps only the most recent `size` scalar values in a bounded deque."""

    def __init__(self, key: str, size: int = _RECENT_DEFAULT_SIZE):
        super().__init__(key)
        self.values: Deque[float] = deque(maxlen=size)

    def aggregate(self, values):
        self.values.extend(torch.flatten(values).tolist())
| 7,651 | 30.751037 | 87 | py |
ReAgent | ReAgent-master/reagent/core/tracker.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import functools
import logging
from typing import Dict, List, Type
import torch
logger = logging.getLogger(__name__)
class Observer:
    """
    Base class for observers that subscribe to a fixed set of reporting keys.
    """

    def __init__(self, observing_keys: List[str]):
        super().__init__()
        assert isinstance(observing_keys, list)
        self.observing_keys = observing_keys

    def get_observing_keys(self) -> List[str]:
        """Return the keys this observer subscribes to."""
        return self.observing_keys

    def update(self, key: str, value):
        """Handle a new value for `key`; no-op in the base class."""
        pass
class Aggregator:
    """Base class for aggregators bound to a single key; subclasses override
    `aggregate` (and optionally `flush`)."""

    def __init__(self, key: str):
        super().__init__()
        self.key = key

    def __call__(self, key: str, values):
        """Dispatch `values` to `aggregate` after checking the key matches."""
        assert key == self.key, f"Got {key}; expected {self.key}"
        self.aggregate(values)

    def aggregate(self, values):
        """No-op in the base class."""
        pass

    def flush(self):
        """No-op in the base class."""
        pass
class ObservableMixin:
    """Mixin adding observer registration and notification to a class.

    Subclasses declare their observable keys via `_observable_value_types`.
    """

    def __init__(self):
        super().__init__()
        # Map observable key -> list of subscribed observers.
        self._observers = {v: [] for v in self._observable_value_types}

    @property
    def _observable_value_types(self) -> Dict[str, Type]:
        """Mapping of observable key -> value type; subclasses must override."""
        raise NotImplementedError

    def add_observer(self, observer: Observer):
        """Subscribe `observer` to the keys it declares; unknown keys are warned about."""
        observing_keys = observer.get_observing_keys()
        unknown_keys = [
            k for k in observing_keys if k not in self._observable_value_types
        ]
        if unknown_keys:
            logger.warning(f"{unknown_keys} cannot be observed in {type(self)}")
        for k in observing_keys:
            if k in self._observers and observer not in self._observers[k]:
                self._observers[k].append(observer)
        return self

    def add_observers(self, observers: List[Observer]):
        """Subscribe each observer in turn; returns self for chaining."""
        for observer in observers:
            self.add_observer(observer)
        return self

    def notify_observers(self, **kwargs):
        """Push each keyword value to the observers subscribed to its key.

        None values are skipped so callers can report optionally. Values for
        keys declared as torch.Tensor are best-effort converted to detached
        tensors of rank >= 1 before delivery.
        """
        for key, value in kwargs.items():
            if value is None:
                # Allow optional reporting
                continue

            assert key in self._observers, f"Unknown key: {key}"

            # TODO: Create a generic framework for type conversion
            if self._observable_value_types[key] == torch.Tensor:
                try:
                    if not isinstance(value, torch.Tensor):
                        value = torch.tensor(value)
                    if len(value.shape) == 0:
                        value = value.reshape(1)
                    value = value.detach()
                except Exception:
                    # Be lenient about conversion since ReporterBase
                    # has inaccurate type
                    pass

            for observer in self._observers[key]:
                observer.update(key, value)
def observable(cls=None, **kwargs):  # noqa: C901
    """
    Decorator to mark a class as producing observable values. The names of the
    observable values are the names of keyword arguments. The values of keyword
    arguments are the types of the value. The type is currently not used for
    anything.
    """
    assert kwargs
    observable_value_types = kwargs

    def wrap(cls):
        # Guard against double application / clashes with ObservableMixin.
        assert not hasattr(cls, "add_observer")
        assert not hasattr(cls, "notify_observers")

        original_init = cls.__init__

        @functools.wraps(original_init)
        def new_init(self, *args, **kwargs):
            # Run the class's own __init__ first, then install the observer
            # registry exactly as ObservableMixin.__init__ would.
            original_init(self, *args, **kwargs)
            assert not hasattr(self, "_observable_value_types")
            assert not hasattr(self, "_observers")
            self._observable_value_types = observable_value_types
            self._observers = {v: [] for v in observable_value_types}

        cls.__init__ = new_init

        # Borrow the mixin's methods instead of requiring inheritance.
        cls.add_observer = ObservableMixin.add_observer
        cls.add_observers = ObservableMixin.add_observers
        cls.notify_observers = ObservableMixin.notify_observers

        return cls

    # Support both bare-argument (@observable(x=int)) usage and direct call.
    if cls is None:
        return wrap
    return wrap(cls)
| 3,958 | 28.110294 | 80 | py |
ReAgent | ReAgent-master/reagent/core/utils.py | #!/usr/bin/env python3
from typing import Tuple, Optional
import torch
def get_rank() -> int:
    """
    Returns the torch.distributed rank of the process. 0 represents
    the main process and is the default if torch.distributed isn't set up
    """
    dist = torch.distributed
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
class lazy_property(object):
    """
    More or less copy-pasta: http://stackoverflow.com/a/6849299
    Meant to be used for lazy evaluation of an object attribute.
    property should represent non-mutable data, as it replaces itself.
    """

    def __init__(self, fget):
        self._fget = fget
        # Preserve the wrapped function's metadata; __name__ determines the
        # instance attribute the computed value is cached under.
        self.__doc__ = fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, obj_cls_type):
        if obj is None:
            # Fix: class-level access (e.g. `MyClass.attr`) should return the
            # descriptor itself — matching built-in `property` — rather than
            # None, which hid the descriptor from introspection.
            return self
        value = self._fget(obj)
        # Cache the computed value on the instance; subsequent lookups hit the
        # instance __dict__ and bypass this descriptor entirely.
        setattr(obj, self.__name__, value)
        return value
| 975 | 24.684211 | 82 | py |
ReAgent | ReAgent-master/reagent/core/configuration.py | #!/usr/bin/python3
import functools
from dataclasses import MISSING, Field, fields
from inspect import Parameter, isclass, signature
from typing import List, Optional, Type, Union
from reagent.core.dataclasses import dataclass
from torch import nn
BLOCKLIST_TYPES = [nn.Module]
def _get_param_annotation(p):
# if not annotated, infer type from default
if p.annotation == Parameter.empty and p.default == Parameter.empty:
raise ValueError(
f"Param {p}: both annotation and default are empty, "
"so cannot infer any useful annotation."
)
if p.annotation != Parameter.empty:
return p.annotation
# case on default types
if p.default is None:
raise ValueError(
f"Param {p}: default is None and annotation is empty, "
"cannot infer useful annotation"
)
if isinstance(p.default, tuple):
raise ValueError(f"Param {p}: default is tuple, cannot infer type")
if isinstance(p.default, dict):
raise ValueError(f"Param{p}: default is tuple, cannot infer type")
return type(p.default)
def make_config_class(
    func,
    allowlist: Optional[List[str]] = None,
    blocklist: Optional[List[str]] = None,
    blocklist_types: List[Type] = BLOCKLIST_TYPES,
):
    """
    Create a decorator to create dataclass with the arguments of `func` as fields.
    Only annotated arguments are converted to fields. If the default value is mutable,
    you must use `dataclass.field(default_factory=default_factory)` as default.
    In that case, the func has to be wrapped with @resolve_defaults below.
    `allowlist` & `blocklist` are mutually exclusive.
    """
    parameters = signature(func).parameters
    assert (
        allowlist is None or blocklist is None
    ), "allowlist & blocklist are mutually exclusive"
    blocklist_set = set(blocklist or [])
    def _is_type_blocklisted(t):
        # True if t (after unwrapping Optional[...] and generic aliases)
        # subclasses any entry in blocklist_types.
        if getattr(t, "__origin__", None) is Union:
            assert len(t.__args__) == 2 and t.__args__[1] == type(
                None
            ), "Only Unions of [X, None] (a.k.a. Optional[X]) are supported"
            t = t.__args__[0]
        if hasattr(t, "__origin__"):
            # Generic alias, e.g. List[int] -> list
            t = t.__origin__
        assert isclass(t), f"{t} is not a class."
        return any(issubclass(t, blocklist_type) for blocklist_type in blocklist_types)
    def _is_valid_param(p):
        # A parameter becomes a field only if it isn't blocklisted and its
        # type can be determined from annotation or default.
        if p.name in blocklist_set:
            return False
        if p.annotation == Parameter.empty and p.default == Parameter.empty:
            return False
        ptype = _get_param_annotation(p)
        if _is_type_blocklisted(ptype):
            return False
        return True
    allowlist = allowlist or [p.name for p in parameters.values() if _is_valid_param(p)]
    def wrapper(config_cls):
        # Add __annotations__ for dataclass
        config_cls.__annotations__ = {
            field_name: _get_param_annotation(parameters[field_name])
            for field_name in allowlist
        }
        # Set default values
        for field_name in allowlist:
            default = parameters[field_name].default
            if default != Parameter.empty:
                setattr(config_cls, field_name, default)
        # Add hashing to support hashing list and dict
        config_cls.__hash__ = param_hash
        # Add non-recursive asdict(). dataclasses.asdict() is recursive
        def asdict(self):
            return {field.name: getattr(self, field.name) for field in fields(self)}
        config_cls.asdict = asdict
        return dataclass(frozen=True)(config_cls)
    return wrapper
def _resolve_default(val):
if not isinstance(val, Field):
return val
if val.default != MISSING:
return val.default
if val.default_factory != MISSING:
return val.default_factory()
raise ValueError("No default value")
def resolve_defaults(func):
    """
    Decorator that resolves ``dataclasses.Field`` objects used as default
    parameter values: any keyword the caller did not supply whose declared
    default is a Field is replaced by that Field's resolved default value.
    """
    declared = list(signature(func).parameters.values())

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if len(args) > len(declared):
            raise ValueError(
                f"There are {len(declared)} parameters in total, "
                f"but args is {len(args)} long. \n"
                f"{args}"
            )
        # Positional args cover a prefix of the parameters; only the rest
        # can need default resolution.
        for param in declared[len(args):]:
            if isinstance(param.default, Field) and param.name not in kwargs:
                kwargs[param.name] = _resolve_default(param.default)
        return func(*args, **kwargs)

    return wrapper
def param_hash(p):
    """
    Hash helper for parameter dataclasses.

    Redefining __eq__ in a subclass suppresses the inherited __hash__, so
    parameter classes assign this function to __hash__. Only needed when the
    dataclass has a list or dict field (made hashable via _hash_field).
    """
    hashable_fields = tuple(_hash_field(getattr(p, f.name)) for f in fields(p))
    return hash(hashable_fields)
def _hash_field(val):
"""
Returns hashable value of the argument. A list is converted to a tuple.
A dict is converted to a tuple of sorted pairs of key and value.
"""
if isinstance(val, list):
return tuple(val)
elif isinstance(val, dict):
return tuple(sorted(val.items()))
else:
return val
| 5,364 | 31.515152 | 88 | py |
ReAgent | ReAgent-master/reagent/core/types.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
import logging
# The dataclasses in this file should be vanilla dataclass to have minimal overhead
from dataclasses import dataclass, field
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
# Triggering registration to registries
import reagent.core.result_types # noqa
import torch
import torch.nn.functional as F
from reagent.core.base_dataclass import BaseDataClass
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass as pydantic_dataclass
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.registry_meta import wrap_oss_with_dataclass
from reagent.core.tagged_union import TaggedUnion
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import DECODER_START_SYMBOL, subsequent_mask
from reagent.preprocessing.types import InputColumn
if IS_FB_ENVIRONMENT:
import reagent.core.fb.fb_result_types # noqa
class NoDuplicatedWarningLogger:
    """Wraps a logger so each distinct warning message is emitted at most once."""

    def __init__(self, logger):
        # Underlying logger plus the set of messages already forwarded.
        self.logger = logger
        self.msg = set()

    def warning(self, msg):
        """Forward *msg* to the wrapped logger unless it was seen before."""
        if msg in self.msg:
            return
        self.logger.warning(msg)
        self.msg.add(msg)
# Module-level logger, plus a wrapper that suppresses repeated warnings.
logger = logging.getLogger(__name__)
no_dup_logger = NoDuplicatedWarningLogger(logger)
def isinstance_namedtuple(x):
    """Heuristically detect a namedtuple instance: a tuple exposing ``_fields``."""
    return hasattr(x, "_fields") and isinstance(x, tuple)
@dataclass
class TensorDataClass(BaseDataClass):
    """
    Dataclass base whose instances forward unknown attribute lookups to
    ``torch.Tensor`` methods, applied recursively to every field that is a
    Tensor, TensorDataClass, dict, or tuple. Also provides ``cuda()``/``cpu()``
    for moving all tensor fields between devices.
    """

    def __getattr__(self, attr):
        # Let dunder lookups (copy/pickle protocols etc.) fail fast.
        if attr.startswith("__") and attr.endswith("__"):
            raise AttributeError
        tensor_attr = getattr(torch.Tensor, attr, None)
        if tensor_attr is None or not callable(tensor_attr):
            # TODO: can we get this working well with jupyter?
            logger.error(
                f"Attempting to call {self.__class__.__name__}.{attr} on "
                f"{type(self)} (instance of TensorDataClass)."
            )
            if tensor_attr is None:
                # Fixed: message previously lacked the space after the class name.
                raise AttributeError(
                    f"{self.__class__.__name__} doesn't have {attr} attribute."
                )
            else:
                raise RuntimeError(f"{self.__class__.__name__}.{attr} is not callable.")

        def continuation(*args, **kwargs):
            def f(v):
                # if possible, returns v.attr(*args, **kwargs).
                # otws, return v
                if isinstance(v, (torch.Tensor, TensorDataClass)):
                    return getattr(v, attr)(*args, **kwargs)
                elif isinstance(v, dict):
                    return {kk: f(vv) for kk, vv in v.items()}
                elif isinstance(v, tuple):
                    return tuple(f(vv) for vv in v)
                return v

            return type(self)(**f(self.__dict__))

        return continuation

    def cuda(self, *args, **kwargs):
        """Return a copy with all tensor fields on CUDA (non_blocking by default)."""
        cuda_tensor = {}
        for k, v in self.__dict__.items():  # noqa F402
            if isinstance(v, torch.Tensor):
                kwargs["non_blocking"] = kwargs.get("non_blocking", True)
                cuda_tensor[k] = v.cuda(*args, **kwargs)
            elif isinstance(v, TensorDataClass):
                cuda_tensor[k] = v.cuda(*args, **kwargs)
            else:
                cuda_tensor[k] = v
        return type(self)(**cuda_tensor)

    def cpu(self):
        """Return a copy with all tensor fields moved to CPU."""
        cpu_tensor = {}
        for k, v in self.__dict__.items():  # noqa F402
            if isinstance(v, (torch.Tensor, TensorDataClass)):
                cpu_tensor[k] = v.cpu()
            else:
                cpu_tensor[k] = v
        return type(self)(**cpu_tensor)
# Type aliases describing the sparse-feature wire layouts (all torch.Tensors).
# (offset, value)
IdListFeatureValue = Tuple[torch.Tensor, torch.Tensor]
# (offset, key, value)
IdScoreListFeatureValue = Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
# name -> value
IdListFeature = Dict[str, IdListFeatureValue]
IdScoreListFeature = Dict[str, IdScoreListFeatureValue]
# id -> value
ServingIdListFeature = Dict[int, IdListFeatureValue]
ServingIdScoreListFeature = Dict[int, IdScoreListFeatureValue]
#####
# FIXME: These config types are misplaced but we need to write FBL config adapter
# if we moved them.
######
@pydantic_dataclass
class IdListFeatureConfig(BaseDataClass):
    """Config for one sparse id-list feature."""
    name: str
    # integer feature ID
    feature_id: int
    # name of the embedding table to use
    id_mapping_name: str
@pydantic_dataclass
class IdScoreListFeatureConfig(BaseDataClass):
    """Config for one sparse id-score-list feature."""
    name: str
    # integer feature ID
    feature_id: int
    # name of the embedding table to use
    id_mapping_name: str
@pydantic_dataclass
class FloatFeatureInfo(BaseDataClass):
    """Name and integer id of one dense float feature."""
    name: str
    feature_id: int
@pydantic_dataclass
class ExplicitMapping(object):
    """Maps raw feature ids to embedding-table indices via an explicit id list."""
    __hash__ = param_hash
    ids: List[int] = field(default_factory=list)
    def __post_init_post_parse__(self):
        """
        used in preprocessing
        ids list represents mapping from idx -> value
        we want the reverse: from feature to embedding table indices
        """
        self._id2index: Dict[int, int] = {}
    @property
    def id2index(self) -> Dict[int, int]:
        # Lazily build the reverse map (id -> index) on first access.
        # pyre-fixme[16]: `IdMapping` has no attribute `_id2index`.
        if not self._id2index:
            self._id2index = {id: i for i, id in enumerate(self.ids)}
        return self._id2index
    @property
    def table_size(self):
        # Number of distinct ids, i.e. rows in the embedding table.
        return len(self.ids)
@pydantic_dataclass
class ModuloMapping:
    """
    Map IDs to [0, table_size) via modulo `table_size`
    """
    # number of rows in the target embedding table
    table_size: int
@wrap_oss_with_dataclass
class IdMappingUnion(TaggedUnion):
    """Tagged union selecting how raw ids map to embedding-table indices."""
    explicit_mapping: Optional[ExplicitMapping] = None
    modulo: Optional[ModuloMapping] = None
@pydantic_dataclass
class ModelFeatureConfig(BaseDataClass):
    """Declares a model's dense and sparse features and their id mappings."""
    float_feature_infos: List[FloatFeatureInfo] = field(default_factory=list)
    # table name -> id mapping
    id_mapping_config: Dict[str, IdMappingUnion] = field(default_factory=dict)
    # id_list_feature_configs is feature_id -> list of values
    id_list_feature_configs: List[IdListFeatureConfig] = field(default_factory=list)
    # id_score_list_feature_configs is feature_id -> (keys -> values)
    id_score_list_feature_configs: List[IdScoreListFeatureConfig] = field(
        default_factory=list
    )
    def __post_init_post_parse__(self):
        # Build id<->name/config lookup tables over all sparse features.
        # Uniqueness is only asserted when sparse features are present.
        both_lists = self.id_list_feature_configs + self.id_score_list_feature_configs
        if not self.only_dense:
            # sanity check for keys in mapping config
            ids = [config.feature_id for config in both_lists]
            names = [config.name for config in both_lists]
            assert len(ids) == len(set(ids)), f"duplicates in ids: {ids}"
            assert len(names) == len(set(names)), f"duplicates in names: {names}"
            assert len(ids) == len(names), f"{len(ids)} != {len(names)}"
        self._id2name = {config.feature_id: config.name for config in both_lists}
        self._name2id = {config.name: config.feature_id for config in both_lists}
        self._id2config = {config.feature_id: config for config in both_lists}
        self._name2config = {config.name: config for config in both_lists}
    @property
    def only_dense(self):
        # True when no sparse (id-list / id-score-list) features are declared.
        return not (self.id_list_feature_configs or self.id_score_list_feature_configs)
    @property
    def id2name(self):
        return self._id2name
    @property
    def name2id(self):
        return self._name2id
    @property
    def id2config(self):
        return self._id2config
    @property
    def name2config(self):
        return self._name2config
######
# dataclasses for internal API
######
@dataclass
class ValuePresence(TensorDataClass):
    """A value tensor paired with an optional presence mask."""
    value: torch.Tensor
    presence: Optional[torch.Tensor]
@dataclass
class ActorOutput(TensorDataClass):
    """Output of an actor network: action plus optional log-prob and squashed mean."""
    action: torch.Tensor
    log_prob: Optional[torch.Tensor] = None
    squashed_mean: Optional[torch.Tensor] = None
@dataclass
class DocList(TensorDataClass):
    """Batch of candidate documents with a presence mask and per-doc value."""
    # the shape is (batch_size, num_candidates, num_document_features)
    float_features: torch.Tensor
    # the shapes below are (batch_size, num_candidates)
    # mask indicates whether the candidate is present or not; its dtype is torch.bool
    # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`.
    mask: torch.Tensor = None
    # value is context dependent; it could be action probability or the score
    # of the document from another model
    # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`.
    value: torch.Tensor = None
    def __post_init__(self):
        # Default mask to "all present" and value to all-ones when omitted.
        assert (
            len(self.float_features.shape) == 3
        ), f"Unexpected shape: {self.float_features.shape}"
        if self.mask is None:
            self.mask = self.float_features.new_ones(
                self.float_features.shape[:2], dtype=torch.bool
            )
        if self.value is None:
            self.value = self.float_features.new_ones(self.float_features.shape[:2])
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def select_slate(self, action: torch.Tensor):
        """Gather, per row, the docs indexed by `action` (batch_size, slate_size)."""
        row_idx = torch.repeat_interleave(
            torch.arange(action.shape[0]).unsqueeze(1), action.shape[1], dim=1
        )
        mask = self.mask[row_idx, action]
        float_features = self.float_features[row_idx, action]
        value = self.value[row_idx, action]
        return DocList(float_features, mask, value)
    def as_feature_data(self):
        """Flatten to (batch_size * slate_size, feature_dim) FeatureData."""
        _batch_size, _slate_size, feature_dim = self.float_features.shape
        return FeatureData(self.float_features.view(-1, feature_dim))
@dataclass
class FeatureData(TensorDataClass):
    """Container for dense, sparse, sequence, and candidate-document features."""
    # For dense features, shape is (batch_size, feature_dim)
    float_features: torch.Tensor
    id_list_features: IdListFeature = dataclasses.field(default_factory=dict)
    id_score_list_features: IdScoreListFeature = dataclasses.field(default_factory=dict)
    # For sequence, shape is (stack_size, batch_size, feature_dim)
    stacked_float_features: Optional[torch.Tensor] = None
    # For ranking algos,
    candidate_docs: Optional[DocList] = None
    # Experimental: sticking this here instead of putting it in float_features
    # because a lot of places derive the shape of float_features from
    # normalization parameters.
    time_since_first: Optional[torch.Tensor] = None
    def __post_init__(self):
        # 3-D float_features only triggers a (deduplicated) warning; any other
        # non-2-D shape is an error.
        def usage():
            return (
                "For sequence features, use `stacked_float_features`."
                "For document features, use `candidate_doc_float_features`."
            )
        if self.float_features.ndim == 3:
            no_dup_logger.warning(f"`float_features` should be 2D.\n{usage()}")
        elif self.float_features.ndim != 2:
            raise ValueError(
                f"float_features should be 2D; got {self.float_features.shape}.\n{usage()}"
            )
    @property
    def has_float_features_only(self) -> bool:
        # NOTE(review): id_score_list_features is not checked here — confirm
        # whether that omission is intentional.
        return (
            not self.id_list_features
            and self.time_since_first is None
            and self.candidate_docs is None
        )
    def get_tiled_batch(self, num_tiles: int):
        # Repeat each row num_tiles times along the batch dimension.
        assert (
            self.has_float_features_only
        ), f"only works for float features now: {self}"
        """
        tiled_feature should be (batch_size * num_tiles, feature_dim)
        forall i in [batch_size],
        tiled_feature[i*num_tiles:(i+1)*num_tiles] should be feat[i]
        """
        feat = self.float_features
        assert (
            len(feat.shape) == 2
        ), f"Need feat shape to be (batch_size, feature_dim), got {feat.shape}."
        batch_size, _ = feat.shape
        tiled_feat = feat.repeat_interleave(repeats=num_tiles, dim=0)
        return FeatureData(float_features=tiled_feat)
    def concat_user_doc(self):
        # Tile the user state per document and concatenate along features,
        # producing (batch_size, num_docs, state_dim + candidate_dim).
        assert not self.has_float_features_only, "only works when DocList present"
        assert self.float_features.dim() == 2  # batch_size x state_dim
        batch_size, state_dim = self.float_features.shape
        # batch_size x num_docs x candidate_dim
        assert self.candidate_docs.float_features.dim() == 3
        assert len(self.candidate_docs.float_features) == batch_size
        _, num_docs, candidate_dim = self.candidate_docs.float_features.shape
        state_tiled = (
            torch.repeat_interleave(self.float_features, num_docs, dim=0)
            .reshape(batch_size, num_docs, state_dim)
            .float()
        )
        return torch.cat((state_tiled, self.candidate_docs.float_features), dim=2)
    def get_ranking_state(self, has_user_feat: bool):
        if has_user_feat:
            return self.concat_user_doc()
        else:
            # pyre-fixme[16]: `Optional` has no attribute `float_features`.
            return self.candidate_docs.float_features.float()
def _embed_states(x: FeatureData) -> FeatureData:
    """
    Get dense feature from float and doc features.
    TODO: make this an embedder.
    """
    assert x.candidate_docs is not None
    # Expect state (n, state_dim) and candidates (n, num_candidates,
    # candidate_dim); the result is their concatenation along dim 1 with the
    # candidates flattened per row.
    state = x.float_features
    candidates = x.candidate_docs.float_features
    n = state.shape[0]
    assert len(state.shape) == 2, f"{state.shape} != (batch_size, user_dim)"
    assert (
        len(candidates.shape) == 3
    ), f"{candidates.shape} != (batch_size, num_candidates, candidate_dim)"
    assert candidates.shape[0] == n, f"{candidates.shape} 0th dim != {n}"
    # TODO: have an embedder here
    # NOTE: mean aggregation is not very effective here
    candidates_embedding = candidates.view(n, -1)
    return FeatureData(
        float_features=torch.cat([state, candidates_embedding], dim=1)
    )
class TensorFeatureData(torch.nn.Module):
    """
    Primarily for using in nn.Sequential
    """
    def forward(self, input: torch.Tensor) -> FeatureData:
        # Wrap the raw tensor so downstream modules receive FeatureData.
        assert isinstance(input, torch.Tensor)
        return FeatureData(input)
class ServingFeatureData(NamedTuple):
    """Serving-side features: dense (value, presence) plus sparse id features."""
    float_features_with_presence: Tuple[torch.Tensor, torch.Tensor]
    id_list_features: ServingIdListFeature
    id_score_list_features: ServingIdScoreListFeature
@dataclass
class ExtraData(TensorDataClass):
    """Optional per-sample metadata carried alongside training batches."""
    mdp_id: Optional[torch.Tensor] = None
    sequence_number: Optional[torch.Tensor] = None
    action_probability: Optional[torch.Tensor] = None
    max_num_actions: Optional[int] = None
    metrics: Optional[torch.Tensor] = None
    @classmethod
    def from_dict(cls, d):
        # Missing keys simply default to None.
        return cls(**{f.name: d.get(f.name, None) for f in dataclasses.fields(cls)})
@dataclass
class PreprocessedRankingInput(TensorDataClass):
    """
    Input for ranking (seq2slate-style) models: user state, source candidate
    sequence, optional logged target sequence, optional ground-truth optimal
    sequence, plus attention masks and rewards. All index tensors are shifted
    by +2 to reserve index 0 for padding and 1 for the decoder start symbol.
    """

    state: FeatureData
    src_seq: FeatureData
    src_src_mask: Optional[torch.Tensor] = None
    tgt_in_seq: Optional[FeatureData] = None
    tgt_out_seq: Optional[FeatureData] = None
    tgt_tgt_mask: Optional[torch.Tensor] = None
    slate_reward: Optional[torch.Tensor] = None
    position_reward: Optional[torch.Tensor] = None
    # all indices will be +2 to account for padding
    # symbol (0) and decoder_start_symbol (1)
    src_in_idx: Optional[torch.Tensor] = None
    tgt_in_idx: Optional[torch.Tensor] = None
    tgt_out_idx: Optional[torch.Tensor] = None
    tgt_out_probs: Optional[torch.Tensor] = None
    # store ground-truth target sequences
    optim_tgt_in_idx: Optional[torch.Tensor] = None
    optim_tgt_out_idx: Optional[torch.Tensor] = None
    optim_tgt_in_seq: Optional[FeatureData] = None
    optim_tgt_out_seq: Optional[FeatureData] = None
    extras: Optional[ExtraData] = field(default_factory=ExtraData)

    def batch_size(self) -> int:
        return self.state.float_features.size()[0]

    def __len__(self) -> int:
        return self.batch_size()

    @classmethod
    def from_input(
        cls,
        state: torch.Tensor,
        candidates: torch.Tensor,
        device: torch.device,
        action: Optional[torch.Tensor] = None,
        optimal_action: Optional[torch.Tensor] = None,
        logged_propensities: Optional[torch.Tensor] = None,
        slate_reward: Optional[torch.Tensor] = None,
        position_reward: Optional[torch.Tensor] = None,
        extras: Optional[ExtraData] = None,
    ):
        """
        Build derived fields (indices & masks) from raw input
        """
        # Shape checking
        assert len(state.shape) == 2
        assert len(candidates.shape) == 3
        state = state.to(device)
        candidates = candidates.to(device)
        if action is not None:
            assert len(action.shape) == 2
            action = action.to(device)
        if logged_propensities is not None:
            assert (
                len(logged_propensities.shape) == 2
                and logged_propensities.shape[1] == 1
            )
            logged_propensities = logged_propensities.to(device)
        batch_size, candidate_num, candidate_dim = candidates.shape
        if slate_reward is not None:
            assert len(slate_reward.shape) == 2 and slate_reward.shape[1] == 1
            slate_reward = slate_reward.to(device)
        if position_reward is not None:
            # pyre-fixme[16]: `Optional` has no attribute `shape`.
            assert position_reward.shape == action.shape
            position_reward = position_reward.to(device)
        # Source indices are just positions, offset past the two reserved symbols.
        src_in_idx = (
            torch.arange(candidate_num, device=device).repeat(batch_size, 1) + 2
        )
        src_src_mask = (
            (torch.ones(batch_size, candidate_num, candidate_num))
            .type(torch.int8)
            .to(device)
        )

        def process_tgt_seq(action):
            # Derive decoder-side indices, sequences, and mask from a
            # (possibly absent) action sequence.
            if action is not None:
                _, output_size = action.shape
                # Account for decoder starting symbol and padding symbol
                candidates_augment = torch.cat(
                    (
                        torch.zeros(batch_size, 2, candidate_dim, device=device),
                        candidates,
                    ),
                    dim=1,
                )
                tgt_out_idx = action + 2
                tgt_in_idx = torch.full(
                    (batch_size, output_size), DECODER_START_SYMBOL, device=device
                )
                # Decoder input at step t is the output of step t-1.
                tgt_in_idx[:, 1:] = tgt_out_idx[:, :-1]
                tgt_out_seq = gather(candidates_augment, tgt_out_idx)
                tgt_in_seq = torch.zeros(
                    batch_size, output_size, candidate_dim, device=device
                )
                tgt_in_seq[:, 1:] = tgt_out_seq[:, :-1]
                tgt_tgt_mask = subsequent_mask(output_size, device)
            else:
                tgt_in_idx = None
                tgt_out_idx = None
                tgt_in_seq = None
                tgt_out_seq = None
                tgt_tgt_mask = None
            return tgt_in_idx, tgt_out_idx, tgt_in_seq, tgt_out_seq, tgt_tgt_mask

        (
            tgt_in_idx,
            tgt_out_idx,
            tgt_in_seq,
            tgt_out_seq,
            tgt_tgt_mask,
        ) = process_tgt_seq(action)
        (
            optim_tgt_in_idx,
            optim_tgt_out_idx,
            optim_tgt_in_seq,
            optim_tgt_out_seq,
            _,
        ) = process_tgt_seq(optimal_action)
        return cls.from_tensors(
            state=state,
            src_seq=candidates,
            src_src_mask=src_src_mask,
            tgt_in_seq=tgt_in_seq,
            tgt_out_seq=tgt_out_seq,
            tgt_tgt_mask=tgt_tgt_mask,
            slate_reward=slate_reward,
            position_reward=position_reward,
            src_in_idx=src_in_idx,
            tgt_in_idx=tgt_in_idx,
            tgt_out_idx=tgt_out_idx,
            tgt_out_probs=logged_propensities,
            optim_tgt_in_idx=optim_tgt_in_idx,
            optim_tgt_out_idx=optim_tgt_out_idx,
            optim_tgt_in_seq=optim_tgt_in_seq,
            optim_tgt_out_seq=optim_tgt_out_seq,
            extras=extras,
        )

    @classmethod
    def from_tensors(
        cls,
        state: torch.Tensor,
        src_seq: torch.Tensor,
        src_src_mask: Optional[torch.Tensor] = None,
        tgt_in_seq: Optional[torch.Tensor] = None,
        tgt_out_seq: Optional[torch.Tensor] = None,
        tgt_tgt_mask: Optional[torch.Tensor] = None,
        slate_reward: Optional[torch.Tensor] = None,
        position_reward: Optional[torch.Tensor] = None,
        src_in_idx: Optional[torch.Tensor] = None,
        tgt_in_idx: Optional[torch.Tensor] = None,
        tgt_out_idx: Optional[torch.Tensor] = None,
        tgt_out_probs: Optional[torch.Tensor] = None,
        optim_tgt_in_idx: Optional[torch.Tensor] = None,
        optim_tgt_out_idx: Optional[torch.Tensor] = None,
        optim_tgt_in_seq: Optional[torch.Tensor] = None,
        optim_tgt_out_seq: Optional[torch.Tensor] = None,
        extras: Optional[ExtraData] = None,
        **kwargs,
    ):
        """Validate raw tensors and wrap feature tensors into FeatureData."""
        assert isinstance(state, torch.Tensor)
        assert isinstance(src_seq, torch.Tensor)
        assert src_src_mask is None or isinstance(src_src_mask, torch.Tensor)
        assert tgt_in_seq is None or isinstance(tgt_in_seq, torch.Tensor)
        assert tgt_out_seq is None or isinstance(tgt_out_seq, torch.Tensor)
        assert tgt_tgt_mask is None or isinstance(tgt_tgt_mask, torch.Tensor)
        assert slate_reward is None or isinstance(slate_reward, torch.Tensor)
        assert position_reward is None or isinstance(position_reward, torch.Tensor)
        assert src_in_idx is None or isinstance(src_in_idx, torch.Tensor)
        assert tgt_in_idx is None or isinstance(tgt_in_idx, torch.Tensor)
        assert tgt_out_idx is None or isinstance(tgt_out_idx, torch.Tensor)
        assert tgt_out_probs is None or isinstance(tgt_out_probs, torch.Tensor)
        # Fixed: this line previously duplicated the optim_tgt_out_idx check,
        # leaving optim_tgt_in_idx unvalidated.
        assert optim_tgt_in_idx is None or isinstance(optim_tgt_in_idx, torch.Tensor)
        assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor)
        assert optim_tgt_in_seq is None or isinstance(optim_tgt_in_seq, torch.Tensor)
        assert optim_tgt_out_seq is None or isinstance(optim_tgt_out_seq, torch.Tensor)
        assert extras is None or isinstance(extras, ExtraData)
        return cls(
            state=FeatureData(float_features=state),
            src_seq=FeatureData(float_features=src_seq),
            src_src_mask=src_src_mask,
            tgt_in_seq=FeatureData(float_features=tgt_in_seq)
            if tgt_in_seq is not None
            else None,
            tgt_out_seq=FeatureData(float_features=tgt_out_seq)
            if tgt_out_seq is not None
            else None,
            tgt_tgt_mask=tgt_tgt_mask,
            slate_reward=slate_reward,
            position_reward=position_reward,
            src_in_idx=src_in_idx,
            tgt_in_idx=tgt_in_idx,
            tgt_out_idx=tgt_out_idx,
            tgt_out_probs=tgt_out_probs,
            optim_tgt_in_idx=optim_tgt_in_idx,
            optim_tgt_out_idx=optim_tgt_out_idx,
            optim_tgt_in_seq=FeatureData(float_features=optim_tgt_in_seq)
            if optim_tgt_in_seq is not None
            else None,
            optim_tgt_out_seq=FeatureData(float_features=optim_tgt_out_seq)
            if optim_tgt_out_seq is not None
            else None,
            # simplified from `extras if extras is not None else None`
            extras=extras,
        )

    def __post_init__(self):
        # Guard against constructing directly with raw tensors; feature
        # fields must be FeatureData (use from_tensors()).
        if (
            isinstance(self.state, torch.Tensor)
            or isinstance(self.src_seq, torch.Tensor)
            or isinstance(self.tgt_in_seq, torch.Tensor)
            or isinstance(self.tgt_out_seq, torch.Tensor)
            or isinstance(self.optim_tgt_in_seq, torch.Tensor)
            or isinstance(self.optim_tgt_out_seq, torch.Tensor)
        ):
            raise ValueError(
                f"Use from_tensors() {type(self.state)} {type(self.src_seq)} "
                f"{type(self.tgt_in_seq)} {type(self.tgt_out_seq)} "
                f"{type(self.optim_tgt_in_seq)} {type(self.optim_tgt_out_seq)} "
            )
@dataclass
class BaseInput(TensorDataClass):
    """
    Base class for all inputs, both raw and preprocessed
    """
    state: FeatureData
    next_state: FeatureData
    reward: torch.Tensor
    time_diff: torch.Tensor
    step: Optional[torch.Tensor]
    not_terminal: torch.Tensor
    def __len__(self):
        # Batch size is the leading dimension of the state features.
        return self.state.float_features.size()[0]
    def batch_size(self):
        return len(self)
    def as_dict_shallow(self):
        # Shallow view of the base fields; subclasses splice this into their
        # own constructors (see e.g. DiscreteDqnInput.from_dict).
        return {
            "state": self.state,
            "next_state": self.next_state,
            "reward": self.reward,
            "time_diff": self.time_diff,
            "step": self.step,
            "not_terminal": self.not_terminal,
        }
    @staticmethod
    def from_dict(batch):
        # Missing sparse-feature columns default to empty dicts.
        id_list_features = batch.get(InputColumn.STATE_ID_LIST_FEATURES, None) or {}
        id_score_list_features = (
            batch.get(InputColumn.STATE_ID_SCORE_LIST_FEATURES, None) or {}
        )
        next_id_list_features = (
            batch.get(InputColumn.NEXT_STATE_ID_LIST_FEATURES, None) or {}
        )
        next_id_score_list_features = (
            batch.get(InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, None) or {}
        )
        # TODO: handle value/mask of DocList
        filler_mask_val = None
        doc_list = None
        candidate_features = batch.get(InputColumn.CANDIDATE_FEATURES, None)
        if candidate_features is not None:
            # Placeholder mask/value: all-zero (i.e. "absent"/0.0) until
            # real values are plumbed through (see TODO above).
            filler_mask_val = torch.zeros(
                (candidate_features.shape[0], candidate_features.shape[1])
            )
            doc_list = DocList(
                float_features=candidate_features,
                mask=filler_mask_val.clone().bool(),
                value=filler_mask_val.clone().float(),
            )
        next_doc_list = None
        next_candidate_features = batch.get(InputColumn.NEXT_CANDIDATE_FEATURES, None)
        if next_candidate_features is not None:
            assert filler_mask_val is not None
            next_doc_list = DocList(
                float_features=next_candidate_features,
                mask=filler_mask_val.clone().bool(),
                value=filler_mask_val.clone().float(),
            )
        return BaseInput(
            state=FeatureData(
                float_features=batch[InputColumn.STATE_FEATURES],
                id_list_features=id_list_features,
                id_score_list_features=id_score_list_features,
                candidate_docs=doc_list,
            ),
            next_state=FeatureData(
                float_features=batch[InputColumn.NEXT_STATE_FEATURES],
                id_list_features=next_id_list_features,
                id_score_list_features=next_id_score_list_features,
                candidate_docs=next_doc_list,
            ),
            reward=batch[InputColumn.REWARD],
            time_diff=batch[InputColumn.TIME_DIFF],
            step=batch.get(InputColumn.STEP, None),
            not_terminal=batch[InputColumn.NOT_TERMINAL],
        )
@dataclass
class DiscreteDqnInput(BaseInput):
    """
    See input_prototype for DQN expected input shapes
    """
    action: torch.Tensor
    next_action: torch.Tensor
    possible_actions_mask: torch.Tensor
    possible_next_actions_mask: torch.Tensor
    extras: ExtraData
    @classmethod
    def input_prototype(cls, action_dim=2, batch_size=10, state_dim=3):
        # Synthetic batch with one-hot actions; useful for tracing/export.
        return cls(
            state=FeatureData(float_features=torch.randn(batch_size, state_dim)),
            next_state=FeatureData(float_features=torch.randn(batch_size, state_dim)),
            reward=torch.rand(batch_size, 1),
            time_diff=torch.ones(batch_size, 1),
            step=torch.ones(batch_size, 1),
            not_terminal=torch.ones(batch_size, 1),
            action=F.one_hot(
                torch.randint(high=action_dim, size=(batch_size,)),
                num_classes=action_dim,
            ),
            next_action=F.one_hot(
                torch.randint(high=action_dim, size=(batch_size,)),
                num_classes=action_dim,
            ),
            possible_actions_mask=torch.ones(batch_size, action_dim),
            possible_next_actions_mask=torch.ones(batch_size, action_dim),
            extras=ExtraData(action_probability=torch.ones(batch_size, 1)),
        )
    @classmethod
    def from_dict(cls, batch):
        # Base fields come from BaseInput.from_dict; DQN-specific columns here.
        base = super().from_dict(batch)
        return cls(
            action=batch[InputColumn.ACTION],
            next_action=batch[InputColumn.NEXT_ACTION],
            possible_actions_mask=batch[InputColumn.POSSIBLE_ACTIONS_MASK],
            possible_next_actions_mask=batch[InputColumn.POSSIBLE_NEXT_ACTIONS_MASK],
            extras=ExtraData.from_dict(batch),
            **base.as_dict_shallow(),
        )
@dataclass
class SlateQInput(BaseInput):
    """
    The shapes of `reward`, `reward_mask`, & `next_item_mask` are
    `(batch_size, slate_size)`.
    `reward_mask` indicated whether the reward could be observed, e.g.,
    the item got into viewport or not.
    """
    action: torch.Tensor
    next_action: torch.Tensor
    reward_mask: torch.Tensor
    extras: Optional[ExtraData] = None
    @classmethod
    def from_dict(cls, d):
        # Candidate docs carry the per-item mask and probability as the
        # DocList mask/value.
        action = d["action"]
        next_action = d["next_action"]
        return cls(
            state=FeatureData(
                float_features=d["state_features"],
                candidate_docs=DocList(
                    float_features=d["candidate_features"],
                    mask=d["item_mask"],
                    value=d["item_probability"],
                ),
            ),
            next_state=FeatureData(
                float_features=d["next_state_features"],
                candidate_docs=DocList(
                    float_features=d["next_candidate_features"],
                    mask=d["next_item_mask"],
                    value=d["next_item_probability"],
                ),
            ),
            action=action,
            next_action=next_action,
            reward=d["position_reward"],
            reward_mask=d["reward_mask"],
            time_diff=d["time_diff"],
            not_terminal=d["not_terminal"],
            step=None,
            extras=ExtraData.from_dict(d),
        )
@dataclass
class ParametricDqnInput(BaseInput):
    """Input for parametric-action DQN: actions are feature vectors."""
    action: FeatureData
    next_action: FeatureData
    possible_actions: FeatureData
    possible_actions_mask: torch.Tensor
    possible_next_actions: FeatureData
    possible_next_actions_mask: torch.Tensor
    extras: Optional[ExtraData] = None
    @classmethod
    def from_dict(cls, batch):
        return cls(
            state=FeatureData(float_features=batch["state_features"]),
            action=FeatureData(float_features=batch["action"]),
            next_state=FeatureData(float_features=batch["next_state_features"]),
            next_action=FeatureData(float_features=batch["next_action"]),
            possible_actions=FeatureData(float_features=batch["possible_actions"]),
            possible_actions_mask=batch["possible_actions_mask"],
            possible_next_actions=FeatureData(
                float_features=batch["possible_next_actions"]
            ),
            possible_next_actions_mask=batch["possible_next_actions_mask"],
            reward=batch["reward"],
            not_terminal=batch["not_terminal"],
            time_diff=batch["time_diff"],
            step=batch["step"],
            extras=batch["extras"],
        )
@dataclass
class PolicyNetworkInput(BaseInput):
    """Input for continuous-control policy networks (actor-critic style)."""
    action: FeatureData
    next_action: FeatureData
    extras: Optional[ExtraData] = None
    @classmethod
    def from_dict(cls, batch):
        base = super().from_dict(batch)
        # TODO: Implement ExtraData.from_dict
        extras = batch.get("extras", None)
        return cls(
            action=FeatureData(float_features=batch["action"]),
            next_action=FeatureData(float_features=batch["next_action"]),
            extras=extras,
            **base.as_dict_shallow(),
        )
@dataclass
class PolicyGradientInput(TensorDataClass):
    """
    See input_prototype for expected input dimensions
    """
    state: FeatureData
    action: torch.Tensor
    reward: torch.Tensor
    log_prob: torch.Tensor
    possible_actions_mask: Optional[torch.Tensor] = None
    @classmethod
    def input_prototype(cls, action_dim=2, batch_size=10, state_dim=3):
        # Synthetic batch with one-hot actions; useful for tracing/export.
        return cls(
            state=FeatureData(float_features=torch.randn(batch_size, state_dim)),
            action=F.one_hot(
                torch.randint(high=action_dim, size=(batch_size,)),
                num_classes=action_dim,
            ),
            reward=torch.rand(batch_size),
            log_prob=torch.log(torch.rand(batch_size)),
            possible_actions_mask=torch.ones(batch_size, action_dim),
        )
    @classmethod
    def from_dict(cls, d: Dict[str, torch.Tensor]):
        # TODO: rename "observation" to "state" in Transition and return cls(**d)
        return cls(
            state=FeatureData(float_features=d["observation"]),
            action=d["action"],
            reward=d["reward"],
            log_prob=d["log_prob"],
            possible_actions_mask=d.get("possible_actions_mask", None),
        )
    def __len__(self):
        return len(self.action)
@dataclass
class BanditRewardModelInput(TensorDataClass):
    """Input for contextual-bandit reward models."""
    state: FeatureData
    action: torch.Tensor
    reward: torch.Tensor
    action_prob: Optional[torch.Tensor] = None
    @classmethod
    def from_dict(cls, batch: Dict[str, torch.Tensor]):
        return cls(
            state=FeatureData(float_features=batch["state_features"]),
            action=batch["action"],
            reward=batch["reward"],
            action_prob=batch.get("action_probability", None),
        )
@dataclass
class MemoryNetworkInput(BaseInput):
    """Training input for world-model (memory network) trainers.

    State tensors may be 2-D (batch, dim) or 3-D (seq_len, batch, dim);
    __len__ handles both layouts.
    """

    action: torch.Tensor
    valid_step: Optional[torch.Tensor] = None
    extras: ExtraData = field(default_factory=ExtraData)
    @classmethod
    def from_dict(cls, d):
        """Build from a raw batch dict."""
        return cls(
            state=FeatureData(
                float_features=d["state"],
            ),
            next_state=FeatureData(
                float_features=d["next_state"],
            ),
            action=d["action"],
            reward=d["reward"],
            time_diff=d["time_diff"],
            not_terminal=d["not_terminal"],
            step=d["step"],
            extras=ExtraData.from_dict(d),
        )
    def __len__(self) -> int:
        """Batch size: dim 0 for 2-D state tensors, dim 1 for 3-D
        (seq-major) state tensors."""
        if len(self.state.float_features.size()) == 2:
            return self.state.float_features.size()[0]
        elif len(self.state.float_features.size()) == 3:
            return self.state.float_features.size()[1]
        else:
            raise NotImplementedError()
@dataclass
class PreprocessedTrainingBatch(TensorDataClass):
    """Wrapper pairing a preprocessed training input with batch-level extras.

    Note: the annotation was `Union[PreprocessedRankingInput]`; a
    single-member Union collapses to the member type itself, so it is
    written directly (no runtime change).
    """

    training_input: PreprocessedRankingInput
    # TODO: deduplicate this and move into individual ones.
    extras: ExtraData = field(default_factory=ExtraData)

    def batch_size(self) -> int:
        """Number of rows in the batch, read from the state float features."""
        return self.training_input.state.float_features.size()[0]
@dataclass
class SlateScoreBatch:
    """A scored slate batch: MDP/sequence identifiers, per-item scores,
    and the policy-gradient input used to produce them."""

    mdp_id: torch.Tensor
    sequence_number: torch.Tensor
    scores: torch.Tensor
    training_input: PolicyGradientInput
@dataclass
class MemoryNetworkOutput(TensorDataClass):
    """Output of an MDN-RNN world model: mixture-density parameters
    (mus/sigmas/logpi), predicted reward and not-terminal signals, and
    the LSTM hidden/cell states."""

    mus: torch.Tensor
    sigmas: torch.Tensor
    logpi: torch.Tensor
    reward: torch.Tensor
    not_terminal: torch.Tensor
    last_step_lstm_hidden: torch.Tensor
    last_step_lstm_cell: torch.Tensor
    all_steps_lstm_hidden: torch.Tensor
@dataclass
class Seq2RewardOutput(TensorDataClass):
    """Output of a seq2reward model: the predicted accumulated reward."""

    acc_reward: torch.Tensor
@dataclass
class DqnPolicyActionSet(TensorDataClass):
    """Actions chosen by a DQN policy: the greedy action index plus an
    optional softmax-sampled alternative (and their display names)."""

    greedy: int
    softmax: Optional[int] = None
    greedy_act_name: Optional[str] = None
    softmax_act_name: Optional[str] = None
    softmax_act_prob: Optional[float] = None
@dataclass
class PlanningPolicyOutput(TensorDataClass):
    """Output of a planning policy; exactly one action representation is
    expected to be populated depending on the action space."""

    # best action to take next
    next_best_continuous_action: Optional[torch.Tensor] = None
    next_best_discrete_action_one_hot: Optional[torch.Tensor] = None
    next_best_discrete_action_idx: Optional[int] = None
@dataclass
class RankingOutput(TensorDataClass):
    """Output of a seq2slate ranking model; which fields are populated
    depends on the Seq2SlateMode used at inference time."""

    # a tensor of integer indices w.r.t. to possible candidates
    # the values are offset by 2 to account for padding and decoder-starter symbol
    # shape: batch_size, tgt_seq_len
    # e.g., there are candidates C0, C1, C2, C3, C4, and the ranked order is
    # C4, C1, C2, C3, C0. Then the ranked_tgt_out_idx = [6, 3, 4, 5, 2]
    ranked_tgt_out_idx: Optional[torch.Tensor] = None
    # generative probability of ranked tgt sequences at each decoding step
    # shape: batch_size, tgt_seq_len, candidate_size
    ranked_per_symbol_probs: Optional[torch.Tensor] = None
    # generative probability of ranked tgt sequences
    # shape: batch_size, 1
    ranked_per_seq_probs: Optional[torch.Tensor] = None
    # log probabilities of given tgt sequences are used in REINFORCE
    # shape: batch_size, 1 if Seq2SlateMode == PER_SEQ_LOG_PROB_MODE
    # shape: batch_size, tgt_seq_len if Seq2SlateMode == PER_SYMBOL_LOG_PROB_DIST_MODE
    log_probs: Optional[torch.Tensor] = None
    # encoder scores in tgt_out_idx order
    encoder_scores: Optional[torch.Tensor] = None
@dataclass
class RewardNetworkOutput(TensorDataClass):
    """Output of a reward network: the predicted reward tensor."""

    predicted_reward: torch.Tensor
@dataclass
class FrechetSortConfig:
    """Configuration for the Frechet-sort sampler (shape of the Frechet
    noise, equivalence length, optional top-k truncation)."""

    shape: float
    equiv_len: int
    topk: Optional[int] = None
    log_scores: bool = True
| 37,736 | 34.136872 | 91 | py |
ReAgent | ReAgent-master/reagent/core/torch_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from io import BytesIO
from typing import Dict
import numpy as np
import torch
def dict_to_tensor(batch: Dict[str, np.ndarray], device: str = "cpu"):
    """Convert every ndarray value of `batch` into a torch.Tensor on `device`."""
    converted = {}
    for key, arr in batch.items():
        converted[key] = torch.tensor(arr).to(device)
    return converted
def rescale_torch_tensor(
    tensor: torch.Tensor,
    new_min: torch.Tensor,
    new_max: torch.Tensor,
    prev_min: torch.Tensor,
    prev_max: torch.Tensor,
):
    """
    Rescale column values in N X M torch tensor to be in new range.
    Each column m in input tensor will be rescaled from range
    [prev_min[m], prev_max[m]] to [new_min[m], new_max[m]]
    """
    num_cols = tensor.shape[1]
    assert num_cols == new_min.shape[1] == new_max.shape[1]
    assert num_cols == prev_min.shape[1] == prev_max.shape[1]
    # Normalize into [0, 1] per column, then stretch into the new range.
    normalized = (tensor - prev_min) / (prev_max - prev_min)
    return normalized * (new_max - new_min) + new_min
def stack(mems):
    """
    Stack a list of tensors
    Could use torch.stack here but torch.stack is much slower
    than torch.cat + view
    Submitted an issue for investigation:
    https://github.com/pytorch/pytorch/issues/22462
    FIXME: Remove this function after the issue above is resolved
    """
    target_shape = (-1,) + tuple(mems[0].shape)
    return torch.cat(mems).view(*target_shape)
def export_module_to_buffer(module) -> BytesIO:
    """Serialize a TorchScript `module` into an in-memory buffer via
    torch.jit.save and return the BytesIO (position at end of stream).

    `module` is expected to already be scripted/traced — TODO confirm,
    since the tracing call below is commented out.
    """
    # traced_script_module = torch.jit.trace(module, module.input_prototype())
    write_buffer = BytesIO()
    torch.jit.save(module, write_buffer)
    return write_buffer
def softmax(x, temperature):
    """Compute softmax values for each sets of scores in x."""
    scaled = x / temperature
    return torch.nn.functional.softmax(scaled, dim=1)
def masked_softmax(x, mask, temperature):
    """Compute softmax values for each sets of scores in x."""
    scaled = x / temperature
    # Push masked-out entries towards -inf so they vanish after exp().
    shifted = scaled - (1.0 - mask) * 1e20
    # Subtract the row max for numerical stability.
    shifted = shifted - torch.max(shifted, dim=1, keepdim=True)[0]
    exp_scores = torch.exp(shifted)
    exp_scores = exp_scores * mask
    result = exp_scores / exp_scores.sum(dim=1, keepdim=True)
    # A fully-masked row yields 0/0 = NaN; zero those entries out.
    result[result != result] = 0
    return result
def gather(data, index_2d):
    """
    Gather data alongs the second dim. Assume data is 3d with shape (batch_size, dim1, dim2),
    and index_2d's shape is (batch_size, dim1).
    output[i][j] = data[i][index_2d[i][j]]
    This function does not require data, output, or index_2d having the same shape, which
    is mandated by torch.gather.
    """
    batch_size = data.shape[0]
    data_dim = data.shape[2]
    index_len = index_2d.shape[1]
    device = data.device
    # Row selector: batch index i repeated index_len times.
    # index_len has to be moved to the device explicitly, otherwise
    # error will throw during jit.trace
    row_idx = torch.arange(batch_size, device=device).repeat_interleave(
        torch.tensor([index_len], device=device)
    )
    gathered = data[row_idx, index_2d.flatten()]
    return gathered.view(batch_size, index_len, data_dim)
def get_device(model):
    """Return the device of `model`'s first parameter (assumes the whole
    model lives on one device)."""
    return next(model.parameters()).device
| 3,085 | 29.86 | 93 | py |
ReAgent | ReAgent-master/reagent/core/parameters.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import enum
from typing import Dict, List, Optional
from reagent.core.base_dataclass import BaseDataClass
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters_seq2slate import (
IPSClamp,
LearningMethod,
SimulationParameters,
)
# For TD3 and SAC: actions are normalized in this range for training and
# rescaled back to action_space.low/high at serving time.
CONTINUOUS_TRAINING_ACTION_RANGE = (-1.0, 1.0)
class ProblemDomain(enum.Enum):
    """Kind of RL problem the data/model targets."""

    CONTINUOUS_ACTION = "continuous_action"
    DISCRETE_ACTION = "discrete_action"
    PARAMETRIC_ACTION = "parametric_action"
    # I don't think the data generated for these 2 types are generic
    SEQ_TO_REWARD = "seq2reward"
    MDN_RNN = "mdn_rnn"
class SlateOptMethod(enum.Enum):
    """Strategy used to pick a slate from item scores."""

    GREEDY = "greedy"
    TOP_K = "top_k"
    EXACT = "exact"
@dataclass(frozen=True)
class SlateOptParameters(BaseDataClass):
    """Parameters for slate optimization (defaults to top-k selection)."""

    __hash__ = param_hash
    method: SlateOptMethod = SlateOptMethod.TOP_K
@dataclass(frozen=True)
class RLParameters(BaseDataClass):
    """Core RL hyperparameters shared across trainers (discount, exploration,
    target-network update rate, Q-loss selection, multi-step options)."""

    __hash__ = param_hash
    gamma: float = 0.9
    epsilon: float = 0.1
    target_update_rate: float = 0.001
    maxq_learning: bool = True
    reward_boost: Optional[Dict[str, float]] = None
    temperature: float = 0.01
    softmax_policy: bool = False
    use_seq_num_diff_as_time_diff: bool = False
    q_network_loss: str = "mse"
    set_missing_value_to_zero: bool = False
    tensorboard_logging_freq: int = 0
    predictor_atol_check: float = 0.0
    predictor_rtol_check: float = 5e-5
    time_diff_unit_length: float = 1.0
    multi_steps: Optional[int] = None
    # for pytorch discrete model, specify the max number of prediction change
    # allowed during conversions between model frameworks in ratio
    ratio_different_predictions_tolerance: float = 0
@dataclass(frozen=True)
class MDNRNNTrainerParameters(BaseDataClass):
    """Hyperparameters for the MDN-RNN world-model trainer."""

    __hash__ = param_hash
    hidden_size: int = 64
    num_hidden_layers: int = 2
    learning_rate: float = 0.001
    num_gaussians: int = 5
    # weight in calculating world-model loss
    reward_loss_weight: float = 1.0
    next_state_loss_weight: float = 1.0
    not_terminal_loss_weight: float = 1.0
    fit_only_one_next_step: bool = False
    action_dim: int = 2
    action_names: Optional[List[str]] = None
    multi_steps: int = 1
@dataclass(frozen=True)
class Seq2RewardTrainerParameters(BaseDataClass):
    """Hyperparameters for the seq2reward trainer."""

    __hash__ = param_hash
    learning_rate: float = 0.001
    multi_steps: int = 1
    action_names: List[str] = field(default_factory=lambda: [])
    compress_model_learning_rate: float = 0.001
    gamma: float = 1.0
    view_q_value: bool = False
    step_predict_net_size: int = 64
    reward_boost: Optional[Dict[str, float]] = None
@dataclass(frozen=True)
class CEMTrainerParameters(BaseDataClass):
    """Hyperparameters for the Cross-Entropy Method trainer/planner."""

    __hash__ = param_hash
    plan_horizon_length: int = 0
    num_world_models: int = 0
    cem_population_size: int = 0
    cem_num_iterations: int = 0
    ensemble_population_size: int = 0
    num_elites: int = 0
    # NOTE(review): these class-level default instances are shared across
    # all CEMTrainerParameters objects; safe only because both are frozen.
    mdnrnn: MDNRNNTrainerParameters = MDNRNNTrainerParameters()
    rl: RLParameters = RLParameters()
    alpha: float = 0.25
    epsilon: float = 0.001
@dataclass(frozen=True)
class EvaluationParameters(BaseDataClass):
    """Evaluation options; whether to run CPE during training."""

    calc_cpe_in_training: bool = True
@dataclass(frozen=True)
class EvolutionParameters(BaseDataClass):
    """Hyperparameters for evolution-strategy optimization."""

    population_size: int = 1000
    mutation_power: float = 0.1
    learning_rate: float = 0.01
@dataclass(frozen=True)
class StateFeatureParameters(BaseDataClass):
    """Optional overrides for state feature names/hashes."""

    __hash__ = param_hash
    state_feature_names_override: List[str] = field(default_factory=lambda: [])
    state_feature_hashes_override: List[int] = field(default_factory=lambda: [])
@dataclass(frozen=True)
class NormalizationParameters(BaseDataClass):
    """Per-feature normalization spec; which optional fields are populated
    depends on `feature_type` (e.g. boxcox, ENUM, QUANTILE)."""

    __hash__ = param_hash
    feature_type: str
    boxcox_lambda: Optional[float] = None
    boxcox_shift: Optional[float] = None
    mean: Optional[float] = None
    stddev: Optional[float] = None
    possible_values: Optional[List[int]] = None  # Assume present for ENUM type
    quantiles: Optional[
        List[float]
    ] = None  # Assume present for QUANTILE type and sorted
    min_value: Optional[float] = None
    max_value: Optional[float] = None
class NormalizationKey(object):
    """Keys for dictionaries of NormalizationData"""

    STATE = "state"
    ACTION = "action"
    ITEM = "item"
    CANDIDATE = "candidate"
@dataclass(frozen=True)
class NormalizationData(BaseDataClass):
    """Normalization parameters keyed by (integer) feature id."""

    __hash__ = param_hash
    dense_normalization_parameters: Dict[int, NormalizationParameters]
@dataclass(frozen=True)
class ConvNetParameters(BaseDataClass):
    """Architecture spec for a convolutional network (per-layer lists)."""

    conv_dims: List[int]
    conv_height_kernels: List[int]
    pool_types: List[str]
    pool_kernel_sizes: List[int]
    conv_width_kernels: Optional[List[int]] = None
#################################################
# RL Ranking parameters #
#################################################
@dataclass(frozen=True)
class TransformerParameters(BaseDataClass):
    """Architecture spec for the seq2slate transformer."""

    num_heads: int = 1
    dim_model: int = 64
    dim_feedforward: int = 32
    num_stacked_layers: int = 2
    state_embed_dim: Optional[int] = None
@dataclass(frozen=True)
class GRUParameters(BaseDataClass):
    """Architecture spec for a GRU encoder/decoder."""

    dim_model: int
    num_stacked_layers: int
@dataclass(frozen=True)
class BaselineParameters(BaseDataClass):
    """Architecture/warmup spec for the ranking baseline network."""

    dim_feedforward: int
    num_stacked_layers: int
    warmup_num_batches: int = 0
@dataclass(frozen=True)
class Seq2SlateParameters(BaseDataClass):
    """Training options for seq2slate (learning method, IPS clamping,
    optional simulation settings)."""

    on_policy: bool = True
    learning_method: LearningMethod = LearningMethod.REINFORCEMENT_LEARNING
    ips_clamp: Optional[IPSClamp] = None
    simulation: Optional[SimulationParameters] = None
@dataclass(frozen=True)
class RankingParameters(BaseDataClass):
    """Sequence-length limits and serving mode for ranking models."""

    max_src_seq_len: int = 0
    max_tgt_seq_len: int = 0
    greedy_serving: bool = False
| 6,022 | 26.884259 | 80 | py |
ReAgent | ReAgent-master/reagent/core/oss_tensorboard_logger.py | from typing import Optional, Union, Dict, List, Tuple
import torch
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities import rank_zero_only
class LocalCacheLogger:
    """Static helpers that mirror logged metrics into in-memory line-plot
    buffers on a logger object (expects `tb_logger.line_plot_buffer` and
    `tb_logger.line_plot_aggregated` dicts of {plot_name: {line_name: [(x, y)]}}).

    Points accumulate per (plot, line); every 50 points they are averaged
    into `line_plot_aggregated` and the raw buffer is cleared.
    """

    @staticmethod
    def store_metrics(
        tb_logger,
        metrics: Dict[
            str, Union[float, torch.Tensor, Dict[str, Union[float, torch.Tensor]]]
        ],
        step: Optional[int] = None,
    ):
        """Record each metric; a dict value is treated as a multi-line plot
        (one line per inner key), a scalar as a single unnamed line."""
        for plot_name, plot_value_or_dict in metrics.items():
            if isinstance(plot_value_or_dict, dict):
                if plot_name not in tb_logger.line_plot_buffer:
                    tb_logger.line_plot_buffer[plot_name] = {}
                for line_name, plot_value in plot_value_or_dict.items():
                    LocalCacheLogger._add_point(
                        tb_logger, plot_name, line_name, plot_value, step
                    )
            else:
                LocalCacheLogger._add_point(
                    tb_logger, plot_name, "", plot_value_or_dict, step
                )
    @staticmethod
    def _add_point(
        tb_logger,
        plot_name: str,
        line_name: str,
        plot_value: Union[float, torch.Tensor],
        step: Optional[int],
    ):
        """Adds a point to a multi-line plot given the plot name, the line name, and optionally the step (x coordinate)."""
        if isinstance(plot_value, torch.Tensor):
            plot_value = plot_value.item()
        if step is None:
            # No explicit step: continue from the last x on this line, or 0.
            if (
                plot_name in tb_logger.line_plot_buffer
                and line_name in tb_logger.line_plot_buffer[plot_name]
            ):
                x = tb_logger.line_plot_buffer[plot_name][line_name][-1][0] + 1.0
            else:
                x = 0.0
        else:
            x = float(step)
        LocalCacheLogger._create_plots_and_append(
            tb_logger.line_plot_buffer, plot_name, line_name, x, plot_value
        )
        # Every 50 buffered points, fold their mean into the aggregated
        # store and clear the raw buffer to bound memory.
        if len(tb_logger.line_plot_buffer[plot_name][line_name]) >= 50:
            mean = float(
                torch.mean(
                    torch.FloatTensor(
                        [
                            float(p[1])
                            for p in tb_logger.line_plot_buffer[plot_name][line_name]
                        ]
                    )
                ).item()
            )
            LocalCacheLogger._create_plots_and_append(
                tb_logger.line_plot_aggregated, plot_name, line_name, x, mean
            )
            tb_logger.line_plot_buffer[plot_name][line_name].clear()
    @staticmethod
    def _create_plots_and_append(
        plot_store: Dict[str, Dict[str, List[Tuple[float, float]]]],
        plot_name: str,
        line_name: str,
        x: int,
        y: float,
    ):
        """Append (x, y) to plot_store[plot_name][line_name], creating any
        missing nested dict/list along the way."""
        if plot_name in plot_store and line_name in plot_store[plot_name]:
            plot_store[plot_name][line_name].append((x, y))
        elif plot_name in plot_store:
            plot_store[plot_name][line_name] = [(x, y)]
        else:
            plot_store[plot_name] = {line_name: [(x, y)]}
class OssTensorboardLogger(TensorBoardLogger):
    """Wrapper around ManifoldTensorBoardLogger that collects the plot data in memory and can flush to create fblearner plot objects."""
    def __init__(
        self,
        save_dir: str,
        name: Optional[str] = "default",
        version: Optional[Union[int, str]] = None,
        log_graph: bool = False,
        default_hp_metric: bool = True,
        prefix: str = "",
        **kwargs
    ):
        """Same signature as TensorBoardLogger; additionally initializes the
        in-memory plot stores used by LocalCacheLogger."""
        super().__init__(
            save_dir,
            name,
            version,
            log_graph,
            default_hp_metric,
            prefix,
            **kwargs,
        )
        # {plot_name: {line_name: [(x, y), ...]}}
        self.line_plot_aggregated: Dict[str, Dict[str, List[Tuple[float, float]]]] = {}
        self.line_plot_buffer: Dict[str, Dict[str, List[Tuple[float, float]]]] = {}
    @rank_zero_only
    def log_metrics(
        self,
        metrics: Dict[
            str, Union[float, torch.Tensor, Dict[str, Union[float, torch.Tensor]]]
        ],
        step: Optional[int] = None,
    ) -> None:
        """Log a set of metrics. A metric is either a scalar or a set of scalars that will be plotted together"""
        super().log_metrics(metrics, step)
        # Mirror everything into the local in-memory cache as well.
        LocalCacheLogger.store_metrics(self, metrics, step)
    def clear_local_data(self):
        """Drop references to the cached plot data (rebind, don't clear)."""
        # We don't call clear here because it's a lot of data and someone else probably owns it
        self.line_plot_aggregated = {}
        self.line_plot_buffer = {}
| 4,531 | 33.861538 | 136 | py |
ReAgent | ReAgent-master/reagent/core/tensorboardX.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
Context library to allow dropping tensorboardX anywhere in the codebase.
If there is no SummaryWriter in the context, function calls will be no-op.
Usage:
writer = SummaryWriter()
with summary_writer_context(writer):
some_func()
def some_func():
SummaryWriterContext.add_scalar("foo", tensor)
"""
import contextlib
import logging
from typing import Any, Dict, List
from torch.utils.tensorboard import SummaryWriter
logger = logging.getLogger(__name__)
class SummaryWriterContextMeta(type):
    """Metaclass that forwards attribute access on the class to the writer
    on top of the stack; a no-op callable is returned when the stack is
    empty so call sites never need None checks."""
    def __getattr__(cls, func):
        if func.startswith("__"):
            # NOTE(review): type has no __getattr__, so this likely raises
            # AttributeError for dunder lookups — presumably intentional.
            return super().__getattr__(func)
        if not cls._writer_stacks:
            def noop(*args, **kwargs):
                return
            return noop
        writer = cls._writer_stacks[-1]
        def call(*args, **kwargs):
            # Inject the shared global step unless the caller supplied one.
            if "global_step" not in kwargs:
                kwargs["global_step"] = cls._global_step
            try:
                return getattr(writer, func)(*args, **kwargs)
            except Exception as e:
                # Writers may opt into swallowing specific exception types.
                if hasattr(writer, "exceptions_to_ignore") and isinstance(
                    e, writer.exceptions_to_ignore
                ):
                    logger.warning("Ignoring exception: {}".format(e))
                    if hasattr(writer, "exception_logging_func"):
                        writer.exception_logging_func(e)
                    return
                raise
        return call
class SummaryWriterContext(metaclass=SummaryWriterContextMeta):
    """Global access point for the current SummaryWriter stack; any
    SummaryWriter method can be called on this class and is dispatched
    (or no-op'd) by the metaclass."""

    _writer_stacks: List[SummaryWriter] = []
    _global_step = 0
    _custom_scalars: Dict[str, Any] = {}
    @classmethod
    def _reset_globals(cls):
        """Reset step counter and custom-scalar registry (test helper)."""
        cls._global_step = 0
        cls._custom_scalars = {}
    @classmethod
    def increase_global_step(cls):
        cls._global_step += 1
    @classmethod
    def add_histogram(cls, key, val, *args, **kwargs):
        """add_histogram that downgrades ValueError (bad values) to a warning."""
        try:
            return cls.__getattr__("add_histogram")(key, val, *args, **kwargs)
        except ValueError:
            logger.warning(f"Cannot create histogram for {key}, got values: {val}")
    @classmethod
    def add_custom_scalars(cls, writer):
        """
        Call this once you are satisfied setting up custom scalar
        """
        writer.add_custom_scalars(cls._custom_scalars)
    @classmethod
    def add_custom_scalars_multilinechart(cls, tags, category=None, title=None):
        """Register a multiline chart under category/title; titles must be
        unique within a category."""
        assert category and title, "category & title must be set"
        if category not in cls._custom_scalars:
            cls._custom_scalars[category] = {}
        assert (
            title not in cls._custom_scalars[category]
        ), "Title ({}) is already in category ({})".format(title, category)
        cls._custom_scalars[category][title] = ["Multiline", tags]
    @classmethod
    def push(cls, writer):
        """Push a SummaryWriter; it becomes the active writer."""
        assert isinstance(
            writer, SummaryWriter
        ), "writer is not a SummaryWriter: {}".format(writer)
        cls._writer_stacks.append(writer)
    @classmethod
    def pop(cls):
        """Pop and return the active writer."""
        return cls._writer_stacks.pop()
@contextlib.contextmanager
def summary_writer_context(writer):
    """Make `writer` the active SummaryWriter for the duration of the
    `with` block; a None writer makes the block a no-op."""
    if writer is not None:
        SummaryWriterContext.push(writer)
    try:
        yield
    finally:
        if writer is not None:
            SummaryWriterContext.pop()
| 3,412 | 26.97541 | 83 | py |
ReAgent | ReAgent-master/reagent/training/cem_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
The Trainer for Cross-Entropy Method. The idea is that an ensemble of
world models are fitted to predict transitions and reward functions.
A cross entropy method-based planner will then plan the best next action
based on simulation data generated by the fitted world models.
The idea is inspired by: https://arxiv.org/abs/1805.12114
"""
import logging
from typing import List
import reagent.core.types as rlt
import torch.nn as nn
from reagent.core.parameters import CEMTrainerParameters
from reagent.models.cem_planner import CEMPlannerNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
logger = logging.getLogger(__name__)
def print_mdnrnn_losses(minibatch, model_index, losses) -> None:
    """Log the MDN-RNN loss components (`loss`, `bce`, `gmm`, `mse`) for
    one minibatch of one ensemble member."""
    logger.info(
        f"{minibatch}-th minibatch {model_index}-th model: \n"
        f'loss={losses["loss"]}, bce={losses["bce"]}, '
        f'gmm={losses["gmm"]}, mse={losses["mse"]}\n'
    )
class CEMTrainer(ReAgentLightningModule):
    """Trainer for the Cross-Entropy Method planner: it only trains the
    ensemble of world models; planning is done by `cem_planner_network`."""

    def __init__(
        self,
        cem_planner_network: CEMPlannerNetwork,
        world_model_trainers: List[MDNRNNTrainer],
        parameters: CEMTrainerParameters,
    ) -> None:
        super().__init__()
        self.cem_planner_network = cem_planner_network
        # ModuleList so Lightning registers all ensemble members' parameters.
        self.world_model_trainers = nn.ModuleList(world_model_trainers)
    def configure_optimizers(self):
        """Flatten the optimizers of every world-model trainer into one list."""
        return [o for t in self.world_model_trainers for o in t.configure_optimizers()]
    def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
        """Delegate the training step to each world-model trainer in turn."""
        for t in self.world_model_trainers:
            yield from t.train_step_gen(training_batch, batch_idx)
| 1,805 | 35.12 | 87 | py |
ReAgent | ReAgent-master/reagent/training/qrdqn_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Tuple
import reagent.core.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import field
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.optimizer import SoftUpdate
from reagent.optimizer.union import Optimizer__Union
from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning
logger = logging.getLogger(__name__)
class QRDQNTrainer(DQNTrainerBaseLightning):
    """
    Implementation of QR-DQN (Quantile Regression Deep Q-Network)
    See https://arxiv.org/abs/1710.10044 for details
    """
    @resolve_defaults
    def __init__(
        self,
        q_network,
        q_network_target,
        metrics_to_score=None,
        reward_network=None,
        q_network_cpe=None,
        q_network_cpe_target=None,
        actions: List[str] = field(default_factory=list),  # noqa: B008
        rl: RLParameters = field(default_factory=RLParameters),  # noqa: B008
        double_q_learning: bool = True,
        num_atoms: int = 51,
        minibatch_size: int = 1024,
        minibatches_per_step: int = 1,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        cpe_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        evaluation: EvaluationParameters = field(  # noqa: B008
            default_factory=EvaluationParameters
        ),
    ) -> None:
        """Build the QR-DQN trainer.

        q_network/q_network_target output per-action quantile distributions
        with `num_atoms` quantiles. CPE networks are optional and only used
        when counterfactual policy evaluation is enabled.
        """
        super().__init__(
            rl_parameters=rl,
            metrics_to_score=metrics_to_score,
            actions=actions,
            evaluation_parameters=evaluation,
        )
        # TODO: check to ensure no rl parameter value is set that isn't actively used by class
        self.double_q_learning = double_q_learning
        self.minibatch_size = minibatch_size
        self.minibatches_per_step = minibatches_per_step
        self._actions = actions
        self.q_network = q_network
        self.q_network_target = q_network_target
        self.q_network_optimizer = optimizer
        self.num_atoms = num_atoms
        # Quantile midpoints: (i + 0.5) / num_atoms, shape (1, num_atoms).
        self.register_buffer("quantiles", None)
        self.quantiles = (
            (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms)
        ).view(1, -1)
        self._initialize_cpe(
            reward_network, q_network_cpe, q_network_cpe_target, optimizer=cpe_optimizer
        )
        # Per-action reward boosts (zeros unless rl.reward_boost is given).
        self.register_buffer("reward_boosts", None)
        self.reward_boosts = torch.zeros([1, len(self._actions)])
        if rl.reward_boost is not None:
            # pyre-fixme[16]: Optional type has no attribute `keys`.
            for k in rl.reward_boost.keys():
                i = self._actions.index(k)
                # pyre-fixme[16]: Optional type has no attribute `__getitem__`.
                self.reward_boosts[0, i] = rl.reward_boost[k]
    def configure_optimizers(self):
        """Q-network optimizer, optional CPE optimizers, then a SoftUpdate
        "optimizer" that syncs target network parameters."""
        optimizers = []
        target_params = list(self.q_network_target.parameters())
        source_params = list(self.q_network.parameters())
        optimizers.append(
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q_network.parameters()
            )
        )
        if self.calc_cpe_in_training:
            (
                cpe_target_params,
                cpe_source_params,
                cpe_optimizers,
            ) = self._configure_cpe_optimizers()
            target_params += cpe_target_params
            source_params += cpe_source_params
            optimizers += cpe_optimizers
        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )
        return optimizers
    def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int):
        """One QR-DQN training step; yields the quantile-regression loss,
        then CPE losses (if enabled), then the soft-update result."""
        self._check_input(training_batch)
        rewards = self.boost_rewards(training_batch.reward, training_batch.action)
        discount_tensor = torch.full_like(rewards, self.gamma)
        possible_next_actions_mask = training_batch.possible_next_actions_mask.float()
        possible_actions_mask = training_batch.possible_actions_mask.float()
        not_done_mask = training_batch.not_terminal.float()
        if self.use_seq_num_diff_as_time_diff:
            assert self.multi_steps is None
            discount_tensor = torch.pow(self.gamma, training_batch.time_diff.float())
        if self.multi_steps is not None:
            assert training_batch.step is not None
            discount_tensor = torch.pow(self.gamma, training_batch.step.float())
        next_qf = self.q_network_target(training_batch.next_state)
        if self.maxq_learning:
            # Select distribution corresponding to max valued action
            # (double Q-learning selects the argmax with the online network).
            next_q_values = (
                self.q_network(training_batch.next_state)
                if self.double_q_learning
                else next_qf
            ).mean(dim=2)
            next_action = self.argmax_with_mask(
                next_q_values, possible_next_actions_mask
            )
            next_qf = next_qf[range(rewards.shape[0]), next_action.reshape(-1)]
        else:
            # SARSA-style: use the logged next action's distribution.
            next_qf = (next_qf * training_batch.next_action.unsqueeze(-1)).sum(1)
        # Build target distribution
        target_Q = rewards + discount_tensor * not_done_mask * next_qf
        current_qf = self.q_network(training_batch.state)
        # for reporting only
        all_q_values = current_qf.mean(2).detach()
        current_qf = (current_qf * training_batch.action.unsqueeze(-1)).sum(1)
        # (batch, atoms) -> (atoms, batch, 1) -> (atoms, batch, atoms)
        td = target_Q.t().unsqueeze(-1) - current_qf
        # Quantile Huber loss (eq. 10 of the QR-DQN paper).
        loss = (
            self.huber(td) * (self.quantiles - (td.detach() < 0).float()).abs()
        ).mean()
        yield loss
        # pyre-fixme[16]: `DQNTrainer` has no attribute `loss`.
        self.loss = loss.detach()
        # Get Q-values of next states, used in computing cpe
        all_next_action_scores = (
            self.q_network(training_batch.next_state).detach().mean(dim=2)
        )
        logged_action_idxs = torch.argmax(training_batch.action, dim=1, keepdim=True)
        yield from self._calculate_cpes(
            training_batch,
            training_batch.state,
            training_batch.next_state,
            all_q_values,
            all_next_action_scores,
            logged_action_idxs,
            discount_tensor,
            not_done_mask,
        )
        model_action_idxs = self.argmax_with_mask(
            all_q_values,
            possible_actions_mask if self.maxq_learning else training_batch.action,
        )
        self.reporter.log(
            td_loss=loss,
            logged_actions=logged_action_idxs,
            logged_propensities=training_batch.extras.action_probability,
            logged_rewards=rewards,
            logged_values=None,  # Compute at end of each epoch for CPE
            model_values=all_q_values,
            model_values_on_logged_actions=None,  # Compute at end of each epoch for CPE
            model_action_idxs=model_action_idxs,
        )
        yield self.soft_update_result()
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def boost_rewards(
        self, rewards: torch.Tensor, actions: torch.Tensor
    ) -> torch.Tensor:
        """Add the configured per-action reward boost to each reward."""
        # Apply reward boost if specified
        reward_boosts = torch.sum(
            actions.float() * self.reward_boosts, dim=1, keepdim=True
        )
        return rewards + reward_boosts
    def argmax_with_mask(self, q_values, possible_actions_mask):
        """Argmax over actions, with impossible actions excluded via a
        large negative offset."""
        # Set q-values of impossible actions to a very large negative number.
        q_values = q_values.reshape(possible_actions_mask.shape)
        q_values = q_values + self.ACTION_NOT_POSSIBLE_VAL * (1 - possible_actions_mask)
        return q_values.argmax(1)
    # Used to prevent warning when a.shape != b.shape
    def huber(self, x):
        """Elementwise Huber loss with delta=1."""
        return torch.where(x.abs() < 1, 0.5 * x.pow(2), x.abs() - 0.5)
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def get_detached_model_outputs(
        self, state: rlt.FeatureData
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Gets the q values from the model and target networks"""
        # Mean over the quantile dimension yields scalar Q-values.
        q_values = self.q_network(state).mean(dim=2)
        q_values_target = self.q_network_target(state).mean(dim=2)
        return q_values, q_values_target
| 8,782 | 36.216102 | 94 | py |
ReAgent | ReAgent-master/reagent/training/reinforce_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import inspect
import logging
import math
from dataclasses import field
from typing import List, Optional
import reagent.core.types as rlt
import torch
import torch.optim
from reagent.gym.policies.policy import Policy
from reagent.models.base import ModelBase
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import discounted_returns, whiten
logger = logging.getLogger(__name__)
class ReinforceTrainer(ReAgentLightningModule):
    """REINFORCE policy-gradient trainer with optional importance-sampling
    correction (off-policy), reward normalization, and a learned value-net
    baseline (mutually exclusive with normalization)."""

    def __init__(
        self,
        policy: Policy,
        gamma: float = 0.0,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        optimizer_value_net: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        actions: List[str] = field(default_factory=list),  # noqa: B008
        off_policy: bool = False,
        reward_clip: float = 1e6,
        clip_param: float = 1e6,
        normalize: bool = True,
        subtract_mean: bool = True,
        offset_clamp_min: bool = False,
        value_net: Optional[ModelBase] = None,
    ):
        super().__init__()
        self._actions = actions
        self.scorer = policy.scorer
        self.sampler = policy.sampler
        self.gamma = gamma
        self.off_policy = off_policy
        self.reward_clip = reward_clip
        self.clip_param = clip_param
        self.normalize = normalize
        self.subtract_mean = subtract_mean
        self.offset_clamp_min = offset_clamp_min
        self.optimizer = optimizer
        self.optimizer_value_net = optimizer_value_net
        if value_net is not None:
            self.value_net = value_net
            self.value_loss_fn = torch.nn.MSELoss(reduction="mean")
        else:
            self.value_net = None
    def _check_input(self, training_batch: rlt.PolicyGradientInput):
        """Validate tensor ranks; log_prob is only required off-policy."""
        assert training_batch.reward.ndim == 1
        if self.off_policy:
            assert training_batch.log_prob.ndim == 1
    def configure_optimizers(self):
        """Value-net optimizer first (when present), then policy optimizer;
        the order must match the loss yield order in train_step_gen."""
        optimizers = []
        # value net optimizer
        if self.value_net is not None:
            optimizers.append(
                self.optimizer_value_net.make_optimizer_scheduler(
                    self.value_net.parameters()
                )
            )
        # policy optimizer
        optimizers.append(
            self.optimizer.make_optimizer_scheduler(self.scorer.parameters())
        )
        return optimizers
    def train_step_gen(self, training_batch: rlt.PolicyGradientInput, batch_idx: int):
        """Yield the value-net loss (if a baseline is used) then the
        REINFORCE policy-gradient loss."""
        self._check_input(training_batch)
        actions = training_batch.action
        rewards = training_batch.reward.detach()
        scorer_inputs = []
        if inspect.getattr_static(training_batch, "graph", None) is not None:
            # GNN
            scorer_inputs.append(training_batch.graph)
        else:
            scorer_inputs.append(training_batch.state)
        if training_batch.possible_actions_mask is not None:
            scorer_inputs.append(training_batch.possible_actions_mask)
        scores = self.scorer(*scorer_inputs)
        characteristic_eligibility = self.sampler.log_prob(scores, actions).float()
        # Clipped, discounted returns.
        offset_reinforcement = discounted_returns(
            torch.clamp(rewards, max=self.reward_clip).clone(), self.gamma
        )
        if self.normalize:
            offset_reinforcement = whiten(
                offset_reinforcement, subtract_mean=self.subtract_mean
            )
        if self.offset_clamp_min:
            offset_reinforcement = offset_reinforcement.clamp(min=0)
        if self.value_net is not None:
            if self.normalize:
                raise RuntimeError(
                    "Can't apply a baseline and normalize rewards simultaneously"
                )
            baselines = self.value_net(training_batch.state).squeeze()
            yield self.value_loss_fn(baselines, offset_reinforcement)
            # subtract learned value function baselines from rewards
            offset_reinforcement = offset_reinforcement - baselines
        if self.off_policy:
            # Importance weight: exp(log pi_new - log pi_logged), clipped.
            characteristic_eligibility = torch.exp(
                torch.clamp(
                    characteristic_eligibility - training_batch.log_prob,
                    max=math.log(float(self.clip_param)),
                )
            ).float()
        yield -(offset_reinforcement.float()) @ characteristic_eligibility  # PG "loss"
| 4,640 | 37.040984 | 87 | py |
ReAgent | ReAgent-master/reagent/training/parametric_dqn_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Tuple
import reagent.core.parameters as rlp
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import field
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.dqn_trainer_base import DQNTrainerMixin
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
logger = logging.getLogger(__name__)
class ParametricDQNTrainer(DQNTrainerMixin, RLTrainerMixin, ReAgentLightningModule):
    @resolve_defaults
    def __init__(
        self,
        q_network,
        q_network_target,
        reward_network,
        # Start ParametricDQNTrainerParameters
        rl: rlp.RLParameters = field(default_factory=rlp.RLParameters),  # noqa: B008
        double_q_learning: bool = True,
        minibatches_per_step: int = 1,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
    ) -> None:
        """Build the parametric DQN trainer; `rl.q_network_loss` selects
        the TD loss ("mse", "huber", or "bce_with_logits" when gamma==0)."""
        super().__init__()
        self.rl_parameters = rl
        self.double_q_learning = double_q_learning
        self.minibatches_per_step = minibatches_per_step or 1
        self.q_network = q_network
        self.q_network_target = q_network_target
        self.reward_network = reward_network
        self.optimizer = optimizer
        if rl.q_network_loss == "mse":
            self.q_network_loss = F.mse_loss
        elif rl.q_network_loss == "huber":
            self.q_network_loss = F.smooth_l1_loss
        elif rl.q_network_loss == "bce_with_logits":
            # The loss is only used when gamma = 0, reward is between 0 and 1
            # and we need to calculate NE as metrics.
            assert (
                rl.gamma == 0
            ), "bce_with_logits loss is only supported when gamma is 0."
            self.q_network_loss = F.binary_cross_entropy_with_logits
        else:
            raise Exception(
                "Q-Network loss type {} not valid loss.".format(rl.q_network_loss)
            )
def configure_optimizers(self):
optimizers = []
optimizers.append(
self.optimizer.make_optimizer_scheduler(self.q_network.parameters())
)
optimizers.append(
self.optimizer.make_optimizer_scheduler(self.reward_network.parameters())
)
# soft-update
target_params = list(self.q_network_target.parameters())
source_params = list(self.q_network.parameters())
optimizers.append(
SoftUpdate.make_optimizer_scheduler(
target_params, source_params, tau=self.tau
)
)
return optimizers
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def get_detached_model_outputs(
self, state, action
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Gets the q values from the model and target networks"""
q_values = self.q_network(state, action)
q_values_target = self.q_network_target(state, action)
return q_values, q_values_target
def train_step_gen(self, training_batch: rlt.ParametricDqnInput, batch_idx: int):
reward = training_batch.reward
not_terminal = training_batch.not_terminal.float()
discount_tensor = torch.full_like(reward, self.gamma)
if self.use_seq_num_diff_as_time_diff:
assert self.multi_steps is None
discount_tensor = torch.pow(self.gamma, training_batch.time_diff.float())
if self.multi_steps is not None:
# pyre-fixme[16]: Optional type has no attribute `float`.
discount_tensor = torch.pow(self.gamma, training_batch.step.float())
if self.maxq_learning:
# Assuming actions are parametrized in a k-dimensional space
# tiled_state = (batch_size * max_num_action, state_dim)
# possible_actions = (batch_size* max_num_action, k)
# possible_actions_mask = (batch_size, max_num_action)
product = training_batch.possible_next_actions.float_features.shape[0]
batch_size = training_batch.possible_actions_mask.shape[0]
assert product % batch_size == 0, (
f"batch_size * max_num_action {product} is "
f"not divisible by batch_size {batch_size}"
)
max_num_action = product // batch_size
tiled_next_state = training_batch.next_state.get_tiled_batch(max_num_action)
(
all_next_q_values,
all_next_q_values_target,
) = self.get_detached_model_outputs(
tiled_next_state, training_batch.possible_next_actions
)
# Compute max a' Q(s', a') over all possible actions using target network
next_q_values, _ = self.get_max_q_values_with_target(
all_next_q_values,
all_next_q_values_target,
training_batch.possible_next_actions_mask.float(),
)
assert (
len(next_q_values.shape) == 2 and next_q_values.shape[1] == 1
), f"{next_q_values.shape}"
else:
# SARSA (Use the target network)
_, next_q_values = self.get_detached_model_outputs(
training_batch.next_state, training_batch.next_action
)
assert (
len(next_q_values.shape) == 2 and next_q_values.shape[1] == 1
), f"{next_q_values.shape}"
target_q_values = reward + not_terminal * discount_tensor * next_q_values
assert (
target_q_values.shape[-1] == 1
), f"{target_q_values.shape} doesn't end with 1"
# Get Q-value of action taken
q_values = self.q_network(training_batch.state, training_batch.action)
assert (
target_q_values.shape == q_values.shape
), f"{target_q_values.shape} != {q_values.shape}."
td_loss = self.q_network_loss(q_values, target_q_values)
yield td_loss
# pyre-fixme[16]: Optional type has no attribute `metrics`.
if training_batch.extras.metrics is not None:
metrics_reward_concat_real_vals = torch.cat(
(reward, training_batch.extras.metrics), dim=1
)
else:
metrics_reward_concat_real_vals = reward
# get reward estimates
reward_estimates = self.reward_network(
training_batch.state, training_batch.action
)
reward_loss = F.mse_loss(
reward_estimates.squeeze(-1),
metrics_reward_concat_real_vals.squeeze(-1),
)
yield reward_loss
self.reporter.log(
td_loss=td_loss.detach().cpu(),
reward_loss=reward_loss.detach().cpu(),
logged_rewards=reward,
model_values_on_logged_actions=q_values.detach().cpu(),
)
# Use the soft update rule to update target network
yield self.soft_update_result()
| 7,284 | 39.027473 | 88 | py |
ReAgent | ReAgent-master/reagent/training/sac_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
from typing import List, Optional
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import dataclass
from reagent.core.dataclasses import field
from reagent.core.parameters import RLParameters
from reagent.models.actor import LOG_PROB_MIN, LOG_PROB_MAX
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
logger = logging.getLogger(__name__)
@dataclass
class CRRWeightFn:
    """CRR-style weighting of the actor loss by advantage.

    Exactly one of ``indicator_fn_threshold`` / ``exponent_beta`` must be set
    (enforced below). ``exponent_clamp`` optionally caps the exponential
    weights. NOTE(review): truthiness is used throughout, so a threshold or
    beta of exactly 0.0 behaves like "unset".
    """

    # pick indicator or exponent
    indicator_fn_threshold: Optional[float] = None
    exponent_beta: Optional[float] = None
    exponent_clamp: Optional[float] = None

    def __post_init_post_parse__(self):
        # Require exactly one weighting mode, and positive (non-degenerate)
        # beta / clamp values when they are provided.
        assert self.exponent_beta or self.indicator_fn_threshold
        assert not (self.exponent_beta and self.indicator_fn_threshold)
        if self.exponent_beta:
            assert self.exponent_beta > 1e-6
        if self.exponent_clamp:
            assert self.exponent_clamp > 1e-6

    def get_weight_from_advantage(self, advantage):
        """Return per-sample weights for ``advantage``: a 0/1 indicator
        (advantage >= threshold) or exp(advantage / beta), optionally
        clamped to [0, exponent_clamp]."""
        if self.indicator_fn_threshold:
            return (advantage >= self.indicator_fn_threshold).float()

        if self.exponent_beta:
            exp = torch.exp(advantage / self.exponent_beta)
            if self.exponent_clamp:
                exp = torch.clamp(exp, 0.0, self.exponent_clamp)
            return exp
class SACTrainer(RLTrainerMixin, ReAgentLightningModule):
    """
    Soft Actor-Critic trainer as described in https://arxiv.org/pdf/1801.01290

    The actor is assumed to implement reparameterization trick.
    """

    @resolve_defaults
    def __init__(
        self,
        actor_network,
        q1_network,
        q2_network=None,
        value_network=None,
        # Start SACTrainerParameters
        rl: RLParameters = field(default_factory=RLParameters),  # noqa: B008
        q_network_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        value_network_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        actor_network_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        alpha_optimizer: Optional[Optimizer__Union] = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        minibatch_size: int = 1024,
        entropy_temperature: float = 0.01,
        logged_action_uniform_prior: bool = True,
        target_entropy: float = -1.0,
        action_embedding_kld_weight: Optional[float] = None,
        apply_kld_on_mean: bool = False,
        action_embedding_mean: Optional[List[float]] = None,
        action_embedding_variance: Optional[List[float]] = None,
        crr_config: Optional[CRRWeightFn] = None,
        backprop_through_log_prob: bool = True,
    ) -> None:
        """
        Args:
            actor_network: states -> actions, trained to maximize soft value,
                which is value + policy entropy.
            q1_network: states, action -> q-value
            q2_network (optional): double q-learning to stabilize training
                from overestimation bias
            value_network (optional): states -> value of state under actor
            entropy_temperature: alpha in the paper; controlling explore & exploit
            alpha_optimizer: when set, alpha is learned (via ``log_alpha``)
                to drive policy entropy toward ``target_entropy``.
            action_embedding_kld_weight: when truthy, adds a KL-divergence
                penalty pulling the action (or squashed-mean) batch statistics
                toward the given mean/variance.
            crr_config: when set, the actor loss is advantage-weighted log-prob
                (requires ``value_network``).
            backprop_through_log_prob: This is mostly for backward compatibility issue;
                we used to have a bug that does this and it yields a better result in
                some cases
            # TODO: finish
        """
        super().__init__()
        self.rl_parameters = rl

        self.q1_network = q1_network
        self.q2_network = q2_network
        self.q_network_optimizer = q_network_optimizer

        self.value_network = value_network
        self.value_network_optimizer = value_network_optimizer
        # With a value network, its target provides next-state values;
        # otherwise target copies of the Q networks play that role.
        if self.value_network is not None:
            self.value_network_target = copy.deepcopy(self.value_network)
        else:
            self.q1_network_target = copy.deepcopy(self.q1_network)
            self.q2_network_target = copy.deepcopy(self.q2_network)

        self.actor_network = actor_network
        self.actor_network_optimizer = actor_network_optimizer
        self.entropy_temperature = entropy_temperature

        self.alpha_optimizer = alpha_optimizer
        if alpha_optimizer is not None:
            self.target_entropy = target_entropy
            self.log_alpha = torch.nn.Parameter(
                torch.tensor([np.log(self.entropy_temperature)])
            )

        self.logged_action_uniform_prior = logged_action_uniform_prior

        self.add_kld_to_loss = bool(action_embedding_kld_weight)
        self.apply_kld_on_mean = apply_kld_on_mean

        if self.add_kld_to_loss:
            self.kld_weight = action_embedding_kld_weight
            # Calling register_buffer so that the tensors got moved to the right device
            self.register_buffer("action_emb_mean", None)
            self.register_buffer("action_emb_variance", None)
            # Assigning the values here instead of above so that typechecker wouldn't complain
            self.action_emb_mean = torch.tensor(action_embedding_mean)
            self.action_emb_variance = torch.tensor(action_embedding_variance)

        self.crr_config = crr_config
        if crr_config:
            assert self.value_network is not None

        self.backprop_through_log_prob = backprop_through_log_prob

    def configure_optimizers(self):
        """Return optimizers matching train_step_gen's yield order:
        q1, (q2), actor, (alpha), (value), then the soft update."""
        optimizers = []

        optimizers.append(
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q1_network.parameters()
            )
        )
        if self.q2_network:
            optimizers.append(
                self.q_network_optimizer.make_optimizer_scheduler(
                    self.q2_network.parameters()
                )
            )
        optimizers.append(
            self.actor_network_optimizer.make_optimizer_scheduler(
                self.actor_network.parameters()
            )
        )
        if self.alpha_optimizer is not None:
            optimizers.append(
                self.alpha_optimizer.make_optimizer_scheduler([self.log_alpha])
            )
        if self.value_network:
            optimizers.append(
                self.value_network_optimizer.make_optimizer_scheduler(
                    self.value_network.parameters()
                )
            )

        # soft-update: targets mirror whichever networks act as targets above.
        if self.value_network:
            target_params = self.value_network_target.parameters()
            source_params = self.value_network.parameters()
        else:
            target_params = list(self.q1_network_target.parameters())
            source_params = list(self.q1_network.parameters())
            if self.q2_network:
                target_params += list(self.q2_network_target.parameters())
                source_params += list(self.q2_network.parameters())
        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )

        return optimizers

    def train_step_gen(self, training_batch: rlt.PolicyNetworkInput, batch_idx: int):
        """
        IMPORTANT: the input action here is assumed to match the
        range of the output of the actor.

        Yields one loss per configured optimizer, in the same order as
        ``configure_optimizers``.
        """
        assert isinstance(training_batch, rlt.PolicyNetworkInput)

        state = training_batch.state
        action = training_batch.action
        reward = training_batch.reward
        discount = torch.full_like(reward, self.gamma)
        not_done_mask = training_batch.not_terminal

        #
        # First, optimize Q networks; minimizing MSE between
        # Q(s, a) & r + discount * V'(next_s)
        #

        if self.value_network is not None:
            next_state_value = self.value_network_target(training_batch.next_state)
        else:
            # No value network: bootstrap from min of target Q networks on the
            # actor's next action, with an entropy correction.
            next_state_actor_output = self.actor_network(training_batch.next_state)
            next_state_actor_action = (
                training_batch.next_state,
                rlt.FeatureData(next_state_actor_output.action),
            )
            next_state_value = self.q1_network_target(*next_state_actor_action)

            if self.q2_network is not None:
                target_q2_value = self.q2_network_target(*next_state_actor_action)
                next_state_value = torch.min(next_state_value, target_q2_value)

            log_prob_a = self.actor_network.get_log_prob(
                training_batch.next_state, next_state_actor_output.action
            ).clamp(LOG_PROB_MIN, LOG_PROB_MAX)
            next_state_value -= self.entropy_temperature * log_prob_a

        if self.gamma > 0.0:
            target_q_value = (
                reward + discount * next_state_value * not_done_mask.float()
            )
        else:
            # This is useful in debugging instability issues
            target_q_value = reward

        q1_value = self.q1_network(state, action)
        q1_loss = F.mse_loss(q1_value, target_q_value)
        yield q1_loss

        if self.q2_network:
            q2_value = self.q2_network(state, action)
            q2_loss = F.mse_loss(q2_value, target_q_value)
            yield q2_loss

        # Second, optimize the actor; minimizing KL-divergence between
        # propensity & softmax of value. Due to reparameterization trick,
        # it ends up being log_prob(actor_action) - Q(s, actor_action)

        actor_output = self.actor_network(state)

        state_actor_action = (state, rlt.FeatureData(actor_output.action))
        q1_actor_value = self.q1_network(*state_actor_action)
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(*state_actor_action)
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)

        actor_log_prob = actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX)

        if not self.backprop_through_log_prob:
            actor_log_prob = actor_log_prob.detach()

        if self.crr_config is not None:
            # CRR: advantage-weighted log-prob instead of the SAC actor loss.
            cur_value = self.value_network(training_batch.state)
            advantage = (min_q_actor_value - cur_value).detach()
            # pyre-fixme[16]: `Optional` has no attribute `get_weight_from_advantage`.
            crr_weight = self.crr_config.get_weight_from_advantage(advantage)
            assert (
                actor_log_prob.shape == crr_weight.shape
            ), f"{actor_log_prob.shape} != {crr_weight.shape}"
            actor_loss = -(actor_log_prob * crr_weight.detach())
        else:
            actor_loss = self.entropy_temperature * actor_log_prob - min_q_actor_value
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()

        if self.add_kld_to_loss:
            if self.apply_kld_on_mean:
                action_batch_m = torch.mean(actor_output.squashed_mean, axis=0)
                action_batch_v = torch.var(actor_output.squashed_mean, axis=0)
            else:
                action_batch_m = torch.mean(actor_output.action, axis=0)
                action_batch_v = torch.var(actor_output.action, axis=0)
            # KL divergence between the batch action distribution (diagonal
            # Gaussian assumption) and the configured prior mean/variance.
            kld = (
                0.5
                * (
                    (action_batch_v + (action_batch_m - self.action_emb_mean) ** 2)
                    / self.action_emb_variance
                    - 1
                    + self.action_emb_variance.log()
                    - action_batch_v.log()
                ).sum()
            )

            actor_loss_mean += self.kld_weight * kld

        yield actor_loss_mean

        # Optimize Alpha
        if self.alpha_optimizer is not None:
            alpha_loss = -(
                (
                    self.log_alpha
                    * (
                        actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX)
                        + self.target_entropy
                    ).detach()
                ).mean()
            )
            yield alpha_loss
            # New temperature takes effect from the next batch onward.
            self.entropy_temperature = self.log_alpha.exp()

        #
        # Lastly, if applicable, optimize value network; minimizing MSE between
        # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ]
        #

        if self.value_network is not None:
            state_value = self.value_network(state)

            if self.logged_action_uniform_prior:
                log_prob_a = torch.zeros_like(min_q_actor_value)
                target_value = min_q_actor_value
            else:
                log_prob_a = actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX)
                target_value = min_q_actor_value - self.entropy_temperature * log_prob_a

            value_loss = F.mse_loss(state_value, target_value.detach())
            yield value_loss

        self.logger.log_metrics(
            {
                "td_loss": q1_loss,
                "logged_rewards": reward.mean(),
                "model_values_on_logged_actions": q1_value.mean(),
                "q1_value": q1_value.mean(),
                "entropy_temperature": self.entropy_temperature,
                "log_prob_a": log_prob_a.mean(),
                "next_state_value": next_state_value.mean(),
                "target_q_value": target_q_value.mean(),
                "min_q_actor_value": min_q_actor_value.mean(),
                "actor_output_log_prob": actor_output.log_prob.mean(),
                "actor_loss": actor_loss.mean(),
            },
            step=self.all_batches_processed,
        )
        if self.q2_network:
            self.logger.log_metrics(
                {"q2_value": q2_value.mean()},
                step=self.all_batches_processed,
            )
        if self.value_network:
            self.logger.log_metrics(
                {"target_state_value": target_value.mean()},
                step=self.all_batches_processed,
            )
        if self.add_kld_to_loss:
            self.logger.log_metrics(
                {
                    "action_batch_mean": action_batch_m.mean(),
                    "action_batch_var": action_batch_v.mean(),
                    # pyre-fixme[61]: `kld` may not be initialized here.
                    "kld": kld,
                },
                step=self.all_batches_processed,
            )

        # Use the soft update rule to update the target networks
        result = self.soft_update_result()
        self.log("td_loss", q1_loss, prog_bar=True)
        yield result
| 14,825 | 38.118734 | 94 | py |
ReAgent | ReAgent-master/reagent/training/utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
EPS = np.finfo(float).eps.item()  # float64 machine epsilon; guards divide-by-zero


def rescale_actions(
    actions: torch.Tensor,
    new_min: torch.Tensor,
    new_max: torch.Tensor,
    prev_min: torch.Tensor,
    prev_max: torch.Tensor,
) -> torch.Tensor:
    """Linearly scale ``actions`` from [prev_min, prev_max] to [new_min, new_max].

    Bounds may be scalars or per-coordinate tensors (broadcast elementwise).
    Raises AssertionError if any action lies outside the previous range or the
    new range is inverted.
    """
    assert torch.all(prev_min <= actions) and torch.all(
        actions <= prev_max
    ), f"{actions} has values outside of [{prev_min}, {prev_max}]."
    assert torch.all(
        new_min <= new_max
    ), f"{new_min} is (has coordinate) greater than {new_max}."
    prev_range = prev_max - prev_min
    new_range = new_max - new_min
    return ((actions - prev_min) / prev_range) * new_range + new_min


def whiten(x: torch.Tensor, subtract_mean: bool) -> torch.Tensor:
    """Normalize ``x`` to unit standard deviation, optionally zero-centered.

    Fix: the previous implementation did ``numer = x; numer -= x.mean()``,
    where ``numer`` aliases ``x``, so the caller's tensor was mutated in
    place. Using out-of-place subtraction leaves the input untouched.
    """
    numer = x - x.mean() if subtract_mean else x
    # EPS keeps the division finite for constant (zero-std) inputs.
    return numer / (x.std() + EPS)
def discounted_returns(rewards: torch.Tensor, gamma: float = 0) -> torch.Tensor:
    """Compute the discounted reward-to-go for every step of a rollout.

    With ``gamma == 0`` the rewards themselves (as float) are returned;
    otherwise entry i is sum_{j>=i} gamma**(j-i) * rewards[j].
    """
    if gamma == 0:
        return rewards.float()
    running = 0
    reversed_returns = []
    # Accumulate from the last step backwards, then flip into forward order.
    for r in reversed(rewards.numpy()):
        running = r + gamma * running
        reversed_returns.append(running)
    reversed_returns.reverse()
    return torch.tensor(reversed_returns).float()
def gen_permutations(seq_len: int, num_action: int) -> torch.Tensor:
    """
    generate all seq_len permutations for a given action set
    the return shape is (SEQ_LEN, PERM_NUM, ACTION_DIM)
    """
    factors = [torch.arange(num_action) for _ in range(seq_len)]
    combos = torch.cartesian_prod(*factors)
    if seq_len == 1:
        # With a single factor cartesian_prod returns a 1-D tensor; restore
        # the sequence dimension.
        combos = combos.unsqueeze(1)
    encoded = F.one_hot(combos, num_action).transpose(0, 1)
    return encoded.float()
| 1,864 | 29.080645 | 80 | py |
ReAgent | ReAgent-master/reagent/training/reagent_lightning_module.py | #!/usr/bin/env python3
import inspect
import logging
import pytorch_lightning as pl
import torch
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.core.utils import lazy_property
from typing_extensions import final
logger = logging.getLogger(__name__)
class ReAgentLightningModule(pl.LightningModule):
    """Base LightningModule for ReAgent trainers.

    Subclasses implement ``train_step_gen``, a generator that yields exactly
    one loss per configured optimizer; ``training_step`` drives it so each
    optimizer index consumes one yielded loss. Also carries buffers that
    enable incremental (resumable) training.
    """

    def __init__(self, automatic_optimization=True):
        super().__init__()
        self._automatic_optimization = automatic_optimization
        self._training_step_generator = None
        self._reporter = pl.loggers.base.DummyExperiment()
        # For the generator API
        self._verified_steps = False
        # For summary_writer property
        self._summary_writer_logger = None
        self._summary_writer = None
        # To enable incremental training
        self.register_buffer("_next_stopping_epoch", None)
        self.register_buffer("_cleanly_stopped", None)
        self._next_stopping_epoch = torch.tensor([-1]).int()
        self._cleanly_stopped = torch.ones(1)
        self._setup_input_type()
        self.batches_processed_this_epoch = 0
        self.all_batches_processed = 0

    def _setup_input_type(self):
        """Infer the batch type from train_step_gen's ``training_batch``
        annotation so dict batches can be converted via ``from_dict``."""
        self._training_batch_type = None
        sig = inspect.signature(self.train_step_gen)
        assert "training_batch" in sig.parameters
        param = sig.parameters["training_batch"]
        annotation = param.annotation
        if annotation == inspect.Parameter.empty:
            return
        if hasattr(annotation, "from_dict"):
            self._training_batch_type = annotation

    def set_reporter(self, reporter):
        """Attach a reporter (or a no-op dummy when None); returns self."""
        if reporter is None:
            reporter = pl.loggers.base.DummyExperiment()
        self._reporter = reporter
        return self

    @property
    def reporter(self):
        return self._reporter

    def set_clean_stop(self, clean_stop: bool):
        # Stored in a buffer so it survives checkpointing.
        self._cleanly_stopped[0] = int(clean_stop)

    def increase_next_stopping_epochs(self, num_epochs: int):
        """Extend the stopping epoch for incremental training; returns self."""
        self._next_stopping_epoch += num_epochs
        self.set_clean_stop(False)
        return self

    def train_step_gen(self, training_batch, batch_idx: int):
        """
        Implement training step as generator here
        """
        raise NotImplementedError

    def soft_update_result(self) -> torch.Tensor:
        """
        A dummy loss to trigger soft-update
        """
        one = torch.ones(1, requires_grad=True)
        return one + one

    @property
    def summary_writer(self):
        """
        Accessor to TensorBoard's SummaryWriter
        """
        if self._summary_writer_logger is self.logger:
            # If self.logger doesn't change between call, then return cached result
            return self._summary_writer

        # Invalidate
        self._summary_writer = None
        self._summary_writer_logger = self.logger

        if isinstance(self.logger, pl.loggers.base.LoggerCollection):
            for logger in self.logger._logger_iterable:
                if isinstance(logger, pl.loggers.tensorboard.TensorBoardLogger):
                    self._summary_writer = logger.experiment
                    break
        elif isinstance(logger, pl.loggers.tensorboard.TensorBoardLogger):
            self._summary_writer = logger.experiment

        return self._summary_writer

    # pyre-fixme[14]: `training_step` overrides method defined in `LightningModule`
    #  inconsistently.
    def training_step(self, batch, batch_idx: int, optimizer_idx: int = 0):
        """Advance the train_step_gen generator by one yield per optimizer;
        on the last optimizer, verify the generator is exhausted."""
        assert (optimizer_idx == 0) or (self._num_optimizing_steps > 1)

        if self._training_step_generator is None:
            if self._training_batch_type and isinstance(batch, dict):
                batch = self._training_batch_type.from_dict(batch)
            self._training_step_generator = self.train_step_gen(batch, batch_idx)

        ret = next(self._training_step_generator)

        if optimizer_idx == self._num_optimizing_steps - 1:
            # One-time check that the generator yields exactly as many losses
            # as there are optimizers.
            if not self._verified_steps:
                try:
                    next(self._training_step_generator)
                except StopIteration:
                    self._verified_steps = True
                if not self._verified_steps:
                    raise RuntimeError(
                        "training_step_gen() yields too many times."
                        "The number of yields should match the number of optimizers,"
                        f" in this case {self._num_optimizing_steps}"
                    )
            self._training_step_generator = None
            SummaryWriterContext.increase_global_step()

        return ret

    def optimizers(self, use_pl_optimizer: bool = True):
        # Normalize Lightning's single-optimizer return to a list.
        o = super().optimizers(use_pl_optimizer)
        if isinstance(o, list):
            return o
        return [o]

    @lazy_property
    def _num_optimizing_steps(self) -> int:
        return len(self.configure_optimizers())

    @final
    def on_epoch_end(self):
        logger.info(
            f"Finished epoch with {self.batches_processed_this_epoch} batches processed"
        )
        self.batches_processed_this_epoch = 0

        # Flush the reporter which has accumulated data in
        # training/validation/test
        self.reporter.flush(self.current_epoch)

        # Tell the trainer to stop.
        if self.current_epoch == self._next_stopping_epoch.item():
            self.trainer.should_stop = True

    @final
    def on_train_batch_end(self, *args, **kwargs):
        self.batches_processed_this_epoch += 1
        self.all_batches_processed += 1

    @final
    def on_validation_batch_end(self, *args, **kwargs):
        self.batches_processed_this_epoch += 1

    @final
    def on_test_batch_end(self, *args, **kwargs):
        self.batches_processed_this_epoch += 1

    def train(self, *args):
        # trainer.train(batch) was the old, pre-Lightning ReAgent trainer API.
        # make sure that nobody is trying to call trainer.train() this way.
        # trainer.train() or trainer.train(True/False) is allowed - this puts the network into training/eval mode.
        if (len(args) == 0) or ((len(args) == 1) and (isinstance(args[0], bool))):
            super().train(*args)
        else:
            raise NotImplementedError(
                "Method .train() is not used for ReAgent Lightning trainers. Please use .fit() method of the pl.Trainer instead"
            )
class StoppingEpochCallback(pl.Callback):
    """Extends the stopping epoch of a resumed incremental-training run.

    Epoch/step counters persist in checkpoints, so instead of tracking a
    growing ``max_epochs`` on the pl.Trainer, the module itself stores the
    next stopping epoch. At the end of the pretrain routine, if the previous
    run stopped cleanly, push the stopping epoch out by ``num_epochs``.
    ``_cleanly_stopped`` must be reset to True before checkpointing; that
    happens in ``ModelManager.save_trainer()``.
    """

    def __init__(self, num_epochs):
        super().__init__()
        self.num_epochs = num_epochs

    def on_pretrain_routine_end(self, trainer, pl_module):
        assert isinstance(pl_module, ReAgentLightningModule)
        was_clean = pl_module._cleanly_stopped.item()
        logger.info(f"cleanly stopped: {was_clean}")
        if not was_clean:
            return
        pl_module.increase_next_stopping_epochs(self.num_epochs)
def has_test_step_override(trainer_module: ReAgentLightningModule):
    """Return True when ``trainer_module``'s class overrides ``test_step``."""
    base_impl = pl.LightningModule.test_step
    return type(trainer_module).test_step != base_impl
| 7,568 | 35.921951 | 128 | py |
ReAgent | ReAgent-master/reagent/training/c51_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import reagent.core.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import field
from reagent.core.parameters import RLParameters
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
class C51Trainer(RLTrainerMixin, ReAgentLightningModule):
    """
    Implementation of 51 Categorical DQN (C51)

    See https://arxiv.org/abs/1707.06887 for details
    """

    @resolve_defaults
    def __init__(
        self,
        q_network,
        q_network_target,
        actions: List[str] = field(default_factory=list),  # noqa: B008
        rl: RLParameters = field(default_factory=RLParameters),  # noqa: B008
        double_q_learning: bool = True,
        minibatch_size: int = 1024,
        minibatches_per_step: int = 1,
        num_atoms: int = 51,
        qmin: float = -100,
        qmax: float = 200,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
    ) -> None:
        """
        Args:
            q_network: states, action -> q-value
            q_network_target: model that provides targets
            actions(optional): list of agent's actions
            rl (optional): an instance of the RLParameter class, which
                defines relevant hyperparameters
            double_q_learning (optional): whether or not double Q learning, enabled by default,
            minibatch_size (optional): the size of the minibatch
            minibatches_per_step (optional): the number of minibatch updates
                per training step
            num_atoms (optional): number of "canonical returns"in the discretized value distributions
            qmin (optional): minimum q-value
            qmax (optional): maximum q-value
            optimizer (optional): the optimizer class and
                optimizer hyperparameters for the q network(s) optimizer
        """
        super().__init__()
        self.double_q_learning = double_q_learning
        self.minibatch_size = minibatch_size
        self.minibatches_per_step = minibatches_per_step
        self._actions = actions
        self.q_network = q_network
        self.q_network_target = q_network_target
        self.q_network_optimizer = optimizer
        self.qmin = qmin
        self.qmax = qmax
        self.num_atoms = num_atoms
        self.rl_parameters = rl
        # Fixed support of the value distribution: num_atoms evenly spaced
        # "canonical returns" in [qmin, qmax]. Registered as a buffer so it
        # follows the module across devices.
        self.register_buffer("support", None)
        self.support = torch.linspace(self.qmin, self.qmax, self.num_atoms)
        # Distance between adjacent atoms on the support.
        self.scale_support = (self.qmax - self.qmin) / (self.num_atoms - 1.0)

        # Optional per-action additive reward boost, from config.
        self.register_buffer("reward_boosts", None)
        self.reward_boosts = torch.zeros([1, len(self._actions)])
        if self.rl_parameters.reward_boost is not None:
            # pyre-fixme[16]: Optional type has no attribute `keys`.
            for k in self.rl_parameters.reward_boost.keys():
                i = self._actions.index(k)
                # pyre-fixme[16]: Optional type has no attribute `__getitem__`.
                self.reward_boosts[0, i] = self.rl_parameters.reward_boost[k]

    def configure_optimizers(self):
        """Q-network optimizer followed by the target-network soft update."""
        optimizers = [
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q_network.parameters()
            )
        ]
        # soft-update
        target_params = list(self.q_network_target.parameters())
        source_params = list(self.q_network.parameters())
        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )
        return optimizers

    def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int):
        """Yield the categorical cross-entropy loss, then the soft update.

        Projects the discounted next-state value distribution onto the fixed
        support (the Bellman projection of the C51 paper) and minimizes the
        cross-entropy against the predicted log-distribution.
        """
        rewards = self.boost_rewards(training_batch.reward, training_batch.action)
        discount_tensor = torch.full_like(rewards, self.gamma)
        possible_next_actions_mask = training_batch.possible_next_actions_mask.float()
        possible_actions_mask = training_batch.possible_actions_mask.float()

        not_terminal = training_batch.not_terminal.float()

        if self.use_seq_num_diff_as_time_diff:
            assert self.multi_steps is None
            discount_tensor = torch.pow(self.gamma, training_batch.time_diff.float())
        if self.multi_steps is not None:
            assert training_batch.step is not None
            discount_tensor = torch.pow(self.gamma, training_batch.step.float())

        next_dist = self.q_network_target.log_dist(training_batch.next_state).exp()

        if self.maxq_learning:
            # Select distribution corresponding to max valued action
            if self.double_q_learning:
                # Double Q: choose the action with the online network, but
                # keep the target network's distribution for that action.
                next_q_values = (
                    self.q_network.log_dist(training_batch.next_state).exp()
                    * self.support
                ).sum(2)
            else:
                next_q_values = (next_dist * self.support).sum(2)

            next_action = self.argmax_with_mask(
                next_q_values, possible_next_actions_mask
            )
            next_dist = next_dist[range(rewards.shape[0]), next_action.reshape(-1)]
        else:
            # SARSA: use the distribution of the logged next action.
            next_dist = (next_dist * training_batch.next_action.unsqueeze(-1)).sum(1)

        # Build target distribution
        target_Q = rewards + discount_tensor * not_terminal * self.support
        target_Q = target_Q.clamp(self.qmin, self.qmax)

        # rescale to indicies [0, 1, ..., N-1]
        b = (target_Q - self.qmin) / self.scale_support
        lo = b.floor().to(torch.int64)
        up = b.ceil().to(torch.int64)

        # handle corner cases of l == b == u
        # without the following, it would give 0 signal, whereas we want
        # m to add p(s_t+n, a*) to index l == b == u.
        # So we artificially adjust l and u.
        # (1) If 0 < l == u < N-1, we make l = l-1, so b-l = 1
        # (2) If 0 == l == u, we make u = 1, so u-b=1
        # (3) If l == u == N-1, we make l = N-2, so b-1 = 1
        # This first line handles (1) and (3).
        lo[(up > 0) * (lo == up)] -= 1
        # Note: l has already changed, so the only way l == u is possible is
        # if u == 0, in which case we let u = 1
        # I don't even think we need the first condition in the next line
        up[(lo < (self.num_atoms - 1)) * (lo == up)] += 1

        # distribute the probabilities
        # m_l = m_l + p(s_t+n, a*)(u - b)
        # m_u = m_u + p(s_t+n, a*)(b - l)
        m = torch.zeros_like(next_dist)
        # pyre-fixme[16]: `Tensor` has no attribute `scatter_add_`.
        m.scatter_add_(dim=1, index=lo, src=next_dist * (up.float() - b))
        m.scatter_add_(dim=1, index=up, src=next_dist * (b - lo.float()))
        log_dist = self.q_network.log_dist(training_batch.state)

        # for reporting only
        all_q_values = (log_dist.exp() * self.support).sum(2).detach()
        model_action_idxs = self.argmax_with_mask(
            all_q_values,
            possible_actions_mask if self.maxq_learning else training_batch.action,
        )

        # Cross-entropy between the projected target m and the predicted
        # log-distribution of the logged action.
        log_dist = (log_dist * training_batch.action.unsqueeze(-1)).sum(1)

        loss = -(m * log_dist).sum(1).mean()

        if batch_idx % self.trainer.log_every_n_steps == 0:
            self.reporter.log(
                td_loss=loss,
                logged_actions=torch.argmax(training_batch.action, dim=1, keepdim=True),
                logged_propensities=training_batch.extras.action_probability,
                logged_rewards=rewards,
                model_values=all_q_values,
                model_action_idxs=model_action_idxs,
            )
            self.log("td_loss", loss, prog_bar=True)

        yield loss

        result = self.soft_update_result()
        yield result

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    #  its type `no_grad` is not callable.
    @torch.no_grad()
    def boost_rewards(
        self, rewards: torch.Tensor, actions: torch.Tensor
    ) -> torch.Tensor:
        """Add the per-action configured reward boost (one-hot dot product)."""
        # Apply reward boost if specified
        reward_boosts = torch.sum(
            actions.float() * self.reward_boosts, dim=1, keepdim=True
        )
        return rewards + reward_boosts

    def argmax_with_mask(self, q_values, possible_actions_mask):
        """Argmax over actions, ignoring those masked out as impossible."""
        # Set q-values of impossible actions to a very large negative number.
        q_values = q_values.reshape(possible_actions_mask.shape)
        q_values = q_values + RLTrainerMixin.ACTION_NOT_POSSIBLE_VAL * (
            1 - possible_actions_mask
        )
        return q_values.argmax(1)
| 8,784 | 41.033493 | 101 | py |
ReAgent | ReAgent-master/reagent/training/multi_stage_trainer.py | #!/usr/bin/env python3
import bisect
import functools
import itertools
from collections import OrderedDict
from typing import List, Dict, Tuple
import torch.nn as nn
from pytorch_lightning.loops.optimization.optimizer_loop import ClosureResult
from reagent.core.utils import lazy_property
from .reagent_lightning_module import ReAgentLightningModule
class MultiStageTrainer(ReAgentLightningModule):
    def __init__(
        self,
        trainers: List[ReAgentLightningModule],
        epochs: List[int],
        assign_reporter_function=None,
        flush_reporter_function=None,
        automatic_optimization=True,
    ):
        """
        Args:
            trainers: the stage trainers, run in sequence.
            epochs: epochs per stage; must align with ``trainers``.
            assign_reporter_function: optional hook (trainers, reporter) that
                distributes a reporter to the stage trainers.
            flush_reporter_function: optional hook (module, reporter, epoch)
                used to flush reporters; defaults to ``_flush_reporter``.
            automatic_optimization: forwarded to the Lightning base class.
        """
        super().__init__(automatic_optimization=automatic_optimization)
        # NB: wrapping in a ModuleList so the state can be saved
        self._trainers = nn.ModuleList(trainers)
        self._assign_reporter_function = assign_reporter_function
        self._flush_reporter_function = (
            functools.partial(flush_reporter_function, self)
            if flush_reporter_function
            else self._flush_reporter
        )
        self._in_testing_loop = False

        # Cumulative sum of number of epochs up to the index (of trainers)
        self._trainer_acc_epochs = [0] + epochs
        for i in range(1, len(epochs) + 1):
            self._trainer_acc_epochs[i] += self._trainer_acc_epochs[i - 1]

        # Num of epochs for each trainer. Used to check if the sum of them
        # equals to num_epochs used in pytorch-lightning trainer
        self.trainer_epoch_mapping = OrderedDict()
        for t, e in zip(trainers, epochs):
            trainer_name = type(t).__name__
            self.trainer_epoch_mapping[trainer_name] = e
@property
def multi_stage_total_epochs(self):
return self._trainer_acc_epochs[-1]
def set_reporter(self, reporter):
super().set_reporter(reporter)
if self._assign_reporter_function:
self._assign_reporter_function(self._trainers, reporter)
else:
# By default, assume CompoundReporter with the same
# number of reporters as trainers
assert len(self._trainers) == len(
reporter._reporters
), f"{len(self._trainers)} != {len(reporter._reporters)}"
for t, r in zip(self._trainers, reporter._reporters):
t.set_reporter(r)
@lazy_property
def _optimizer_step_to_trainer_idx(self) -> Dict[int, Tuple[int, int]]:
mapping = {}
offset = 0
for i, t in enumerate(self._trainers):
num_optimizing_steps = t._num_optimizing_steps
for j in range(num_optimizing_steps):
mapping[offset + j] = (i, offset)
offset += num_optimizing_steps
return mapping
def _flush_reporter(self, reporter, epoch):
"""
By default, assume CompoundReporter with the same
number of reporters as trainers
"""
if not self._in_testing_loop:
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
reporter._reporters[epoch_trainer_idx].flush(epoch)
else:
for r in reporter._reporters:
r.flush(epoch)
def on_fit_start(self):
self._starting_epoch = self.trainer.current_epoch
# Connecting pl.Trainer to stage trainers
for t in self._trainers:
t.trainer = self.trainer
t.on_fit_start()
self.reporter.set_flush_function(self._flush_reporter_function)
def on_fit_end(self):
del self._starting_epoch
# Disconnecting
for t in self._trainers:
t.on_fit_end()
del t.trainer
self.reporter.set_flush_function(None)
def on_test_start(self):
self._starting_epoch = self.trainer.current_epoch
self._in_testing_loop = True
for t in self._trainers:
t.on_test_start()
def on_test_end(self):
del self._starting_epoch
self._in_testing_loop = False
for t in self._trainers:
t.on_test_end()
def _get_trainer_idx_from_epoch(self):
# Cycling through the trainers
epoch = (self.trainer.current_epoch - self._starting_epoch) % (
self._trainer_acc_epochs[-1]
)
trainer_idx = bisect.bisect_right(self._trainer_acc_epochs, epoch) - 1
return trainer_idx
def configure_optimizers(self):
# FIXME: Doesn't support LRScheduler yet
return list(
itertools.chain(*[t.configure_optimizers() for t in self._trainers])
)
def training_step(self, batch, batch_idx: int, optimizer_idx: int = 0):
trainer_idx, offset = self._optimizer_step_to_trainer_idx[optimizer_idx]
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
assert (
trainer_idx == epoch_trainer_idx
), f"Got {trainer_idx}; expected {epoch_trainer_idx}"
return self._trainers[trainer_idx].training_step(
batch, batch_idx, optimizer_idx - offset
)
def training_epoch_end(self, outputs):
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
self._trainers[epoch_trainer_idx].training_epoch_end(outputs)
def validation_step(self, *args, **kwargs):
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
return self._trainers[epoch_trainer_idx].validation_step(*args, **kwargs)
def validation_epoch_end(self, outputs):
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
self._trainers[epoch_trainer_idx].validation_epoch_end(outputs)
def test_step(self, *args, **kwargs):
return {
str(i): trainer.test_step(*args, **kwargs)
for i, trainer in enumerate(self._trainers)
}
def test_epoch_end(self, outputs):
for i, trainer in enumerate(self._trainers):
trainer.test_epoch_end([o[str(i)] for o in outputs])
def optimizer_step(
self,
epoch: int,
batch_idx: int,
optimizer,
optimizer_idx: int,
optimizer_closure,
on_tpu: int = False,
using_native_amp: int = False,
using_lbfgs: int = False,
):
assert epoch == self.trainer.current_epoch
epoch_trainer_idx = self._get_trainer_idx_from_epoch()
optimizer_trainer_idx, offset = self._optimizer_step_to_trainer_idx[
optimizer_idx
]
if epoch_trainer_idx == optimizer_trainer_idx:
# FIXME: epoch argument is not really correct
# Trainer will see the total epochs, including those epochs they
# are inactive.
self._trainers[epoch_trainer_idx].optimizer_step(
epoch,
batch_idx,
optimizer,
optimizer_idx - offset,
optimizer_closure,
on_tpu=on_tpu,
using_native_amp=using_native_amp,
using_lbfgs=using_lbfgs,
)
# FIXME: this is a hack around https://github.com/PyTorchLightning/pytorch-lightning/pull/9360
# which assumes that the optimizer closure will be consumed per training step invocation
# however this is not true in the multi-stage trainer as the training step is called for *all* of the
# optimizers configured under `trainers` even though only one lightning module is active at a given time
# A more robust solution would be to use manual optimization, where the lightning trainer does no inspection
# of the optimization closure for further processing
elif hasattr(optimizer_closure, "_result"):
optimizer_closure._result = ClosureResult(closure_loss=None)
| 7,726 | 36.509709 | 116 | py |
ReAgent | ReAgent-master/reagent/training/ppo_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import inspect
import logging
from dataclasses import field
from typing import Dict, List, Optional, Union
import reagent.core.types as rlt
import torch
import torch.optim
from reagent.core.configuration import resolve_defaults
from reagent.gym.policies.policy import Policy
from reagent.models.base import ModelBase
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import discounted_returns, whiten
logger = logging.getLogger(__name__)
class PPOTrainer(ReAgentLightningModule):
    """
    Proximal Policy Optimization (PPO). See https://arxiv.org/pdf/1707.06347.pdf
    This is the "clip" version of PPO. It does not include:
    - KL divergence
    - Bootstrapping with a critic model (our approach only works if full trajectories up to terminal state are fed in)
    Optionally, a value network can be trained and used as a baseline for rewards.
    """
    @resolve_defaults
    def __init__(
        self,
        policy: Policy,
        gamma: float = 0.9,
        optimizer: Optimizer__Union = field( # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        optimizer_value_net: Optimizer__Union = field( # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        actions: List[str] = field(default_factory=list), # noqa: B008
        reward_clip: float = 1e6,
        normalize: bool = True,
        subtract_mean: bool = True,
        offset_clamp_min: bool = False,
        update_freq: int = 1, # how many env steps between updates
        update_epochs: int = 1, # how many epochs to run when updating (for PPO)
        ppo_batch_size: int = 1, # batch size (number of trajectories) used for PPO updates
        ppo_epsilon: float = 0.2, # clamp importance weights between 1-epsilon and 1+epsilon
        entropy_weight: float = 0.0, # weight of the entropy term in the PPO loss
        value_net: Optional[ModelBase] = None,
    ):
        """
        Args:
            policy: supplies the scorer (model) and the sampler used for
                log-probs / entropy.
            gamma: discount factor for computing discounted returns.
            optimizer: optimizer config for the policy (scorer) network.
            optimizer_value_net: optimizer config for the baseline value net.
            actions: names of the discrete actions.
            reward_clip: rewards are clamped to at most this value.
            normalize: whiten discounted returns (mutually exclusive with a
                value-net baseline).
            subtract_mean: subtract the mean when whitening.
            offset_clamp_min: clamp (possibly whitened) returns at zero.
            update_freq / update_epochs / ppo_batch_size / ppo_epsilon /
                entropy_weight: PPO update-schedule and loss hyperparameters
                (see inline comments on the signature).
            value_net: optional baseline network; when given, its prediction
                is subtracted from the returns.
        """
        # PPO relies on customized update schemas, achieved by manual_backward()
        super().__init__(automatic_optimization=False)
        self.scorer = policy.scorer
        self.sampler = policy.sampler
        self.gamma = gamma
        self.optimizer_value_net = optimizer_value_net
        self.actions = actions
        self.reward_clip = reward_clip
        self.normalize = normalize
        self.subtract_mean = subtract_mean
        self.offset_clamp_min = offset_clamp_min
        self.update_freq = update_freq
        self.update_epochs = update_epochs
        self.ppo_batch_size = ppo_batch_size
        self.ppo_epsilon = ppo_epsilon
        self.entropy_weight = entropy_weight
        self.optimizer = optimizer
        self.value_net = value_net
        if value_net is not None:
            self.value_loss_fn = torch.nn.MSELoss(reduction="mean")
            assert (
                not self.normalize
            ), "Can't apply a value baseline and normalize rewards simultaneously"
        assert (ppo_epsilon >= 0) and (
            ppo_epsilon <= 1
        ), "ppo_epslion has to be in [0;1]"
        # Buffer of full trajectories accumulated until the next model update.
        self.traj_buffer = []
    def _trajectory_to_losses(
        self, trajectory: rlt.PolicyGradientInput
    ) -> Dict[str, torch.Tensor]:
        """
        Get a dict of losses for the trajectory. Dict always includes PPO loss.
        If a value baseline is trained, a loss for the value network is also included.
        """
        losses = {}
        actions = trajectory.action
        rewards = trajectory.reward.detach()
        scorer_inputs = []
        if inspect.getattr_static(trajectory, "graph", None) is not None:
            # TODO: can this line be hit currently in ReAgent?
            # GNN
            scorer_inputs.append(trajectory.graph)
        else:
            scorer_inputs.append(trajectory.state)
        if trajectory.possible_actions_mask is not None:
            scorer_inputs.append(trajectory.possible_actions_mask)
        scores = self.scorer(*scorer_inputs)
        # Reward-to-go (clipped, discounted returns) is the advantage signal.
        offset_reinforcement = discounted_returns(
            torch.clamp(rewards, max=self.reward_clip).clone(), self.gamma
        )
        if self.normalize:
            offset_reinforcement = whiten(
                offset_reinforcement, subtract_mean=self.subtract_mean
            )
        if self.offset_clamp_min:
            offset_reinforcement = offset_reinforcement.clamp(min=0)
        if self.value_net is not None:
            # subtract learned value function baselines from rewards
            baselines = self.value_net(trajectory.state).squeeze()
            # use reward-to-go as label for training the value function
            losses["value_net_loss"] = self.value_loss_fn(
                baselines, offset_reinforcement
            )
            # detach bcs we want PPO to tweak policy, not baseline
            offset_reinforcement = offset_reinforcement - baselines.detach()
        # Importance weight: exp(log pi_new(a|s) - log pi_old(a|s)).
        target_propensity = self.sampler.log_prob(scores, actions).float()
        characteristic_eligibility = torch.exp(
            target_propensity - trajectory.log_prob.detach()
        ).float()
        # Clipped-surrogate objective: take the pessimistic (min) of the
        # unclipped and clipped weighted returns; negated for minimization.
        losses["ppo_loss"] = -torch.min(
            offset_reinforcement.float() @ characteristic_eligibility,
            offset_reinforcement.float()
            @ torch.clamp(
                characteristic_eligibility,
                1 - self.ppo_epsilon,
                1 + self.ppo_epsilon,
            ),
        )
        if self.entropy_weight != 0:
            entropy = self.sampler.entropy(scores)
            # "-" bcs minimizing, not maximizing
            losses["ppo_loss"] = losses["ppo_loss"] - self.entropy_weight * entropy
        return losses
    def configure_optimizers(self):
        """Return optimizers: value net first (if present), then the policy."""
        optimizers = []
        # value net optimizer
        if self.value_net is not None:
            optimizers.append(
                self.optimizer_value_net.make_optimizer_scheduler(
                    self.value_net.parameters()
                )
            )
        # policy optimizer
        optimizers.append(
            self.optimizer.make_optimizer_scheduler(self.scorer.parameters())
        )
        return optimizers
    def get_optimizers(self):
        """Return (value_net_optimizer or None, policy_optimizer)."""
        opts = self.optimizers()
        if self.value_net is not None:
            return opts[0], opts[1]
        return None, opts[0]
    # pyre-fixme[14]: `training_step` overrides method defined in
    # `ReAgentLightningModule` inconsistently.
    def training_step(
        self,
        training_batch: Union[rlt.PolicyGradientInput, Dict[str, torch.Tensor]],
        batch_idx: int,
    ):
        """Buffer the trajectory; run a model update every `update_freq` steps."""
        if isinstance(training_batch, dict):
            training_batch = rlt.PolicyGradientInput.from_dict(training_batch)
        self.traj_buffer.append(training_batch)
        if len(self.traj_buffer) == self.update_freq:
            self.update_model()
    def update_model(self):
        """Run `update_epochs` PPO passes over the buffered trajectories."""
        assert (
            len(self.traj_buffer) == self.update_freq
        ), "trajectory buffer does not have sufficient samples for model_update"
        for _ in range(self.update_epochs):
            # iterate through minibatches of PPO updates in random order
            random_order = torch.randperm(len(self.traj_buffer))
            for i in range(0, len(self.traj_buffer), self.ppo_batch_size):
                idx = random_order[i : i + self.ppo_batch_size]
                training_batch_list = [self.traj_buffer[i] for i in idx]
                self._update_model(training_batch_list)
        self.traj_buffer = [] # empty the buffer
    def _update_model(self, training_batch_list: List[rlt.PolicyGradientInput]):
        """Accumulate losses over a minibatch and apply manual optimizer steps."""
        losses = {
            "ppo_loss": [],
            "value_net_loss": [],
        }
        value_net_opt, ppo_opt = self.get_optimizers()
        for traj in training_batch_list:
            loss = self._trajectory_to_losses(traj)
            for k, v in loss.items():
                losses[k].append(v)
        if self.value_net is not None:
            # TD loss for the baseline value network
            value_net_loss = torch.stack(losses["value_net_loss"]).sum()
            value_net_opt.zero_grad()
            self.manual_backward(value_net_loss)
            value_net_opt.step()
        # PPO "loss" for the policy network
        ppo_loss = torch.stack(losses["ppo_loss"]).sum()
        ppo_opt.zero_grad()
        self.manual_backward(ppo_loss)
        ppo_opt.step()
| 8,551 | 38.410138 | 118 | py |
ReAgent | ReAgent-master/reagent/training/dqn_trainer_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Optional
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.torch_utils import masked_softmax
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.evaluator import Evaluator
from reagent.optimizer import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
logger = logging.getLogger(__name__)
class DQNTrainerMixin:
    # Sentinel Q-value assigned to disallowed actions. Its magnitude is large
    # enough that no legitimate action can ever score below it.
    ACTION_NOT_POSSIBLE_VAL = -1e9

    def get_max_q_values(self, q_values, possible_actions_mask):
        # Single-network variant: the online network doubles as its own target.
        return self.get_max_q_values_with_target(
            q_values, q_values, possible_actions_mask
        )

    def get_max_q_values_with_target(
        self, q_values, q_values_target, possible_actions_mask
    ):
        """Compute the target max-Q values used in the Q-learning update.

        Args:
            q_values: (batch_size, action_dim) Q-values from the online
                network, one row of per-action values per state.
            q_values_target: (batch_size, action_dim) Q-values produced by
                the target network for the same states.
            possible_actions_mask: (batch_size, action_dim) 0/1 tensor where
                entry [i][j] is 1 iff action j may be taken from state i.

        Returns:
            A pair of (batch_size, 1) tensors: the maximum target Q-value per
            state and the index of the corresponding action (the index is
            consumed by create_from_tensors_dqn() in evaluation_data_page.py).
        """
        mask_shape = possible_actions_mask.shape
        # The parametric DQN may hand us flattened scores; line them up with
        # the mask before doing any per-row reductions.
        online_q = q_values.reshape(mask_shape)
        target_q = q_values_target.reshape(mask_shape)
        # Push disallowed actions far below every legitimate score.
        penalty = self.ACTION_NOT_POSSIBLE_VAL * (1 - possible_actions_mask)
        online_q = online_q + penalty
        target_q = target_q + penalty
        if self.double_q_learning:
            # Double Q-learning: choose the argmax with the online network but
            # read its value from the target network, which curbs the
            # overestimation bias of vanilla max-Q.
            _, best_idx = online_q.max(dim=1, keepdim=True)
            best_target_q = target_q.gather(1, best_idx)
        else:
            best_target_q, best_idx = target_q.max(dim=1, keepdim=True)
        return best_target_q, best_idx
class DQNTrainerBaseLightning(DQNTrainerMixin, RLTrainerMixin, ReAgentLightningModule):
    """Common base for discrete-action DQN trainers: loss selection, input
    validation, reward boosting, and optional counterfactual policy
    evaluation (CPE) via auxiliary reward / Q networks."""
    def __init__(
        self,
        rl_parameters: RLParameters,
        metrics_to_score=None,
        actions: Optional[List[str]] = None,
        evaluation_parameters: Optional[EvaluationParameters] = None,
    ):
        """
        Args:
            rl_parameters: RL hyperparameters (loss type, gamma, etc.).
            metrics_to_score: optional extra metric names scored by CPE;
                "reward" is always appended.
            actions: names of the discrete actions; required.
            evaluation_parameters: controls whether CPE runs during training.
        """
        super().__init__()
        self.rl_parameters = rl_parameters
        self.time_diff_unit_length = rl_parameters.time_diff_unit_length
        self.tensorboard_logging_freq = rl_parameters.tensorboard_logging_freq
        self.calc_cpe_in_training = (
            evaluation_parameters and evaluation_parameters.calc_cpe_in_training
        )
        assert actions is not None
        self._actions: List[str] = actions
        if rl_parameters.q_network_loss == "mse":
            self.q_network_loss = F.mse_loss
        elif rl_parameters.q_network_loss == "huber":
            self.q_network_loss = F.smooth_l1_loss
        else:
            raise Exception(
                "Q-Network loss type {} not valid loss.".format(
                    rl_parameters.q_network_loss
                )
            )
        if metrics_to_score:
            self.metrics_to_score = metrics_to_score + ["reward"]
        else:
            self.metrics_to_score = ["reward"]
    def _check_input(self, training_batch: rlt.DiscreteDqnInput):
        """Validate shapes and invariants of a training batch; raises on a
        non-terminal state with no possible next actions."""
        assert isinstance(training_batch, rlt.DiscreteDqnInput)
        assert training_batch.not_terminal.dim() == training_batch.reward.dim() == 2
        assert (
            training_batch.not_terminal.shape[1] == training_batch.reward.shape[1] == 1
        )
        assert training_batch.action.dim() == training_batch.next_action.dim() == 2
        assert (
            training_batch.action.shape[1]
            == training_batch.next_action.shape[1]
            == self.num_actions
        )
        if torch.logical_and(
            training_batch.possible_next_actions_mask.float().sum(dim=1) == 0,
            training_batch.not_terminal.squeeze().bool(),
        ).any():
            # make sure there's no non-terminal state with no possible next actions
            raise ValueError(
                "No possible next actions. Should the environment have terminated?"
            )
    @property
    def num_actions(self) -> int:
        assert self._actions is not None, "Not a discrete action DQN"
        return len(self._actions)
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def boost_rewards(
        self, rewards: torch.Tensor, actions: torch.Tensor
    ) -> torch.Tensor:
        """Add the configured per-action boost to each row's reward.

        `actions` is a one-hot (batch, num_actions) tensor, so the product
        with `self.reward_boosts` selects the boost of the taken action.
        """
        # Apply reward boost if specified
        reward_boosts = torch.sum(
            actions.float() * self.reward_boosts,
            dim=1,
            keepdim=True,
        )
        return rewards + reward_boosts
    def _initialize_cpe(
        self,
        reward_network,
        q_network_cpe,
        q_network_cpe_target,
        optimizer: Optimizer__Union,
    ) -> None:
        """Wire up the auxiliary networks used for CPE, or disable CPE."""
        if not self.calc_cpe_in_training:
            # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network`.
            self.reward_network = None
            return
        assert reward_network is not None, "reward_network is required for CPE"
        self.reward_network = reward_network
        # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network_optimizer`.
        self.reward_network_optimizer = optimizer
        assert (
            q_network_cpe is not None and q_network_cpe_target is not None
        ), "q_network_cpe and q_network_cpe_target are required for CPE"
        # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe`.
        self.q_network_cpe = q_network_cpe
        # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_target`.
        self.q_network_cpe_target = q_network_cpe_target
        # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_optimizer`.
        self.q_network_cpe_optimizer = optimizer
        # The CPE networks emit one block of num_actions outputs per metric;
        # these offsets index the start of each metric's block.
        num_output_nodes = len(self.metrics_to_score) * self.num_actions
        reward_idx_offsets = torch.arange(
            0,
            num_output_nodes,
            self.num_actions,
            dtype=torch.long,
        )
        self.register_buffer("reward_idx_offsets", reward_idx_offsets)
        reward_stripped_metrics_to_score = (
            self.metrics_to_score[:-1] if len(self.metrics_to_score) > 1 else None
        )
        # pyre-fixme[16]: `DQNTrainerBase` has no attribute `evaluator`.
        self.evaluator = Evaluator(
            self._actions,
            self.rl_parameters.gamma,
            self,
            metrics_to_score=reward_stripped_metrics_to_score,
        )
    def _configure_cpe_optimizers(self):
        """Build optimizers for the CPE reward and Q networks.

        Returns (target_params, source_params, optimizers) so the caller can
        also register the soft-update of the CPE target network.
        """
        target_params = list(self.q_network_cpe_target.parameters())
        source_params = list(self.q_network_cpe.parameters())
        # TODO: why is reward net commented out?
        # source_params += list(self.reward_network.parameters())
        optimizers = []
        optimizers.append(
            self.reward_network_optimizer.make_optimizer_scheduler(
                self.reward_network.parameters()
            )
        )
        optimizers.append(
            self.q_network_cpe_optimizer.make_optimizer_scheduler(
                self.q_network_cpe.parameters()
            )
        )
        return target_params, source_params, optimizers
    def _calculate_cpes(
        self,
        training_batch,
        states,
        next_states,
        all_action_scores,
        all_next_action_scores,
        logged_action_idxs,
        discount_tensor,
        not_done_mask,
    ):
        """Generator yielding CPE losses: first the reward-network loss, then
        the CPE Q-network loss. Yields nothing when CPE is disabled."""
        if not self.calc_cpe_in_training:
            return
        if training_batch.extras.metrics is None:
            metrics_reward_concat_real_vals = training_batch.reward
        else:
            metrics_reward_concat_real_vals = torch.cat(
                (training_batch.reward, training_batch.extras.metrics), dim=1
            )
        model_propensities_next_states = masked_softmax(
            all_next_action_scores,
            training_batch.possible_next_actions_mask
            if self.maxq_learning
            else training_batch.next_action,
            self.rl_temperature,
        )
        ######### Train separate reward network for CPE evaluation #############
        reward_estimates = self.reward_network(states)
        reward_estimates_for_logged_actions = reward_estimates.gather(
            1, self.reward_idx_offsets + logged_action_idxs
        )
        reward_loss = F.mse_loss(
            reward_estimates_for_logged_actions, metrics_reward_concat_real_vals
        )
        yield reward_loss
        ######### Train separate q-network for CPE evaluation #############
        metric_q_values = self.q_network_cpe(states).gather(
            1, self.reward_idx_offsets + logged_action_idxs
        )
        all_metrics_target_q_values = torch.chunk(
            self.q_network_cpe_target(next_states).detach(),
            len(self.metrics_to_score),
            dim=1,
        )
        target_metric_q_values = []
        for i, per_metric_target_q_values in enumerate(all_metrics_target_q_values):
            # Expected next-state value under the model's action propensities,
            # zeroed for terminal transitions, then bootstrapped.
            per_metric_next_q_values = torch.sum(
                per_metric_target_q_values * model_propensities_next_states,
                1,
                keepdim=True,
            )
            per_metric_next_q_values = per_metric_next_q_values * not_done_mask
            per_metric_target_q_values = metrics_reward_concat_real_vals[
                :, i : i + 1
            ] + (discount_tensor * per_metric_next_q_values)
            target_metric_q_values.append(per_metric_target_q_values)
        target_metric_q_values = torch.cat(target_metric_q_values, dim=1)
        metric_q_value_loss = self.q_network_loss(
            metric_q_values, target_metric_q_values
        )
        # The model_propensities computed below are not used right now. The CPE graphs in the Outputs
        # tab use model_propensities computed in the function create_from_tensors_dqn() in evaluation_data_page.py,
        # which is called on the eval_table_sample in the gather_eval_data() function below.
        model_propensities = masked_softmax(
            all_action_scores,
            training_batch.possible_actions_mask
            if self.maxq_learning
            else training_batch.action,
            self.rl_temperature,
        )
        # Extract rewards predicted by the reward_network. The other columns will
        # give predicted values for other metrics, if such were specified.
        model_rewards = reward_estimates[
            :,
            torch.arange(
                self.reward_idx_offsets[0],
                self.reward_idx_offsets[0] + self.num_actions,
            ),
        ]
        self.reporter.log(
            reward_loss=reward_loss,
            model_propensities=model_propensities,
            model_rewards=model_rewards,
        )
        yield metric_q_value_loss
    def gather_eval_data(self, validation_step_outputs):
        """Concatenate per-batch EvaluationDataPages (on CPU) into one page."""
        was_on_gpu = self.on_gpu
        self.cpu()
        eval_data = None
        for edp in validation_step_outputs:
            if eval_data is None:
                eval_data = edp
            else:
                eval_data = eval_data.append(edp)
        if eval_data and eval_data.mdp_id is not None:
            eval_data = eval_data.sort()
            eval_data = eval_data.compute_values(self.gamma)
            eval_data.validate()
        if was_on_gpu:
            self.cuda()
        return eval_data
    def validation_step(self, batch, batch_idx):
        """Build an EvaluationDataPage for the batch (moved to CPU)."""
        if isinstance(batch, dict):
            batch = rlt.DiscreteDqnInput.from_dict(batch)
        # HACK: Move to cpu in order to hold more batches in memory
        # This is only needed when trainers need in-memory
        # EvaluationDataPages of the full evaluation dataset
        return EvaluationDataPage.create_from_training_batch(batch, self).cpu()
    def validation_epoch_end(self, valid_step_outputs):
        # As explained in the comments to the validation_step function in
        # pytorch_lightning/core/lightning.py, this function is generally used as follows:
        # val_outs = []
        # for val_batch in val_data:
        # out = validation_step(val_batch)
        # val_outs.append(out)
        # validation_epoch_end(val_outs)
        # The input arguments of validation_epoch_end() is a list of EvaluationDataPages,
        # which matches the way it is used in gather_eval_data() above.
        eval_data = self.gather_eval_data(valid_step_outputs)
        if eval_data and eval_data.mdp_id is not None:
            cpe_details = self.evaluator.evaluate_post_training(eval_data)
            self.reporter.log(cpe_details=cpe_details)
| 14,123 | 39.469914 | 115 | py |
ReAgent | ReAgent-master/reagent/training/slate_q_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import enum
import logging
from typing import Optional
import reagent.core.parameters as rlp
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.dataclasses import field
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
logger = logging.getLogger(__name__)
class NextSlateValueNormMethod(enum.Enum):
    """
    Selects how the summed item Q-values of the next slate are normalized
    when forming the target for the current slate item (used when
    single_selection is off): divide either by the size of the current slate
    (NORM_BY_CURRENT_SLATE_SIZE) or by the size of the next slate
    (NORM_BY_NEXT_SLATE_SIZE).
    """

    NORM_BY_CURRENT_SLATE_SIZE = "norm_by_current_slate_size"
    NORM_BY_NEXT_SLATE_SIZE = "norm_by_next_slate_size"
class SlateQTrainer(RLTrainerMixin, ReAgentLightningModule):
    """Trainer for slate Q-learning: the slate's Q-value is decomposed into
    per-item Q-values weighted by per-item values (softmaxed when
    single_selection is on)."""
    def __init__(
        self,
        q_network,
        q_network_target,
        slate_size,
        # Start SlateQTrainerParameters
        rl: rlp.RLParameters = field( # noqa: B008
            default_factory=lambda: rlp.RLParameters(maxq_learning=False)
        ),
        optimizer: Optimizer__Union = field( # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        slate_opt_parameters: Optional[rlp.SlateOptParameters] = None,
        discount_time_scale: Optional[float] = None,
        single_selection: bool = True,
        next_slate_value_norm_method: NextSlateValueNormMethod = NextSlateValueNormMethod.NORM_BY_CURRENT_SLATE_SIZE,
        minibatch_size: int = 1024,
        evaluation: rlp.EvaluationParameters = field( # noqa: B008
            default_factory=lambda: rlp.EvaluationParameters(calc_cpe_in_training=False)
        ),
    ) -> None:
        """
        Args:
            q_network: states, action -> q-value
            slate_size(int): a fixed slate size
            rl (optional): an instance of the RLParameter class, which
                defines relevant hyperparameters
            optimizer (optional): the optimizer class and
                optimizer hyperparameters for the q network(s) optimizer
            discount_time_scale (optional): use to control the discount factor (gamma)
                relative to the time difference (t2-t1), i.e., gamma^((t2-t1)/time_scale).
                If it is absent, we won't adjust the discount factor by the time difference.
            single_selection (optional): TBD
            next_slate_value_norm_method (optional): how to calculate the next slate value
                when single_selection is False. By default we use NORM_BY_CURRENT_SLATE_SIZE.
            minibatch_size (optional): the size of the minibatch
            evaluation (optional): TBD
        """
        super().__init__()
        self.rl_parameters = rl
        self.discount_time_scale = discount_time_scale
        self.single_selection = single_selection
        self.next_slate_value_norm_method = next_slate_value_norm_method
        self.q_network = q_network
        self.q_network_target = q_network_target
        self.q_network_optimizer = optimizer
        self.slate_size = slate_size
        self.slate_opt_parameters = slate_opt_parameters
    def configure_optimizers(self):
        """Return the Q-network optimizer plus the target-network soft update."""
        optimizers = []
        optimizers.append(
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q_network.parameters()
            )
        )
        target_params = list(self.q_network_target.parameters())
        source_params = list(self.q_network.parameters())
        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )
        return optimizers
    def _action_docs(
        self,
        state: rlt.FeatureData,
        action: torch.Tensor,
        terminal_mask: Optional[torch.Tensor] = None,
    ) -> rlt.DocList:
        """Select the candidate docs indexed by `action` from the state.

        NOTE(review): `action` rows under `terminal_mask` are zeroed
        *in place*, which mutates the caller's tensor — confirm intended.
        """
        # for invalid indices, simply set action to 0 so we can batch index still
        if terminal_mask is not None:
            assert terminal_mask.shape == (
                action.shape[0],
            ), f"{terminal_mask.shape} != 0th dim of {action.shape}"
            action[terminal_mask] = torch.zeros_like(action[terminal_mask])
        docs = state.candidate_docs
        assert docs is not None
        return docs.select_slate(action)
    def _get_unmasked_q_values(
        self, q_network, state: rlt.FeatureData, slate: rlt.DocList
    ) -> torch.Tensor:
        """Gets the q values from the model and target networks"""
        batch_size, slate_size, _ = slate.float_features.shape
        # TODO: Probably should create a new model type
        # Score every (state, item) pair by repeating the state per item.
        return q_network(
            state.repeat_interleave(slate_size, dim=0), slate.as_feature_data()
        ).view(batch_size, slate_size)
    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def _get_maxq_next_action(self, next_state: rlt.FeatureData) -> torch.Tensor:
        """Get the next action list based on the slate optimization strategy."""
        slate_opt_parameters = self.slate_opt_parameters
        assert slate_opt_parameters is not None
        if slate_opt_parameters.method == rlp.SlateOptMethod.TOP_K:
            return self._get_maxq_topk(next_state)
        else:
            raise NotImplementedError(
                "SlateQ with optimization method other than TOP_K is not implemented."
            )
    def _get_maxq_topk(self, next_state: rlt.FeatureData) -> torch.Tensor:
        """Pick the top slate_size candidates by value-weighted target Q."""
        candidate_docs = next_state.candidate_docs
        assert candidate_docs is not None
        batch_size, num_candidates, _ = candidate_docs.float_features.shape
        assert 0 < self.slate_size <= num_candidates
        # Score all candidates (identity slate) against the target network.
        docs = candidate_docs.select_slate(
            torch.arange(num_candidates).repeat(batch_size, 1)
        )
        next_q_values = self._get_unmasked_q_values(
            self.q_network_target, next_state, docs
        ) * self._get_docs_value(docs)
        _, next_actions = torch.topk(next_q_values, self.slate_size, dim=1)
        return next_actions
    def _get_docs_value(self, docs: rlt.DocList) -> torch.Tensor:
        """Per-item weights: masked values, softmaxed when single_selection."""
        # Multiplying by the mask to filter out selected padding items.
        value = docs.value * docs.mask
        if self.single_selection:
            value = F.softmax(value, dim=1)
        return value
    def _get_slate_size(self, state: rlt.FeatureData) -> torch.Tensor:
        """Get the actual size (ignore all padded items) of each slate by summing item masks."""
        mask = self._get_item_mask(state)
        return torch.minimum(
            mask.sum(1, keepdim=True),
            torch.tensor([self.slate_size], device=mask.device),
        )
    def _get_item_mask(self, state: rlt.FeatureData) -> torch.Tensor:
        """Get the mask from the given state."""
        candidate_docs = state.candidate_docs
        assert candidate_docs is not None
        return candidate_docs.mask
    def _get_avg_by_slate_size(self, batch: rlt.SlateQInput):
        """Get the slate_size for averaging the sum of slate value."""
        if (
            self.next_slate_value_norm_method
            == NextSlateValueNormMethod.NORM_BY_NEXT_SLATE_SIZE
        ):
            return self._get_slate_size(batch.next_state)
        if (
            self.next_slate_value_norm_method
            == NextSlateValueNormMethod.NORM_BY_CURRENT_SLATE_SIZE
        ):
            return self._get_slate_size(batch.state)
        raise NotImplementedError(
            f"The next_slate_value_norm_method {self.next_slate_value_norm_method} has not been implemented"
        )
    def train_step_gen(self, training_batch: rlt.SlateQInput, batch_idx: int):
        """Generator yielding the TD loss and then the soft-update result."""
        assert isinstance(
            training_batch, rlt.SlateQInput
        ), f"learning input is a {type(training_batch)}"
        reward = training_batch.reward
        reward_mask = training_batch.reward_mask
        discount_tensor = torch.full_like(reward, self.gamma)
        # Adjust the discount factor by the time_diff if the discount_time_scale is provided,
        # and the time_diff exists in the training_batch.
        if self.discount_time_scale and training_batch.time_diff is not None:
            discount_tensor = discount_tensor ** (
                training_batch.time_diff / self.discount_time_scale
            )
        next_action = (
            self._get_maxq_next_action(training_batch.next_state)
            if self.rl_parameters.maxq_learning
            else training_batch.next_action
        )
        terminal_mask = (training_batch.not_terminal.to(torch.bool) == False).squeeze(1)
        next_action_docs = self._action_docs(
            training_batch.next_state,
            next_action,
            terminal_mask=terminal_mask,
        )
        # Value-weighted sum of next-slate item Q-values from the target net.
        next_q_values = torch.sum(
            self._get_unmasked_q_values(
                self.q_network_target,
                training_batch.next_state,
                next_action_docs,
            )
            * self._get_docs_value(next_action_docs),
            dim=1,
            keepdim=True,
        )
        # If not single selection, divide max-Q by the actual slate size.
        if not self.single_selection:
            next_q_values = next_q_values / self._get_avg_by_slate_size(training_batch)
        filtered_max_q_vals = next_q_values * training_batch.not_terminal.float()
        target_q_values = reward + (discount_tensor * filtered_max_q_vals)
        # Don't mask if not single selection
        if self.single_selection:
            target_q_values = target_q_values[reward_mask]
        # Get Q-value of action taken
        action_docs = self._action_docs(training_batch.state, training_batch.action)
        q_values = self._get_unmasked_q_values(
            self.q_network, training_batch.state, action_docs
        )
        if self.single_selection:
            q_values = q_values[reward_mask]
        all_action_scores = q_values.detach()
        value_loss = F.mse_loss(q_values, target_q_values)
        yield value_loss
        if not self.single_selection:
            all_action_scores = all_action_scores.sum(dim=1, keepdim=True)
        # Logging at the end to schedule all the cuda operations first
        self.reporter.log(
            td_loss=value_loss,
            model_values_on_logged_actions=all_action_scores,
        )
        # Use the soft update rule to update the target networks
        result = self.soft_update_result()
        self.log("td_loss", value_loss, prog_bar=True)
        yield result
| 10,984 | 38.800725 | 117 | py |
ReAgent | ReAgent-master/reagent/training/imitator_training.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import torch
logger = logging.getLogger(__name__)
def get_valid_actions_from_imitator(imitator, input, drop_threshold):
    """Create mask for non-viable actions under the imitator."""
    features = input.float_features
    if isinstance(imitator, torch.nn.Module):
        # Torch imitator: forward pass, then softmax the logits into probs.
        action_probs = torch.nn.functional.softmax(imitator(features), dim=1)
    else:
        # Non-torch imitator (e.g. scikit-learn): expects CPU inputs and
        # returns probabilities directly.
        action_probs = torch.tensor(imitator(features.cpu()))
    # Normalize each row by its most likely action; keep actions whose
    # relative probability clears the drop threshold.
    row_max = action_probs.max(keepdim=True, dim=1)[0]
    return (action_probs / row_max >= drop_threshold).float()
| 804 | 31.2 | 85 | py |
ReAgent | ReAgent-master/reagent/training/reward_network_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from enum import Enum
from typing import Optional
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import field
from reagent.models.base import ModelBase
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
logger = logging.getLogger(__name__)
class LossFunction(Enum):
MSE = "MSE_Loss"
SmoothL1Loss = "SmoothL1_Loss"
L1Loss = "L1_Loss"
BCELoss = "BCE_Loss"
def _get_loss_function(
loss_fn: LossFunction,
reward_ignore_threshold: Optional[float],
weighted_by_inverse_propensity: bool,
):
reduction_type = "none"
if loss_fn == LossFunction.MSE:
torch_fn = torch.nn.MSELoss(reduction=reduction_type)
elif loss_fn == LossFunction.SmoothL1Loss:
torch_fn = torch.nn.SmoothL1Loss(reduction=reduction_type)
elif loss_fn == LossFunction.L1Loss:
torch_fn = torch.nn.L1Loss(reduction=reduction_type)
elif loss_fn == LossFunction.BCELoss:
torch_fn = torch.nn.BCELoss(reduction=reduction_type)
def wrapper_loss_fn(pred, target, weight):
loss = torch_fn(pred, target)
if weighted_by_inverse_propensity:
assert weight.shape == loss.shape
loss = loss * weight
# ignore abnormal reward only during training
if pred.requires_grad and reward_ignore_threshold is not None:
loss = loss[target <= reward_ignore_threshold]
assert len(loss) > 0, (
f"reward ignore threshold set too small. target={target}, "
f"threshold={reward_ignore_threshold}"
)
return torch.mean(loss)
return wrapper_loss_fn
class RewardNetTrainer(ReAgentLightningModule):
    """Supervised trainer that regresses a reward network onto logged rewards.

    Supports several regression losses (see ``LossFunction``), optional
    inverse-propensity sample weighting, and optional dropping of abnormally
    large target rewards during training.
    """

    def __init__(
        self,
        reward_net: ModelBase,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        loss_type: LossFunction = LossFunction.MSE,
        reward_ignore_threshold: Optional[float] = None,
        weighted_by_inverse_propensity: bool = False,
    ) -> None:
        """
        Args:
            reward_net: model producing ``predicted_reward`` for a batch.
            optimizer: optimizer config applied to ``reward_net``'s parameters.
            loss_type: which regression loss to use (default: MSE).
            reward_ignore_threshold: if set, training examples whose target
                reward exceeds this value are dropped from the loss.
            weighted_by_inverse_propensity: if True, each example's loss is
                weighted by the inverse of its logged propensity.
        """
        super().__init__()
        self.reward_net = reward_net
        self.optimizer = optimizer
        self.loss_type = loss_type
        self.reward_ignore_threshold = reward_ignore_threshold
        self.weighted_by_inverse_propensity = weighted_by_inverse_propensity
        self.loss_fn = _get_loss_function(
            loss_type, reward_ignore_threshold, weighted_by_inverse_propensity
        )

    def configure_optimizers(self):
        """Return one optimizer/scheduler pair for the reward network."""
        optimizers = []
        optimizers.append(
            self.optimizer.make_optimizer_scheduler(self.reward_net.parameters())
        )
        return optimizers

    def _get_sample_weight(self, batch: rlt.PreprocessedRankingInput):
        """Return per-example inverse-propensity weights, or None if disabled."""
        weight = None
        if self.weighted_by_inverse_propensity:
            if isinstance(batch, rlt.PreprocessedRankingInput):
                assert batch.tgt_out_probs is not None
                weight = 1.0 / batch.tgt_out_probs
            else:
                raise NotImplementedError(
                    f"Sampling weighting not implemented for {type(batch)}"
                )
        return weight

    def _get_target_reward(self, batch: rlt.PreprocessedRankingInput):
        """Pick the regression target: slate reward for ranking input, else reward."""
        if isinstance(batch, rlt.PreprocessedRankingInput):
            target_reward = batch.slate_reward
        else:
            target_reward = batch.reward
        assert target_reward is not None
        return target_reward

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    #  its type `no_grad` is not callable.
    @torch.no_grad()
    def _compute_unweighted_loss(
        self, predicted_reward: torch.Tensor, target_reward: torch.Tensor
    ):
        # Re-evaluate the loss with unit weights so the reporter can log an
        # unweighted metric alongside the weighted training loss.
        return self.loss_fn(
            predicted_reward, target_reward, weight=torch.ones_like(predicted_reward)
        )

    def train_step_gen(
        self, training_batch: rlt.PreprocessedRankingInput, batch_idx: int
    ):
        """Yield the (possibly weighted) regression loss for one batch."""
        weight = self._get_sample_weight(training_batch)
        target_reward = self._get_target_reward(training_batch)
        predicted_reward = self.reward_net(training_batch).predicted_reward
        # Both tensors must be column vectors of matching length.
        assert (
            predicted_reward.shape == target_reward.shape
            and len(target_reward.shape) == 2
            and target_reward.shape[1] == 1
        )
        loss = self.loss_fn(predicted_reward, target_reward, weight)
        detached_loss = loss.detach().cpu()
        self.reporter.log(loss=detached_loss)
        if weight is not None:
            unweighted_loss = self._compute_unweighted_loss(
                predicted_reward, target_reward
            )
            self.reporter.log(unweighted_loss=unweighted_loss)
        if self.all_batches_processed % 10 == 0:
            logger.info(
                f"{self.all_batches_processed}-th batch: "
                f"{self.loss_type}={detached_loss.item()}"
            )
        yield loss

    # pyre-ignore inconsistent override because lightning doesn't use types
    def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
        """Log eval rewards/predictions and return the detached eval loss."""
        reward = self._get_target_reward(batch)
        self.reporter.log(eval_rewards=reward.flatten().detach().cpu())
        pred_reward = self.reward_net(batch).predicted_reward
        self.reporter.log(eval_pred_rewards=pred_reward.flatten().detach().cpu())
        weight = self._get_sample_weight(batch)
        loss = self.loss_fn(pred_reward, reward, weight)
        detached_loss = loss.detach().cpu()
        self.reporter.log(eval_loss=detached_loss)
        if weight is not None:
            unweighted_loss = self._compute_unweighted_loss(pred_reward, reward)
            self.reporter.log(eval_unweighted_loss=unweighted_loss)
        return detached_loss.item()

    def validation_epoch_end(self, outputs):
        # Track the best reward network by mean validation loss.
        self.reporter.update_best_model(np.mean(outputs), self.reward_net)

    def warm_start_components(self):
        """Components eligible for warm-starting from a previous run."""
        return ["reward_net"]
| 6,165 | 34.034091 | 85 | py |
ReAgent | ReAgent-master/reagent/training/dqn_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Optional, Tuple
import reagent.core.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning
from reagent.training.imitator_training import get_valid_actions_from_imitator
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class BCQConfig:
    """Config for batch-constrained Q-learning (BCQ)."""

    # 0 = max q-learning, 1 = imitation learning
    # Actions whose relative imitator probability falls below this threshold
    # are masked out of the max-Q computation.
    drop_threshold: float = 0.1
class DQNTrainer(DQNTrainerBaseLightning):
    """Trainer for (double) deep Q-learning over a discrete action space.

    Supports SARSA-style updates when ``rl.maxq_learning`` is False, per-action
    reward boosting, optional CPE (counterfactual policy evaluation) networks,
    and batch-constrained Q-learning (BCQ) via an imitator model.
    """

    @resolve_defaults
    def __init__(
        self,
        q_network,
        q_network_target,
        reward_network,
        q_network_cpe=None,
        q_network_cpe_target=None,
        metrics_to_score=None,
        evaluation: EvaluationParameters = field(  # noqa: B008
            default_factory=EvaluationParameters
        ),
        imitator=None,
        # Start DQNTrainerParameters. All parameters above should be
        # in the blocklist for DQNTrainerParameters in parameters.py
        actions: List[str] = field(default_factory=list),  # noqa: B008
        rl: RLParameters = field(default_factory=RLParameters),  # noqa: B008
        double_q_learning: bool = True,
        bcq: Optional[BCQConfig] = None,
        minibatch_size: int = 1024,
        minibatches_per_step: int = 1,
        optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
    ) -> None:
        """
        Args:
            q_network: states -> q-value for each action
            q_network_target: copy of q-network for training stability
            reward_network: states -> reward for each action
            q_network_cpe:
            q_network_cpe_target:
            metrics_to_score:
            imitator (optional): The behavior policy, used for BCQ training
            actions: list of action names
            rl: RLParameters
            double_q_learning: boolean flag to use double-q learning
            bcq: a config file for batch-constrained q-learning, defaults to normal
            minibatch_size: samples per minibatch
            minibatches_per_step: minibatch updates per step
            optimizer: q-network optimizer
            evaluation: evaluation params, primarily whether to use CPE in eval or not
        """
        super().__init__(
            rl,
            metrics_to_score=metrics_to_score,
            actions=actions,
            evaluation_parameters=evaluation,
        )
        assert self._actions is not None, "Discrete-action DQN needs action names"
        self.double_q_learning = double_q_learning
        self.minibatch_size = minibatch_size
        self.minibatches_per_step = minibatches_per_step or 1
        self.q_network = q_network
        self.q_network_target = q_network_target
        self.q_network_optimizer = optimizer
        self._initialize_cpe(
            reward_network, q_network_cpe, q_network_cpe_target, optimizer=optimizer
        )
        # Per-action additive reward boosts, laid out as a (1, num_actions) row.
        reward_boosts = torch.zeros([1, len(self._actions)])
        if rl.reward_boost is not None:
            # pyre-fixme[16]: `Optional` has no attribute `keys`.
            for k in rl.reward_boost.keys():
                i = self._actions.index(k)
                # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
                reward_boosts[0, i] = rl.reward_boost[k]
        self.register_buffer("reward_boosts", reward_boosts)

        # Batch constrained q-learning
        self.bcq = bcq is not None
        if self.bcq:
            assert bcq is not None
            self.bcq_drop_threshold = bcq.drop_threshold
            self.bcq_imitator = imitator

    def configure_optimizers(self):
        """Build q-network (and optional CPE) optimizers plus a SoftUpdate
        optimizer that tracks all target-network parameter pairs."""
        optimizers = []
        target_params = list(self.q_network_target.parameters())
        source_params = list(self.q_network.parameters())

        optimizers.append(
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q_network.parameters()
            )
        )
        if self.calc_cpe_in_training:
            (
                cpe_target_params,
                cpe_source_params,
                cpe_optimizers,
            ) = self._configure_cpe_optimizers()
            target_params += cpe_target_params
            source_params += cpe_source_params
            optimizers += cpe_optimizers

        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )

        return optimizers

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    #  its type `no_grad` is not callable.
    @torch.no_grad()
    def get_detached_model_outputs(
        self, state
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Gets the q values from the model and target networks"""
        q_values = self.q_network(state)
        q_values_target = self.q_network_target(state)
        return q_values, q_values_target

    def compute_discount_tensor(
        self, batch: rlt.DiscreteDqnInput, boosted_rewards: torch.Tensor
    ):
        """Return the per-example discount factor.

        Defaults to a constant gamma; uses gamma**time_diff or gamma**step
        when sequence-number/multi-step discounting is configured.
        """
        discount_tensor = torch.full_like(boosted_rewards, self.gamma)
        if self.use_seq_num_diff_as_time_diff:
            assert self.multi_steps is None
            discount_tensor = torch.pow(self.gamma, batch.time_diff.float())
        if self.multi_steps is not None:
            assert batch.step is not None
            discount_tensor = torch.pow(self.gamma, batch.step.float())
        return discount_tensor

    def compute_td_loss(
        self,
        batch: rlt.DiscreteDqnInput,
        boosted_rewards: torch.Tensor,
        discount_tensor: torch.Tensor,
    ):
        """Compute the temporal-difference loss for one batch.

        Uses max-Q (optionally BCQ-masked) or SARSA next-state values
        depending on ``self.maxq_learning``; also caches the detached
        Q-values of the current state in ``self.all_action_scores``.
        """
        not_done_mask = batch.not_terminal.float()
        all_next_q_values, all_next_q_values_target = self.get_detached_model_outputs(
            batch.next_state
        )
        if self.maxq_learning:
            # Compute max a' Q(s', a') over all possible actions using target network
            possible_next_actions_mask = batch.possible_next_actions_mask.float()
            if self.bcq:
                action_on_policy = get_valid_actions_from_imitator(
                    self.bcq_imitator,
                    batch.next_state,
                    self.bcq_drop_threshold,
                )
                possible_next_actions_mask *= action_on_policy
            next_q_values, max_q_action_idxs = self.get_max_q_values_with_target(
                all_next_q_values,
                all_next_q_values_target,
                possible_next_actions_mask,
            )
        else:
            # SARSA
            next_q_values, max_q_action_idxs = self.get_max_q_values_with_target(
                all_next_q_values,
                all_next_q_values_target,
                batch.next_action,
            )

        filtered_next_q_vals = next_q_values * not_done_mask

        target_q_values = boosted_rewards + (discount_tensor * filtered_next_q_vals)

        # Get Q-value of action taken
        all_q_values = self.q_network(batch.state)
        # pyre-fixme[16]: `DQNTrainer` has no attribute `all_action_scores`.
        self.all_action_scores = all_q_values.detach()
        q_values = torch.sum(all_q_values * batch.action, 1, keepdim=True)
        td_loss = self.q_network_loss(q_values, target_q_values.detach())
        return td_loss

    def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int):
        """Yield the TD loss, the CPE losses, and finally the soft update."""
        # TODO: calls to _maybe_run_optimizer removed, should be replaced with Trainer parameter
        self._check_input(training_batch)

        rewards = self.boost_rewards(training_batch.reward, training_batch.action)
        not_done_mask = training_batch.not_terminal.float()
        discount_tensor = self.compute_discount_tensor(training_batch, rewards)
        td_loss = self.compute_td_loss(training_batch, rewards, discount_tensor)
        yield td_loss
        td_loss = td_loss.detach()

        # Get Q-values of next states, used in computing cpe
        all_next_action_scores = self.q_network(training_batch.next_state).detach()
        logged_action_idxs = torch.argmax(training_batch.action, dim=1, keepdim=True)
        yield from self._calculate_cpes(
            training_batch,
            training_batch.state,
            training_batch.next_state,
            self.all_action_scores,
            all_next_action_scores,
            logged_action_idxs,
            discount_tensor,
            not_done_mask,
        )

        if self.maxq_learning:
            possible_actions_mask = training_batch.possible_actions_mask

        if self.bcq:
            action_on_policy = get_valid_actions_from_imitator(
                self.bcq_imitator, training_batch.state, self.bcq_drop_threshold
            )
            possible_actions_mask *= action_on_policy

        # Do we ever use model_action_idxs computed below?
        model_action_idxs = self.get_max_q_values(
            self.all_action_scores,
            possible_actions_mask if self.maxq_learning else training_batch.action,
        )[1]

        self._log_dqn(
            td_loss, logged_action_idxs, training_batch, rewards, model_action_idxs
        )

        # Use the soft update rule to update target network
        yield self.soft_update_result()

    def _log_dqn(
        self, td_loss, logged_action_idxs, training_batch, rewards, model_action_idxs
    ):
        """Log per-batch training metrics to the reporter and the logger."""
        self.reporter.log(
            td_loss=td_loss,
            logged_actions=logged_action_idxs,
            logged_propensities=training_batch.extras.action_probability,
            logged_rewards=rewards,
            logged_values=None,  # Compute at end of each epoch for CPE
            model_values=self.all_action_scores,
            model_values_on_logged_actions=None,  # Compute at end of each epoch for CPE
            model_action_idxs=model_action_idxs,
        )
        model_values = self._dense_to_action_dict(self.all_action_scores.mean(dim=0))
        action_histogram = self._dense_to_action_dict(
            training_batch.action.float().mean(dim=0)
        )
        if training_batch.extras.action_probability is None:
            logged_propensities = None
        else:
            logged_propensities = training_batch.extras.action_probability.mean(dim=0)
        model_action_idxs = self._dense_to_action_dict(
            torch.nn.functional.one_hot(
                model_action_idxs.squeeze(1), num_classes=self.num_actions
            )
            .float()
            .mean(dim=0)
        )
        self.logger.log_metrics(
            {
                "td_loss": td_loss,
                "logged_actions": action_histogram,
                "logged_propensities": logged_propensities,
                "logged_rewards": rewards.mean(),
                "model_values": model_values,
                "model_action_idxs": model_action_idxs,
            },
            step=self.all_batches_processed,
        )

    def _dense_to_action_dict(self, dense: torch.Tensor):
        """Map a length-num_actions tensor to {action_name: value}."""
        assert dense.size() == (
            self.num_actions,
        ), f"Invalid dense size {dense.size()} != {(self.num_actions,)}"
        retval = {}
        for i, a in enumerate(self._actions):
            retval[a] = dense[i]
        return retval

    def validation_step(self, batch, batch_idx):
        """Compute and log the eval TD loss, then defer to the base class."""
        if isinstance(batch, dict):
            batch = rlt.DiscreteDqnInput.from_dict(batch)
        rewards = self.boost_rewards(batch.reward, batch.action)
        discount_tensor = self.compute_discount_tensor(batch, rewards)
        td_loss = self.compute_td_loss(batch, rewards, discount_tensor)
        # Show eval_td_loss in a tensorboard graph
        self.log("eval_td_loss", td_loss)
        return super().validation_step(batch, batch_idx)
| 12,069 | 37.935484 | 96 | py |
ReAgent | ReAgent-master/reagent/training/td3_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import field
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE, RLParameters
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.rl_trainer_pytorch import RLTrainerMixin
logger = logging.getLogger(__name__)
class TD3Trainer(RLTrainerMixin, ReAgentLightningModule):
    """
    Twin Delayed Deep Deterministic Policy Gradient algorithm trainer
    as described in https://arxiv.org/pdf/1802.09477
    """

    @resolve_defaults
    def __init__(
        self,
        actor_network,
        q1_network,
        q2_network=None,
        # Start TD3TrainerParameters
        rl: RLParameters = field(default_factory=RLParameters),  # noqa: B008
        q_network_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        actor_network_optimizer: Optimizer__Union = field(  # noqa: B008
            default_factory=Optimizer__Union.default
        ),
        minibatch_size: int = 64,
        noise_variance: float = 0.2,
        noise_clip: float = 0.5,
        delayed_policy_update: int = 2,
        minibatches_per_step: int = 1,
    ) -> None:
        """
        Args:
            actor_network: states -> actions, trained to maximize value
            q1_network: states, action -> q-value
            q2_network (optional): double q-learning to stabilize training
                from overestimation bias
            rl (optional): an instance of the RLParameter class, which
                defines relevant hyperparameters
            q_network_optimizer (optional): the optimizer class and
                optimizer hyperparameters for the q network(s) optimizer
            actor_network_optimizer (optional): see q_network_optimizer
            minibatch_size (optional): the size of the minibatch
            noise_variance (optional): the variance of action noise added to smooth
                q-value estimates
            noise_clip (optional): the maximum absolute value of action noise added
                to smooth q-value estimates
            delayed_policy_update (optional): the ratio of q network updates
                to target and policy network updates
            minibatches_per_step (optional, TODO: currently unused): the number of minibatch updates
                per training step
        """
        super().__init__()
        self.rl_parameters = rl
        self.minibatch_size = minibatch_size
        self.minibatches_per_step = minibatches_per_step or 1

        self.q1_network = q1_network
        # Target networks start as deep copies of their source networks.
        self.q1_network_target = copy.deepcopy(self.q1_network)
        self.q_network_optimizer = q_network_optimizer

        self.q2_network = q2_network
        if self.q2_network is not None:
            self.q2_network_target = copy.deepcopy(self.q2_network)

        self.actor_network = actor_network
        self.actor_network_target = copy.deepcopy(self.actor_network)
        self.actor_network_optimizer = actor_network_optimizer

        self.noise_variance = noise_variance
        self.noise_clip_range = (-noise_clip, noise_clip)
        self.delayed_policy_update = delayed_policy_update

    def configure_optimizers(self):
        """Build q1/q2/actor optimizers plus a SoftUpdate optimizer covering
        every (target, source) parameter pair."""
        optimizers = []

        optimizers.append(
            self.q_network_optimizer.make_optimizer_scheduler(
                self.q1_network.parameters()
            )
        )
        if self.q2_network:
            optimizers.append(
                self.q_network_optimizer.make_optimizer_scheduler(
                    self.q2_network.parameters()
                )
            )
        optimizers.append(
            self.actor_network_optimizer.make_optimizer_scheduler(
                self.actor_network.parameters()
            )
        )

        # soft-update
        target_params = list(self.q1_network_target.parameters())
        source_params = list(self.q1_network.parameters())
        if self.q2_network:
            target_params += list(self.q2_network_target.parameters())
            source_params += list(self.q2_network.parameters())
        target_params += list(self.actor_network_target.parameters())
        source_params += list(self.actor_network.parameters())
        optimizers.append(
            SoftUpdate.make_optimizer_scheduler(
                target_params, source_params, tau=self.tau
            )
        )

        return optimizers

    def train_step_gen(self, training_batch: rlt.PolicyNetworkInput, batch_idx: int):
        """
        IMPORTANT: the input action here is assumed to be preprocessed to match the
        range of the output of the actor.
        """
        assert isinstance(training_batch, rlt.PolicyNetworkInput)

        state = training_batch.state
        action = training_batch.action
        next_state = training_batch.next_state
        reward = training_batch.reward
        not_terminal = training_batch.not_terminal

        # Generate target = r + y * min (Q1(s',pi(s')), Q2(s',pi(s')))
        with torch.no_grad():
            next_actor = self.actor_network_target(next_state).action
            # Target-policy smoothing: clipped Gaussian noise on the next action,
            # then clamp back into the valid continuous-action range.
            noise = torch.randn_like(next_actor) * self.noise_variance
            next_actor = (next_actor + noise.clamp(*self.noise_clip_range)).clamp(
                *CONTINUOUS_TRAINING_ACTION_RANGE
            )
            next_state_actor = (next_state, rlt.FeatureData(next_actor))
            next_q_value = self.q1_network_target(*next_state_actor)

            if self.q2_network is not None:
                next_q_value = torch.min(
                    next_q_value, self.q2_network_target(*next_state_actor)
                )

            target_q_value = reward + self.gamma * next_q_value * not_terminal.float()

        # Optimize Q1 and Q2
        q1_value = self.q1_network(state, action)
        q1_loss = F.mse_loss(q1_value, target_q_value)
        if batch_idx % self.trainer.log_every_n_steps == 0:
            self.reporter.log(
                q1_loss=q1_loss,
                q1_value=q1_value,
                next_q_value=next_q_value,
                target_q_value=target_q_value,
            )
            self.log("td_loss", q1_loss, prog_bar=True)
        yield q1_loss

        if self.q2_network:
            q2_value = self.q2_network(state, action)
            q2_loss = F.mse_loss(q2_value, target_q_value)
            if batch_idx % self.trainer.log_every_n_steps == 0:
                self.reporter.log(
                    q2_loss=q2_loss,
                    q2_value=q2_value,
                )
            yield q2_loss

        # Only update actor and target networks after a fixed number of Q updates
        if batch_idx % self.delayed_policy_update == 0:
            actor_action = self.actor_network(state).action
            actor_q1_value = self.q1_network(state, rlt.FeatureData(actor_action))
            actor_loss = -(actor_q1_value.mean())
            if batch_idx % self.trainer.log_every_n_steps == 0:
                self.reporter.log(
                    actor_loss=actor_loss,
                    actor_q1_value=actor_q1_value,
                )
            yield actor_loss

            # Use the soft update rule to update the target networks
            result = self.soft_update_result()
            yield result
        else:
            # Yielding None prevents the actor and target networks from updating
            yield None
            yield None
| 7,751 | 38.350254 | 100 | py |
ReAgent | ReAgent-master/reagent/training/discrete_crr_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Note: this files is modeled after td3_trainer.py
import logging
from typing import List, Tuple
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.configuration import resolve_defaults
from reagent.core.dataclasses import field
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.optimizer import Optimizer__Union, SoftUpdate
from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning
from torch import distributions as pyd
logger = logging.getLogger(__name__)
class DiscreteCRRTrainer(DQNTrainerBaseLightning):
"""
Critic Regularized Regression (CRR) algorithm trainer
as described in https://arxiv.org/abs/2006.15134
"""
@resolve_defaults
def __init__(
self,
actor_network,
actor_network_target,
q1_network,
q1_network_target,
reward_network,
q2_network=None,
q2_network_target=None,
q_network_cpe=None,
q_network_cpe_target=None,
metrics_to_score=None,
evaluation: EvaluationParameters = field( # noqa: B008
default_factory=EvaluationParameters
),
# Start CRRTrainerParameters. All parameters above should be
# in the blocklist for CRRTrainerParameters in parameters.py
rl: RLParameters = field(default_factory=RLParameters), # noqa: B008
double_q_learning: bool = True,
q_network_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
actor_network_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
use_target_actor: bool = False,
actions: List[str] = field(default_factory=list), # noqa: B008
delayed_policy_update: int = 1,
beta: float = 1.0,
entropy_coeff: float = 0.0,
clip_limit: float = 10.0,
max_weight: float = 20.0,
) -> None:
"""
Args:
actor_network: states -> actions, trained to maximize value
actor_network_target: copy of actor network for training stability
q1_network: states -> q-value for all actions
q1_network_target: copy of q-network for training stability
q2_network (optional): double q-learning to stabilize training
from overestimation bias. The presence of q2_network is specified
in discrete_crr.py using the config parameter double_q_learning
q2_network_target (optional): copy of q-network for training stability
rl (optional): an instance of the RLParameter class, which
defines relevant hyperparameters
q_network_optimizer (optional): the optimizer class and
optimizer hyperparameters for the q network(s) optimizer
actor_network_optimizer (optional): see q_network_optimizer
use_target_actor (optional): specifies whether target actor is used
delayed_policy_update (optional): the ratio of q network updates
to target and policy network updates
beta: coefficient for KL-divergence policy constaint regularization of CRR
see eq(5) in https://arxiv.org/pdf/2006.15134.pdf. With large beta, the output
policy of CRR can not leaves too far away from the logged policy
entropy_coeff: coefficient for entropy regularization
clip_limit: threshold for importance sampling when compute entropy
regularization using offline samples
max_weight: the maximum possible action weight in the actor loss
Explaination of entropy regularization:
Entropy regularization punishes deterministic policy and encourages
"unifom" policy. Entropy regularized MDP can be viewed as add the term
(-entropy_coeff * pi_ratio * log_pi_b) to each reward. For detailed
formulation of entropy regularized please see eq.(9) & eq.(10) in
https://arxiv.org/pdf/2007.06558.pdf
"""
super().__init__(
rl,
metrics_to_score=metrics_to_score,
actions=actions,
evaluation_parameters=evaluation,
)
self._actions = actions
assert self._actions is not None, "Discrete-action CRR needs action names"
self.rl_parameters = rl
self.double_q_learning = double_q_learning
self.use_target_actor = use_target_actor
self.q1_network = q1_network
self.q1_network_target = q1_network_target
self.q_network_optimizer = q_network_optimizer
self.q2_network = q2_network
if self.q2_network is not None:
assert (
q2_network_target is not None
), "q2_network provided without a target network"
self.q2_network_target = q2_network_target
self.actor_network = actor_network
self.actor_network_target = actor_network_target
self.actor_network_optimizer = actor_network_optimizer
self.delayed_policy_update = delayed_policy_update
self.register_buffer("reward_boosts", None)
self.reward_boosts = torch.zeros([1, len(self._actions)])
if rl.reward_boost is not None:
# pyre-fixme[16]: Optional type has no attribute `keys`.
for k in rl.reward_boost.keys():
i = self._actions.index(k)
# pyre-fixme[16]: Optional type has no attribute `__getitem__`.
self.reward_boosts[0, i] = rl.reward_boost[k]
self._initialize_cpe(
reward_network,
q_network_cpe,
q_network_cpe_target,
optimizer=q_network_optimizer,
)
self.beta = beta
self.entropy_coeff = entropy_coeff
self.clip_limit = clip_limit
self.max_weight = max_weight
@property
def q_network(self):
return self.q1_network
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def get_detached_model_outputs(self, state) -> Tuple[torch.Tensor, None]:
# This function is only used in evaluation_data_page.py, in create_from_tensors_dqn(),
# in order to compute model propensities. The definition of this function in
# dqn_trainer.py returns two values, and so we also return two values here, for
# consistency.
action_scores = self.actor_network(state).action
return action_scores, None
def configure_optimizers(self):
optimizers = []
target_params = list(self.q1_network_target.parameters())
source_params = list(self.q1_network.parameters())
optimizers.append(
self.q_network_optimizer.make_optimizer_scheduler(
self.q1_network.parameters()
)
)
if self.q2_network:
target_params += list(self.q2_network_target.parameters())
source_params += list(self.q2_network.parameters())
optimizers.append(
self.q_network_optimizer.make_optimizer_scheduler(
self.q2_network.parameters()
)
)
target_params += list(self.actor_network_target.parameters())
source_params += list(self.actor_network.parameters())
optimizers.append(
self.actor_network_optimizer.make_optimizer_scheduler(
self.actor_network.parameters()
)
)
if self.calc_cpe_in_training:
(
cpe_target_params,
cpe_source_params,
cpe_optimizers,
) = self._configure_cpe_optimizers()
target_params += cpe_target_params
source_params += cpe_source_params
optimizers += cpe_optimizers
optimizers.append(
SoftUpdate.make_optimizer_scheduler(
target_params, source_params, tau=self.tau
)
)
return optimizers
def compute_target_q_values(self, next_state, rewards, not_terminal, next_q_values):
if self.use_target_actor:
next_state_actor_output = self.actor_network_target(next_state).action
else:
next_state_actor_output = self.actor_network(next_state).action
next_dist = pyd.Categorical(logits=next_state_actor_output)
next_V = (next_q_values * next_dist.probs).sum(dim=1, keepdim=True)
if self.q2_network is not None:
next_q2_values = self.q2_network_target(next_state)
next_V2 = (next_q2_values * next_dist.probs).sum(dim=1, keepdim=True)
next_V = torch.min(next_V, next_V2)
target_q_values = rewards + self.gamma * next_V * not_terminal.float()
return target_q_values
def compute_td_loss(self, q_network, state, action, target_q_values):
all_q_values = q_network(state)
q_values = (all_q_values * action).sum(dim=1, keepdim=True)
q_loss = F.mse_loss(q_values, target_q_values)
return q_loss
def compute_actor_loss(
self, batch_idx, action, logged_action_probs, all_q_values, all_action_scores
):
# Only update actor network after a fixed number of Q updates
if batch_idx % self.delayed_policy_update != 0:
# Yielding None prevents the actor network from updating
actor_loss = None
return (actor_loss, actor_loss)
# dist is the distribution of actions derived from the actor's outputs (logits)
dist = pyd.Categorical(logits=all_action_scores)
# Note: D = dist.probs is equivalent to:
# e_x = torch.exp(actor_actions)
# D = e_x / e_x.sum(dim=1, keepdim=True)
# That is, dist gives a softmax distribution over actor's outputs
# values is the vector of state values in this batch
values = (all_q_values * dist.probs).sum(dim=1, keepdim=True)
advantages = all_q_values - values
# Note: the above statement subtracts the "values" column vector from
# every column of the all_q_values matrix, giving us the advantages
# of every action in the present state
weight = torch.clamp(
((1 / self.beta) * (advantages * action).sum(dim=1, keepdim=True)).exp(),
0,
self.max_weight,
)
# Remember: training_batch.action is in the one-hot format
logged_action_idxs = torch.argmax(action, dim=1, keepdim=True)
# Note: action space is assumed to be discrete with actions
# belonging to the set {0, 1, ..., action_dim-1}. Therefore,
# advantages.gather(1, logged_action_idxs) will select, for each data point
# (row i of the Advantage matrix "advantages"), the element with index
# action.float_features[i]
# Note: dist.logits already gives log(p), which can be verified by
# comparing dist.probs and dist.logits.
# https://pytorch.org/docs/master/distributions.html#multinomial
# states: logits (Tensor) – event log probabilities
# log_pi_b is the log of the probability assigned by the
# actor (abbreviated as pi) to the actions of the behavioral (b) policy
log_pi_b = dist.log_prob(logged_action_idxs.squeeze(1)).unsqueeze(1)
# entropy regularization
pi_t = (dist.probs * action).sum(dim=1, keepdim=True)
if self.entropy_coeff > 0:
pi_b = logged_action_probs.view(pi_t.shape)
assert torch.min(pi_b) > 0, "Logged action probability <= 0"
pi_ratio = torch.clip(pi_t / pi_b, min=1e-4, max=self.clip_limit)
entropy = (pi_ratio * log_pi_b).mean()
else:
# dummy value
entropy = 0
# Note: the CRR loss for each datapoint (and the magnitude of the corresponding
# parameter update) is proportional to log_pi_b * weight. Therefore, as mentioned
# at the top of Section 3.2, the actor on the one hand has incentive to assign
# larger probabilities to the actions observed in the dataset (so as to reduce
# the magnitude of log_pi_b), but on the other hand it gives preference to doing
# this on datapoints where weight is large (i.e., those points on which the
# Q-value of the observed action is large).
actor_loss_without_reg = (-log_pi_b * weight.detach()).mean()
actor_loss = (-log_pi_b * weight.detach()).mean() + self.entropy_coeff * entropy
return actor_loss_without_reg, actor_loss
def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int):
    """
    Generator yielding one loss per configured optimizer, in registration
    order: q1 TD loss, (optional) q2 TD loss, actor loss, the CPE losses,
    and finally the soft-update step for the target networks.

    IMPORTANT: the input action here is preprocessed according to the
    training_batch type, which in this case is DiscreteDqnInput. Hence,
    the preprocessor in the DiscreteDqnInputMaker class in the
    trainer_preprocessor.py is used, which converts the action taken to a
    one-hot representation.
    """
    self._check_input(training_batch)
    state = training_batch.state
    # One-hot encoded logged actions (see docstring above).
    action = training_batch.action
    next_state = training_batch.next_state
    not_terminal = training_batch.not_terminal
    rewards = self.boost_rewards(training_batch.reward, training_batch.action)
    # Remember: training_batch.action is in the one-hot format
    logged_action_idxs = torch.argmax(action, dim=1, keepdim=True)
    discount_tensor = torch.full_like(rewards, self.gamma)
    # Target network's Q-values on the next state: used for the TD target
    # here and (detached) for the CPE calculations further below.
    next_q_values = self.q1_network_target(next_state)
    target_q_values = self.compute_target_q_values(
        next_state, rewards, not_terminal, next_q_values
    )
    q1_loss = self.compute_td_loss(self.q1_network, state, action, target_q_values)
    # Show td_loss on the progress bar and in tensorboard graphs:
    self.log("td_loss", q1_loss, prog_bar=True)
    yield q1_loss
    if self.q2_network:
        # Optional second critic trained against the same TD target.
        q2_loss = self.compute_td_loss(
            self.q2_network, state, action, target_q_values
        )
        yield q2_loss
    all_q_values = self.q1_network(state)  # Q-values of all actions
    # Note: action_dim (the length of each row of the actor_action
    # matrix obtained below) is assumed to be > 1.
    all_action_scores = self.actor_network(state).action
    logged_action_probs = training_batch.extras.action_probability
    actor_loss_without_reg, actor_loss = self.compute_actor_loss(
        batch_idx, action, logged_action_probs, all_q_values, all_action_scores
    )
    # self.reporter.log(
    #     actor_loss=actor_loss,
    #     actor_q1_value=actor_q1_values,
    # )
    # Show actor_loss on the progress bar and also in Tensorboard graphs
    self.log("actor_loss_without_reg", actor_loss_without_reg, prog_bar=True)
    self.log("actor_loss", actor_loss, prog_bar=True)
    yield actor_loss
    # Losses for counterfactual policy evaluation (CPE); one loss is yielded
    # per CPE optimizer.
    yield from self._calculate_cpes(
        training_batch,
        state,
        next_state,
        all_action_scores,
        next_q_values.detach(),
        logged_action_idxs,
        discount_tensor,
        not_terminal.float(),
    )
    # TODO: rename underlying function to get_max_possible_values_and_idxs
    model_action_idxs = self.get_max_q_values(
        all_action_scores,
        training_batch.possible_actions_mask if self.maxq_learning else action,
    )[1]
    self.reporter.log(
        logged_actions=logged_action_idxs,
        td_loss=q1_loss,
        logged_propensities=training_batch.extras.action_probability,
        logged_rewards=rewards,
        model_values=all_action_scores,
        model_action_idxs=model_action_idxs,
    )
    # Use the soft update rule to update the target networks.
    # Note: this yield has to be the last one, since SoftUpdate is the last
    # optimizer added in the configure_optimizers() function.
    result = self.soft_update_result()
    yield result
def validation_step(self, batch, batch_idx):
    """
    Compute and log the CRR losses on a single validation batch.

    Mirrors train_step_gen() but only logs the losses. The actual
    evaluation bookkeeping is delegated to the base-class implementation
    (dqn_trainer_base.py) via the super() call, which returns an
    EvaluationDataPage for this batch; validation_epoch_end() therefore
    receives a list of EvaluationDataPages.
    """
    if isinstance(batch, dict):
        batch = rlt.DiscreteDqnInput.from_dict(batch)

    # Unpack the validation transition.
    obs, acted = batch.state, batch.action
    next_obs, alive = batch.next_state, batch.not_terminal
    boosted_rewards = self.boost_rewards(batch.reward, acted)

    # TD targets from the target Q-network.
    next_q = self.q1_network_target(next_obs)
    q_targets = self.compute_target_q_values(next_obs, boosted_rewards, alive, next_q)

    # Current estimates used by the actor loss.
    q_vals = self.q1_network(obs)
    actor_scores = self.actor_network(obs).action
    behavior_probs = batch.extras.action_probability

    # Losses are computed purely for logging here.
    loss_no_reg, loss_with_reg = self.compute_actor_loss(
        batch_idx, acted, behavior_probs, q_vals, actor_scores
    )
    td = self.compute_td_loss(self.q1_network, obs, acted, q_targets)
    self.log("eval_actor_loss_without_reg", loss_no_reg)
    self.log("eval_actor_loss", loss_with_reg)
    self.log("eval_td_loss", td)
    return super().validation_step(batch, batch_idx)
| 18,427 | 41.266055 | 94 | py |
ReAgent | ReAgent-master/reagent/training/gradient_free/es_worker.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import torch
import torch.distributed as distributed
import torch.nn
import torch.optim
from reagent.core.parameters import EvolutionParameters
from reagent.training.gradient_free.evolution_pool import EvolutionPool
from torch.distributed import ProcessGroup
logger = logging.getLogger(__name__)
class EsWorker:
    """One worker participating in distributed evolution-strategy training.

    Each epoch: evaluate the local population, all-reduce (average) the
    rewards across workers, apply the averaged rewards to mutate the shared
    parent, and report the new parent's reward.
    """

    def __init__(
        self,
        individual_pool: EvolutionPool,
        es_params: EvolutionParameters,
        process_group: ProcessGroup,
        num_nodes: int,
    ) -> None:
        """
        Args:
            individual_pool: pool that owns the parent/children tensors and
                reward computation.
            es_params: evolution hyperparameters.
            process_group: torch.distributed group used for the all_reduce.
            num_nodes: number of machines participating (used to turn the
                summed rewards into an average).
        """
        logger.info("WORKER STARTED")
        self.iteration = 0
        self.most_recent_avg_rewards = 0.0
        self.individual_pool = individual_pool
        self.es_params = es_params
        self.process_group = process_group
        self.num_nodes = num_nodes

    def run_epoch(self) -> float:
        """Run one ES generation and return the updated parent's reward."""
        logger.info("Computing reward")
        rewards = self.individual_pool.compute_all_local_rewards()
        logger.info("Pushing reward")
        # Sum the rewards across all machines
        distributed.all_reduce(rewards, self.process_group)
        # Divide the rewards by the number of machines. We do this because
        # there is no "average" all_reduce operator.
        rewards /= self.num_nodes
        self.iteration += 1
        self.individual_pool.apply_global_reward(rewards, self.iteration)
        # Fix: previously a same-named *local* variable shadowed the
        # instance attribute, so self.most_recent_avg_rewards (initialized
        # in __init__) was never updated. Keep it in sync here.
        self.most_recent_avg_rewards = float(torch.mean(rewards))
        new_parent_reward = self.individual_pool.compute_local_reward(
            self.individual_pool.parent_tensors
        )
        logger.info(
            "ITERATION: {0} MEAN REWARD: {1}, NEW PARENT REWARD: {2}".format(
                self.iteration, self.most_recent_avg_rewards, new_parent_reward
            )
        )
        return new_parent_reward
| 1,861 | 30.033333 | 77 | py |
ReAgent | ReAgent-master/reagent/training/gradient_free/ars_util.py | from operator import itemgetter
import numpy as np
import torch
"""
Utility functions for the Augmented Random Search (ARS) algorithm
based on the paper "Simple random search provides a competitive approach
to reinforcement learning", Mania et al.
https://arxiv.org/abs/1803.07055
Here, we show an example of training a data reweighting policy using ARS. The policy
is learned to weight each sample for training a supervised learning model. ARS is a
competitive alternative to the policy gradient method in "Data Valuation using
Reinforcement Learning", Yoon, Arik, and Pfister.
https://arxiv.org/abs/1909.11671
def reward_func(pos_param, neg_param):
# Return rewards for positively/negatively perturbed parameters
# model = a supervised learning model
# X = training features
# y = labels
# Initialize a supervised learning model
model_pos = model.init()
# Sample weights are bounded within (0, 1)
pos_weight = torch.sigmoid(torch.matmul(torch.column_stack((X, y)), pos_param))
model_pos.fit(X, y, sample_weight=pos_weight)
r_pos = metric(model_pos.predict(X_e), y_e)
model_neg = model.init()
neg_weight = torch.sigmoid(torch.matmul(torch.column_stack((X, y)), neg_param))
model_neg.fit(X, y, sample_weight=neg_weight)
r_neg = metric(model_neg.predict(X_e), y_e)
return (r_pos, r_neg)
# Training
# feature_dim = feature dimension + 1 (for label)
# n_pert = given number of random perturbations
# alpha = step size
# noise = noise level (between 0 ~ 1) added to the random perturbations
ars_opt = ARSOptimizer(feature_dim, n_pert, alpha=alpha, noise=noise)
for _ in range(n_generations):
perturbed_params = ars_opt.sample_perturbed_params()
rewards = []
for idx in range(0, len(perturbed_params)):
pos_param, neg_param = perturbed_params[idx]
rewards.extend(reward_func(pos_param, neg_param))
ars_opt.update_ars_params(rewards)
"""
class ARSOptimizer:
    """Gradient-free optimizer implementing Augmented Random Search (ARS).

    Maintains a parameter vector ``ars_params`` and improves it by probing
    symmetric (+/-) random perturbations, then stepping along a
    reward-weighted combination of the best-performing directions.
    The optimizer is supposed to *maximize* the objective.
    """

    def __init__(
        self,
        feature_dim,
        n_pert=10,
        rand_ars_params=False,
        alpha=1,
        noise=1,
        b_top=None,
    ):
        # feature_dim: length of the parameter vector being optimized.
        self.feature_dim = feature_dim
        # Start from a random point if requested, otherwise from zeros.
        if rand_ars_params:
            self.ars_params = np.random.randn(feature_dim)
        else:
            self.ars_params = np.zeros(feature_dim)
        self.alpha = alpha  # step size
        self.noise = noise  # perturbation magnitude
        self.n_pert = n_pert  # number of (+/-) perturbation pairs
        # Number of top-performing directions used in each update.
        self.b_top = n_pert if b_top is None else b_top
        # Directions sampled by sample_perturbed_params(), consumed by
        # update_ars_params().
        self.perturbations = []

    def update_ars_params(self, rewards: torch.Tensor):
        """Apply one ARS update from the perturbation rewards.

        ``rewards`` must be laid out as
        [reward_pert1_pos, reward_pert1_neg, reward_pert2_pos, reward_pert2_neg, ...]
        """
        assert (
            len(self.perturbations) > 0
        ), "must call sample_perturbed_params before this function"
        assert rewards.shape == (
            2 * self.n_pert,
        ), "rewards must have length 2 * n_pert"
        r = rewards.numpy()
        # Score each direction by the better of its two rewards, and scale
        # the stored direction by the (pos - neg) reward difference.
        scores = {}
        for k in range(self.n_pert):
            r_pos, r_neg = r[2 * k], r[2 * k + 1]
            scores[k] = max(r_pos, r_neg)
            self.perturbations[k] *= r_pos - r_neg
        sigma = np.std(r)
        # Sum the b_top best-scoring directions (Python's sort is stable,
        # so ties keep their original order).
        top = sorted(scores, key=scores.get, reverse=True)[: self.b_top]
        step = sum(self.perturbations[k] for k in top)
        self.ars_params = self.ars_params + self.alpha * step / (
            self.b_top * (sigma if sigma > 0 else 1)
        )
        self.perturbations = []

    def sample_perturbed_params(self):
        """Draw n_pert fresh directions; return (pos_param, neg_param) tuples."""
        self.perturbations = []
        pairs = []
        for _ in range(self.n_pert):
            direction = np.random.randn(self.feature_dim)
            self.perturbations.append(direction)
            pos = torch.from_numpy(self.ars_params + self.noise * direction).float()
            neg = torch.from_numpy(self.ars_params - self.noise * direction).float()
            pairs.append((pos, neg))
        return pairs
| 4,380 | 34.909836 | 87 | py |
ReAgent | ReAgent-master/reagent/training/gradient_free/evolution_pool.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List
import torch
import torch.fb.rendezvous.zeus
import torch.nn
import torch.optim
from reagent.core.parameters import EvolutionParameters
logger = logging.getLogger(__name__)
MAX_RNG_SEED = 100000
class EvolutionPool:
    """
    Handles spawning new individuals from a parent, computing an estimated gradient,
    and applying that gradient to mutate the parent.

    The parent is a dict of named tensors. Each child is the parent plus
    Gaussian noise of std ``es_params.mutation_power``; the children's rewards
    are combined into a score-function gradient estimate on the parent, which
    is then applied with Adam.
    """

    def __init__(
        self,
        seed: int,
        es_params: EvolutionParameters,
        tensor_sizes: Dict[str, List[int]],
    ) -> None:
        # seed: base RNG seed. It must be < MAX_RNG_SEED so the per-iteration
        # seeds computed in populate_children() (iteration * MAX_RNG_SEED + seed)
        # are unique per (iteration, seed) pair.
        # tensor_sizes: maps tensor name -> shape for every tensor an
        # individual consists of.
        self.es_params = es_params
        self.tensor_sizes = tensor_sizes
        self.seed = seed
        assert self.seed < MAX_RNG_SEED, "The random seed must be less than " + str(
            MAX_RNG_SEED
        )
        logger.info("Starting pool with RNG seed: " + str(self.seed))
        # Fill the population with empty values: will populate later
        self.population_tensors: List[Dict[str, torch.Tensor]] = []
        for _ in range(es_params.population_size):
            individual = {}
            for tensor_name, tensor_size in self.tensor_sizes.items():
                individual[tensor_name] = torch.zeros(tensor_size, dtype=torch.float)
            self.population_tensors.append(individual)
        torch.manual_seed(self.seed)
        # The parent starts from random tensors. A random .grad is attached so
        # Adam has gradient storage to accumulate into before the first update.
        self.parent_tensors: Dict[str, torch.Tensor] = {}
        for tensor_name, tensor_size in self.tensor_sizes.items():
            self.parent_tensors[tensor_name] = torch.randn(
                tensor_size, dtype=torch.float
            )
            # pyre-fixme[41]: `grad` cannot be reassigned. It is a read-only property.
            self.parent_tensors[tensor_name].grad = torch.randn(
                tensor_size, dtype=torch.float
            )
        self.optimizer = torch.optim.Adam(
            self.parent_tensors.values(), lr=self.es_params.learning_rate
        )
        self.populate_children(0)

    def populate_children(self, iteration: int):
        """
        (Re)sample every child as parent + N(0, mutation_power) noise, in place.

        Reseeding with ``iteration * MAX_RNG_SEED + seed`` makes the sampled
        population a deterministic function of (iteration, seed) — presumably
        so distributed workers can regenerate identical children; confirm
        against the callers.
        """
        torch.manual_seed(iteration * MAX_RNG_SEED + self.seed)
        for individual in self.population_tensors:
            for tensor_name, parent_tensor in self.parent_tensors.items():
                individual_tensor = individual[tensor_name]
                # Sample the noise in place, then shift by the parent.
                individual_tensor.normal_(0, self.es_params.mutation_power)
                individual_tensor.add_(parent_tensor)

    def apply_global_reward(self, rewards: torch.Tensor, next_iteration: int):
        """
        Turn per-child rewards into a gradient estimate on the parent, take
        one Adam step, then regenerate the children for ``next_iteration``.

        When the reward standard deviation is (near) zero there is no signal
        to follow, so the parameter update is skipped; children are
        re-sampled either way.
        """
        std_dev = torch.std(rewards)
        if torch.abs(std_dev) > 1e-6:
            normalized_rewards = (rewards - torch.mean(rewards)) / std_dev
            for parent_tensor in self.parent_tensors.values():
                parent_tensor.grad.zero_()
            for i, individual in enumerate(self.population_tensors):
                for tensor_name, parent_tensor in self.parent_tensors.items():
                    individual_tensor = individual[tensor_name]
                    # Subtract the parent to get the gradient estimate
                    # pyre-fixme[16]: `Tensor` has no attribute `sub_`.
                    individual_tensor.sub_(parent_tensor)
                    # Amplify the gradient by the reward
                    individual_tensor.mul_(normalized_rewards[i])
                    # Divide by a normalizing constant; the -1 flips the sign
                    # so that Adam (a minimizer) ascends the reward.
                    individual_tensor.div_(
                        self.es_params.population_size
                        * self.es_params.mutation_power
                        * -1
                    )
                    # pyre-fixme[41]: `grad` cannot be reassigned. It is a read-only
                    # property.
                    parent_tensor.grad += individual_tensor
            self.optimizer.step()
        self.populate_children(next_iteration)

    def compute_all_local_rewards(self):
        """Return a float tensor with compute_local_reward() for every child."""
        return torch.tensor(
            [
                self.compute_local_reward(individual)
                for individual in self.population_tensors
            ],
            dtype=torch.float,
        )

    def compute_local_reward(self, individual):
        """
        Given an individual as a dict of named tensors, return the reward of
        this policy. Subclasses must override.
        """
        raise NotImplementedError()
class OneMaxEvolutionPool(EvolutionPool):
    """
    A simple example of an evolution pool. Reward approaches its maximum as
    the raw "data" tensor tends to [inf, -inf, inf, -inf, ...]: even
    positions are pushed toward sigmoid 1 and odd positions toward sigmoid 0.
    """

    def compute_local_reward(self, individual):
        """Average per-element score of the sigmoid-squashed "data" tensor."""
        probs = torch.sigmoid(individual["data"])
        even_score = torch.sum(probs[0::2])
        odd_score = torch.sum(1 - probs[1::2])
        return (even_score + odd_score) / probs.shape[0]
| 4,802 | 35.112782 | 86 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.