ReAgent | ReAgent-master/reagent/training/world_model/mdnrnn_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Optional
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import MDNRNNTrainerParameters
from reagent.models.mdn_rnn import gmm_loss
from reagent.models.world_model import MemoryNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
logger = logging.getLogger(__name__)
class MDNRNNTrainer(ReAgentLightningModule):
"""Trainer for MDN-RNN"""
def __init__(
self,
memory_network: MemoryNetwork,
params: MDNRNNTrainerParameters,
cum_loss_hist: int = 100,
):
super().__init__()
self.memory_network = memory_network
self.params = params
def configure_optimizers(self):
optimizers = []
optimizers.append(
torch.optim.Adam(
self.memory_network.mdnrnn.parameters(), lr=self.params.learning_rate
)
)
return optimizers
def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
(seq_len, batch_size, state_dim) = training_batch.state.float_features.shape
losses = self.get_loss(training_batch, state_dim)
detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()}
self.reporter.log(
loss=detached_losses["loss"],
gmm=detached_losses["gmm"],
bce=detached_losses["bce"],
mse=detached_losses["mse"],
)
if self.all_batches_processed % 10 == 0:
logger.info(
                f'loss={detached_losses["loss"]}, gmm={detached_losses["gmm"]}, bce={detached_losses["bce"]}, mse={detached_losses["mse"]}'
)
loss = losses["loss"]
        # TODO: Must set up (or mock) a trainer and a LoggerConnector to call self.log()!
if self.trainer is not None and self.trainer.logger is not None:
self.log("td_loss", loss, prog_bar=True)
yield loss
def validation_step( # pyre-ignore inconsistent override because lightning doesn't use types
self,
training_batch: rlt.MemoryNetworkInput,
batch_idx: int,
):
(seq_len, batch_size, state_dim) = training_batch.state.float_features.shape
losses = self.get_loss(training_batch, state_dim)
detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()}
self.reporter.log(
eval_loss=detached_losses["loss"],
eval_gmm=detached_losses["gmm"],
eval_bce=detached_losses["bce"],
eval_mse=detached_losses["mse"],
)
loss = losses["loss"]
self.log("td_loss", loss, prog_bar=True)
return loss
def test_step( # pyre-ignore inconsistent override because lightning doesn't use types
self,
training_batch: rlt.MemoryNetworkInput,
batch_idx: int,
):
(seq_len, batch_size, state_dim) = training_batch.state.float_features.shape
losses = self.get_loss(training_batch, state_dim)
detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()}
self.reporter.log(
test_loss=detached_losses["loss"],
test_gmm=detached_losses["gmm"],
test_bce=detached_losses["bce"],
test_mse=detached_losses["mse"],
)
loss = losses["loss"]
self.log("td_loss", loss, prog_bar=True)
return loss
def get_loss(
self, training_batch: rlt.MemoryNetworkInput, state_dim: Optional[int] = None
):
"""
Compute losses:
GMMLoss(next_state, GMMPredicted) / (STATE_DIM + 2)
+ MSE(reward, predicted_reward)
+ BCE(not_terminal, logit_not_terminal)
        The STATE_DIM + 2 factor counteracts the fact that the GMM loss scales
        approximately linearly with STATE_DIM, the dimension of the states. All losses
        are averaged over both the batch and sequence dimensions (the first two
        dimensions).
:param training_batch:
training_batch has these fields:
- state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
- action: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor
- reward: (SEQ_LEN, BATCH_SIZE) torch tensor
- not-terminal: (SEQ_LEN, BATCH_SIZE) torch tensor
- next_state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
:param state_dim: the dimension of states. If provided, use it to normalize
gmm loss
:returns: dictionary of losses, containing the gmm, the mse, the bce and
the averaged loss.
"""
assert isinstance(training_batch, rlt.MemoryNetworkInput)
# mdnrnn's input should have seq_len as the first dimension
mdnrnn_output = self.memory_network(
training_batch.state, rlt.FeatureData(training_batch.action)
)
# mus, sigmas: [seq_len, batch_size, num_gaussian, state_dim]
mus, sigmas, logpi, rs, nts = (
mdnrnn_output.mus,
mdnrnn_output.sigmas,
mdnrnn_output.logpi,
mdnrnn_output.reward,
mdnrnn_output.not_terminal,
)
next_state = training_batch.next_state.float_features
not_terminal = training_batch.not_terminal.float()
reward = training_batch.reward
if self.params.fit_only_one_next_step:
next_state, not_terminal, reward, mus, sigmas, logpi, nts, rs = tuple(
map(
lambda x: x[-1:],
(next_state, not_terminal, reward, mus, sigmas, logpi, nts, rs),
)
)
gmm = (
gmm_loss(next_state, mus, sigmas, logpi)
* self.params.next_state_loss_weight
)
bce = (
F.binary_cross_entropy_with_logits(nts, not_terminal)
* self.params.not_terminal_loss_weight
)
mse = F.mse_loss(rs, reward) * self.params.reward_loss_weight
if state_dim is not None:
loss = gmm / (state_dim + 2) + bce + mse
else:
loss = gmm + bce + mse
return {"gmm": gmm, "bce": bce, "mse": mse, "loss": loss}
ReAgent | ReAgent-master/reagent/training/world_model/seq2reward_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import reagent.core.types as rlt
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core.parameters import Seq2RewardTrainerParameters
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import gen_permutations
logger = logging.getLogger(__name__)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def get_step_prediction(
step_predict_network: FullyConnectedNetwork, training_batch: rlt.MemoryNetworkInput
):
first_step_state = training_batch.state.float_features[0]
pred_step = step_predict_network(first_step_state)
step_probability = F.softmax(pred_step, dim=1)
return step_probability
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def get_Q(
seq2reward_network: Seq2RewardNetwork,
cur_state: torch.Tensor,
all_permut: torch.Tensor,
) -> torch.Tensor:
"""
Input:
cur_state: the current state from where we start planning.
shape: batch_size x state_dim
all_permut: all action sequences (sorted in lexical order) for enumeration
shape: seq_len x num_perm x action_dim
"""
batch_size = cur_state.shape[0]
_, num_permut, num_action = all_permut.shape
num_permut_per_action = int(num_permut / num_action)
preprocessed_state = cur_state.unsqueeze(0).repeat_interleave(num_permut, dim=1)
state_feature_vector = rlt.FeatureData(preprocessed_state)
# expand action to match the expanded state sequence
action = rlt.FeatureData(all_permut.repeat(1, batch_size, 1))
acc_reward = seq2reward_network(state_feature_vector, action).acc_reward.reshape(
batch_size, num_action, num_permut_per_action
)
    # The permutations are generated in lexical order, so acc_reward (reshaped
    # above to [batch_size, num_action, num_permut_per_action]) groups the
    # sequences by their first action; taking the max over the last dim yields
    # the best accumulated reward per first action, with shape (BATCH_SIZE, ACTION_DIM)
max_acc_reward = (
torch.max(acc_reward, dim=2).values.detach().reshape(batch_size, num_action)
)
return max_acc_reward
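# A minimal shape sketch (random values, purely illustrative) of the reduction
# done in get_Q: with 2 actions planned over 3 steps there are 2 ** 3 = 8
# lexically ordered action sequences, i.e. 4 sequences per first action, and the
# max over those 4 gives one Q value per action.
def _example_get_q_shapes() -> torch.Size:
    batch_size, num_action, num_permut = 5, 2, 2 ** 3
    acc_reward = torch.randn(batch_size, num_action, num_permut // num_action)
    max_acc_reward = torch.max(acc_reward, dim=2).values
    return max_acc_reward.shape  # torch.Size([5, 2])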
class Seq2RewardTrainer(ReAgentLightningModule):
"""Trainer for Seq2Reward"""
def __init__(
self, seq2reward_network: Seq2RewardNetwork, params: Seq2RewardTrainerParameters
):
super().__init__()
self.seq2reward_network = seq2reward_network
self.params = params
        # Whether to compute and log Q values during training:
self.view_q_value = params.view_q_value
# permutations used to do planning
self.all_permut = gen_permutations(
params.multi_steps, len(self.params.action_names)
)
self.mse_loss = nn.MSELoss(reduction="mean")
# Predict how many steps are remaining from the current step
self.step_predict_network = FullyConnectedNetwork(
[
self.seq2reward_network.state_dim,
self.params.step_predict_net_size,
self.params.step_predict_net_size,
self.params.multi_steps,
],
["relu", "relu", "linear"],
use_layer_norm=False,
)
self.step_loss = nn.CrossEntropyLoss(reduction="mean")
def configure_optimizers(self):
optimizers = []
optimizers.append(
{
"optimizer": torch.optim.Adam(
self.seq2reward_network.parameters(), lr=self.params.learning_rate
),
}
)
optimizers.append(
{
"optimizer": torch.optim.Adam(
self.step_predict_network.parameters(), lr=self.params.learning_rate
)
},
)
return optimizers
def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
mse_loss = self.get_mse_loss(training_batch)
detached_mse_loss = mse_loss.cpu().detach().item()
yield mse_loss
step_entropy_loss = self.get_step_entropy_loss(training_batch)
detached_step_entropy_loss = step_entropy_loss.cpu().detach().item()
if self.view_q_value:
state_first_step = training_batch.state.float_features[0]
q_values = (
get_Q(
self.seq2reward_network,
state_first_step,
self.all_permut,
)
.cpu()
.mean(0)
.tolist()
)
else:
q_values = [0] * len(self.params.action_names)
step_probability = (
get_step_prediction(self.step_predict_network, training_batch)
.cpu()
.mean(dim=0)
.numpy()
)
logger.info(
f"Seq2Reward trainer output: mse_loss={detached_mse_loss}, "
f"step_entropy_loss={detached_step_entropy_loss}, q_values={q_values}, "
f"step_probability={step_probability}"
)
self.reporter.log(
mse_loss=detached_mse_loss,
step_entropy_loss=detached_step_entropy_loss,
q_values=[q_values],
)
yield step_entropy_loss
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int):
detached_mse_loss = self.get_mse_loss(batch).cpu().detach().item()
detached_step_entropy_loss = (
self.get_step_entropy_loss(batch).cpu().detach().item()
)
state_first_step = batch.state.float_features[0]
# shape: batch_size, action_dim
q_values_all_action_all_data = get_Q(
self.seq2reward_network,
state_first_step,
self.all_permut,
).cpu()
q_values = q_values_all_action_all_data.mean(0).tolist()
action_distribution = torch.bincount(
torch.argmax(q_values_all_action_all_data, dim=1),
minlength=len(self.params.action_names),
)
# normalize
action_distribution = (
action_distribution.float() / torch.sum(action_distribution)
).tolist()
self.reporter.log(
eval_mse_loss=detached_mse_loss,
eval_step_entropy_loss=detached_step_entropy_loss,
eval_q_values=[q_values],
eval_action_distribution=[action_distribution],
)
return (
detached_mse_loss,
detached_step_entropy_loss,
q_values,
action_distribution,
)
def get_mse_loss(self, training_batch: rlt.MemoryNetworkInput):
"""
Compute losses:
MSE(predicted_acc_reward, target_acc_reward)
:param training_batch:
training_batch has these fields:
- state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
- action: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor
- reward: (SEQ_LEN, BATCH_SIZE) torch tensor
:returns:
mse loss on reward
"""
# pyre-fixme[16]: Optional type has no attribute `flatten`.
valid_step = training_batch.valid_step.flatten()
seq2reward_output = self.seq2reward_network(
training_batch.state,
rlt.FeatureData(training_batch.action),
valid_step,
)
predicted_acc_reward = seq2reward_output.acc_reward
seq_len, batch_size = training_batch.reward.size()
gamma = self.params.gamma
gamma_mask = (
torch.Tensor(
[[gamma ** i for i in range(seq_len)] for _ in range(batch_size)]
)
.transpose(0, 1)
.to(training_batch.reward.device)
)
target_acc_rewards = torch.cumsum(training_batch.reward * gamma_mask, dim=0)
target_acc_reward = target_acc_rewards[
valid_step - 1, torch.arange(batch_size)
].unsqueeze(1)
# make sure the prediction and target tensors have the same size
# the size should both be (BATCH_SIZE, 1) in this case.
assert (
predicted_acc_reward.size() == target_acc_reward.size()
), f"{predicted_acc_reward.size()}!={target_acc_reward.size()}"
return self.mse_loss(predicted_acc_reward, target_acc_reward)
def get_step_entropy_loss(self, training_batch: rlt.MemoryNetworkInput):
"""
Compute cross-entropy losses of step predictions
:param training_batch:
training_batch has these fields:
- state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
- action: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor
- reward: (SEQ_LEN, BATCH_SIZE) torch tensor
:returns:
step_entropy_loss on step prediction
"""
# pyre-fixme[16]: Optional type has no attribute `flatten`.
valid_step = training_batch.valid_step.flatten()
first_step_state = training_batch.state.float_features[0]
valid_step_output = self.step_predict_network(first_step_state)
# step loss's target is zero-based indexed, so subtract 1 from valid_step
return self.step_loss(valid_step_output, valid_step - 1)
def warm_start_components(self):
components = ["seq2reward_network"]
return components
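# A small illustrative walk-through (toy numbers) of the target construction in
# Seq2RewardTrainer.get_mse_loss: rewards are discounted by gamma ** step,
# cumulatively summed over the sequence dimension, and the entry at
# valid_step - 1 is the target accumulated reward.
def _example_discounted_target() -> torch.Tensor:
    gamma = 0.5
    reward = torch.ones(3, 1)  # (seq_len=3, batch_size=1)
    gamma_mask = torch.tensor([[gamma ** i] for i in range(3)])  # [[1.0], [0.5], [0.25]]
    target_acc_rewards = torch.cumsum(reward * gamma_mask, dim=0)
    valid_step = torch.tensor([3])
    # target for a sequence with 3 valid steps: 1 + 0.5 + 0.25 = 1.75
    return target_acc_rewards[valid_step - 1, torch.arange(1)].unsqueeze(1)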
ReAgent | ReAgent-master/reagent/training/world_model/compress_model_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import Seq2RewardTrainerParameters
from reagent.core.types import FeatureData
from reagent.models.fully_connected_network import FloatFeatureFullyConnected
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import gen_permutations
from reagent.training.world_model.seq2reward_trainer import get_Q
logger = logging.getLogger(__name__)
class CompressModelTrainer(ReAgentLightningModule):
"""Trainer for fitting Seq2Reward planning outcomes to a neural network-based policy"""
def __init__(
self,
compress_model_network: FloatFeatureFullyConnected,
seq2reward_network: Seq2RewardNetwork,
params: Seq2RewardTrainerParameters,
):
super().__init__()
self.compress_model_network = compress_model_network
self.seq2reward_network = seq2reward_network
self.params = params
# permutations used to do planning
self.all_permut = gen_permutations(
params.multi_steps, len(self.params.action_names)
)
def configure_optimizers(self):
optimizers = []
optimizers.append(
{
"optimizer": torch.optim.Adam(
self.compress_model_network.parameters(),
lr=self.params.compress_model_learning_rate,
)
}
)
return optimizers
def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
loss, accuracy = self.get_loss(training_batch)
detached_loss = loss.cpu().detach().item()
accuracy = accuracy.item()
logger.info(
f"Seq2Reward Compress trainer MSE/Accuracy: {detached_loss}, {accuracy}"
)
self.reporter.log(mse_loss=detached_loss, accuracy=accuracy)
yield loss
@staticmethod
def extract_state_first_step(batch):
return FeatureData(batch.state.float_features[0])
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int):
mse, acc = self.get_loss(batch)
detached_loss = mse.cpu().detach().item()
acc = acc.item()
state_first_step = CompressModelTrainer.extract_state_first_step(batch)
# shape: batch_size, action_dim
q_values_all_action_all_data = (
self.compress_model_network(state_first_step).cpu().detach()
)
q_values = q_values_all_action_all_data.mean(0).tolist()
action_distribution = torch.bincount(
torch.argmax(q_values_all_action_all_data, dim=1),
minlength=len(self.params.action_names),
)
# normalize
action_distribution = (
action_distribution.float() / torch.sum(action_distribution)
).tolist()
self.reporter.log(
eval_mse_loss=detached_loss,
eval_accuracy=acc,
eval_q_values=[q_values],
eval_action_distribution=[action_distribution],
)
return (detached_loss, q_values, action_distribution, acc)
def get_loss(self, batch: rlt.MemoryNetworkInput):
state_first_step = CompressModelTrainer.extract_state_first_step(batch)
# shape: batch_size, num_action
compress_model_output = self.compress_model_network(state_first_step)
target = get_Q(
self.seq2reward_network,
state_first_step.float_features,
self.all_permut,
)
assert (
compress_model_output.size() == target.size()
), f"{compress_model_output.size()}!={target.size()}"
mse = F.mse_loss(compress_model_output, target)
with torch.no_grad():
target_action = torch.max(target, dim=1).indices
model_action = torch.max(compress_model_output, dim=1).indices
accuracy = torch.mean((target_action == model_action).float())
return mse, accuracy
def warm_start_components(self):
logger.info("No warm start components yet...")
components = []
return components
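# A toy illustration of the accuracy metric computed in
# CompressModelTrainer.get_loss: the greedy action of the compressed policy is
# compared against the greedy action implied by the planner's Q values
# (all numbers below are made up).
def _example_compress_accuracy() -> torch.Tensor:
    target = torch.tensor([[1.0, 2.0], [3.0, 0.5]])  # planner Q values
    compress_model_output = torch.tensor([[0.9, 2.1], [0.1, 0.6]])
    target_action = torch.max(target, dim=1).indices  # tensor([1, 0])
    model_action = torch.max(compress_model_output, dim=1).indices  # tensor([1, 1])
    return torch.mean((target_action == model_action).float())  # tensor(0.5000)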
ReAgent | ReAgent-master/reagent/training/cfeval/bandit_reward_network_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Optional
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import field
from reagent.models.base import ModelBase
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.reward_network_trainer import _get_loss_function, LossFunction
logger = logging.getLogger(__name__)
class BanditRewardNetTrainer(ReAgentLightningModule):
def __init__(
self,
reward_net: ModelBase,
optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
loss_type: LossFunction = LossFunction.MSE,
reward_ignore_threshold: Optional[float] = None,
weighted_by_inverse_propensity: bool = False,
) -> None:
super().__init__()
self.reward_net = reward_net
self.optimizer = optimizer
self.loss_type = loss_type
self.reward_ignore_threshold = reward_ignore_threshold
self.weighted_by_inverse_propensity = weighted_by_inverse_propensity
self.loss_fn = _get_loss_function(
loss_type, reward_ignore_threshold, weighted_by_inverse_propensity
)
def configure_optimizers(self):
optimizers = []
optimizers.append(
self.optimizer.make_optimizer_scheduler(self.reward_net.parameters())
)
return optimizers
def _get_sample_weight(self, batch: rlt.BanditRewardModelInput):
weight = None
if self.weighted_by_inverse_propensity:
assert batch.action_prob is not None
weight = 1.0 / batch.action_prob
return weight
def _get_predicted_reward(self, batch: rlt.BanditRewardModelInput):
model_rewards_all_actions = self.reward_net(batch.state)
logged_action_idxs = torch.argmax(batch.action, dim=1, keepdim=True)
predicted_reward = model_rewards_all_actions.gather(1, logged_action_idxs)
return predicted_reward
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def _compute_unweighted_loss(
self, predicted_reward: torch.Tensor, target_reward: torch.Tensor
):
return self.loss_fn(
predicted_reward, target_reward, weight=torch.ones_like(predicted_reward)
)
def train_step_gen(
self, training_batch: rlt.BanditRewardModelInput, batch_idx: int
):
weight = self._get_sample_weight(training_batch)
target_reward = training_batch.reward
predicted_reward = self._get_predicted_reward(training_batch)
assert (
predicted_reward.shape == target_reward.shape
and len(target_reward.shape) == 2
and target_reward.shape[1] == 1
)
loss = self.loss_fn(predicted_reward, target_reward, weight)
detached_loss = loss.detach().cpu()
self.reporter.log(loss=detached_loss)
if weight is not None:
unweighted_loss = self._compute_unweighted_loss(
predicted_reward, target_reward
)
self.reporter.log(unweighted_loss=unweighted_loss)
if self.all_batches_processed % 10 == 0:
logger.info(
f"{self.all_batches_processed}-th batch: "
f"{self.loss_type}={detached_loss.item()}"
)
yield loss
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.BanditRewardModelInput, batch_idx: int):
if self._training_batch_type and isinstance(batch, dict):
batch = self._training_batch_type.from_dict(batch)
reward = batch.reward
self.reporter.log(eval_rewards=reward.flatten().detach().cpu())
pred_reward = self._get_predicted_reward(batch)
self.reporter.log(eval_pred_rewards=pred_reward.flatten().detach().cpu())
weight = self._get_sample_weight(batch)
loss = self.loss_fn(pred_reward, reward, weight)
detached_loss = loss.detach().cpu()
self.reporter.log(eval_loss=detached_loss)
if weight is not None:
unweighted_loss = self._compute_unweighted_loss(pred_reward, reward)
self.reporter.log(eval_unweighted_loss=unweighted_loss)
return detached_loss.item()
def validation_epoch_end(self, outputs):
self.reporter.update_best_model(np.mean(outputs), self.reward_net)
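# A toy illustration of how _get_predicted_reward selects the model's prediction
# for the logged action: actions are one-hot, so argmax recovers the action
# index and gather picks the corresponding column (values are made up).
def _example_gather_logged_reward() -> torch.Tensor:
    model_rewards_all_actions = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    action = torch.tensor([[0.0, 1.0], [1.0, 0.0]])  # one-hot logged actions
    logged_action_idxs = torch.argmax(action, dim=1, keepdim=True)
    return model_rewards_all_actions.gather(1, logged_action_idxs)  # [[0.9], [0.8]]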
ReAgent | ReAgent-master/reagent/training/ranking/seq2slate_attn_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn as nn
from reagent.core.dataclasses import field
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from sklearn.metrics import (
average_precision_score,
dcg_score,
ndcg_score,
roc_auc_score,
)
logger = logging.getLogger(__name__)
class Seq2SlatePairwiseAttnTrainer(ReAgentLightningModule):
"""
Seq2Slate without a decoder learned in a supervised learning fashion (
https://arxiv.org/pdf/1904.06813.pdf )
"""
def __init__(
self,
seq2slate_net: Seq2SlateTransformerNet,
slate_size: int,
calc_cpe: bool,
policy_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
) -> None:
super().__init__()
self.seq2slate_net = seq2slate_net
self.slate_size = slate_size
self.calc_cpe = calc_cpe
self.policy_optimizer = policy_optimizer
self.log_softmax = nn.LogSoftmax(dim=1)
self.kl_loss = nn.KLDivLoss(reduction="batchmean")
def configure_optimizers(self):
optimizers = []
optimizers.append(
self.policy_optimizer.make_optimizer_scheduler(
self.seq2slate_net.parameters()
)
)
return optimizers
def train_step_gen(
self, training_batch: rlt.PreprocessedRankingInput, batch_idx: int
):
assert type(training_batch) is rlt.PreprocessedRankingInput
# shape: batch_size, tgt_seq_len
encoder_scores = self.seq2slate_net(
training_batch, mode=Seq2SlateMode.ENCODER_SCORE_MODE
).encoder_scores
assert encoder_scores.requires_grad
loss = self.kl_loss(
self.log_softmax(encoder_scores), training_batch.position_reward
)
detached_loss = loss.detach().cpu()
self.reporter.log(train_cross_entropy_loss=detached_loss)
yield loss
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
# pyre-fixme[16]: `Optional` has no attribute `shape`.
batch_size = batch.position_reward.shape[0]
# shape: batch_size, tgt_seq_len
encoder_scores = self.seq2slate_net(
batch, mode=Seq2SlateMode.ENCODER_SCORE_MODE
).encoder_scores
assert (
encoder_scores.shape[1] == batch.position_reward.shape[1] == self.slate_size
)
ce_loss = self.kl_loss(
self.log_softmax(encoder_scores), batch.position_reward
).item()
if not self.calc_cpe:
self.reporter.log(eval_cross_entropy_loss=ce_loss)
return
# shape: batch_size, tgt_seq_len
ranking_output = self.seq2slate_net(
batch, mode=Seq2SlateMode.RANK_MODE, greedy=True
)
# pyre-fixme[16]: `int` has no attribute `cpu`.
ranked_idx = (ranking_output.ranked_tgt_out_idx - 2).cpu().numpy()
# pyre-fixme[58]: `-` is not supported for operand types
# `Optional[torch.Tensor]` and `int`.
logged_idx = (batch.tgt_out_idx - 2).cpu().numpy()
score_bar = np.arange(self.slate_size, 0, -1)
batch_dcg = []
batch_ndcg = []
batch_mean_ap = []
batch_auc = []
batch_base_dcg = []
batch_base_ndcg = []
batch_base_map = []
batch_base_auc = []
for i in range(batch_size):
# no positive label in the slate or slate labels are all positive
# pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
if (not torch.any(batch.position_reward[i].bool())) or (
torch.all(batch.position_reward[i].bool())
):
continue
ranked_scores = np.zeros(self.slate_size)
ranked_scores[ranked_idx[i]] = score_bar
truth_scores = np.zeros(self.slate_size)
truth_scores[logged_idx[i]] = batch.position_reward[i].cpu().numpy()
base_scores = np.zeros(self.slate_size)
base_scores[logged_idx[i]] = score_bar
# average_precision_score accepts 1D arrays
# dcg & ndcg accepts 2D arrays
batch_mean_ap.append(average_precision_score(truth_scores, ranked_scores))
batch_base_map.append(average_precision_score(truth_scores, base_scores))
batch_auc.append(roc_auc_score(truth_scores, ranked_scores))
batch_base_auc.append(roc_auc_score(truth_scores, base_scores))
ranked_scores = np.expand_dims(ranked_scores, axis=0)
truth_scores = np.expand_dims(truth_scores, axis=0)
base_scores = np.expand_dims(base_scores, axis=0)
batch_dcg.append(dcg_score(truth_scores, ranked_scores))
batch_ndcg.append(ndcg_score(truth_scores, ranked_scores))
batch_base_dcg.append(dcg_score(truth_scores, base_scores))
batch_base_ndcg.append(ndcg_score(truth_scores, base_scores))
self.reporter.log(
eval_cross_entropy_loss=ce_loss,
eval_dcg=torch.mean(torch.tensor(batch_dcg)).reshape(1),
eval_ndcg=torch.mean(torch.tensor(batch_ndcg)).reshape(1),
eval_mean_ap=torch.mean(torch.tensor(batch_mean_ap)).reshape(1),
eval_auc=torch.mean(torch.tensor(batch_auc)).reshape(1),
eval_base_dcg=torch.mean(torch.tensor(batch_base_dcg)).reshape(1),
eval_base_ndcg=torch.mean(torch.tensor(batch_base_ndcg)).reshape(1),
eval_base_map=torch.mean(torch.tensor(batch_base_map)).reshape(1),
eval_base_auc=torch.mean(torch.tensor(batch_base_auc)).reshape(1),
)
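# A small illustration (toy slate of size 3) of how the per-slate score vectors
# fed to the sklearn ranking metrics above are built: descending scores are
# placed at the model's ranked positions, and the logged position rewards act
# as the ground truth. All values are made up.
def _example_ranking_metric_inputs() -> float:
    slate_size = 3
    score_bar = np.arange(slate_size, 0, -1)  # [3, 2, 1]
    ranked_idx = np.array([2, 0, 1])  # model ranks item 2 first, then 0, then 1
    ranked_scores = np.zeros(slate_size)
    ranked_scores[ranked_idx] = score_bar  # -> [2., 1., 3.]
    truth_scores = np.array([0.0, 1.0, 1.0])  # logged position rewards
    return ndcg_score(np.expand_dims(truth_scores, 0), np.expand_dims(ranked_scores, 0))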
ReAgent | ReAgent-master/reagent/training/ranking/seq2slate_sim_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from itertools import permutations
from typing import List, Optional
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn as nn
from reagent.core.dataclasses import field
from reagent.core.parameters import Seq2SlateParameters
from reagent.core.torch_utils import gather
from reagent.models.seq2slate import BaselineNet, Seq2SlateMode, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer
logger = logging.getLogger(__name__)
def _load_reward_net(name_and_path, use_gpu):
reward_name_and_net = {}
for name, path in name_and_path.items():
reward_network = torch.jit.load(path)
if use_gpu:
reward_network = reward_network.cuda()
reward_name_and_net[name] = reward_network
return reward_name_and_net
def swap_dist_in_slate(idx_):
# Do not want to modify the original list because swap happens in place.
idx = idx_.copy()
swapcount = 0
for j in range(len(idx)):
for i in range(1, len(idx) - j):
if idx[i - 1] > idx[i]:
swapcount += 1
idx[i - 1], idx[i] = idx[i], idx[i - 1]
return swapcount
def swap_dist_out_slate(idx):
    # materialize the generator into a list: np.sum over a bare generator is deprecated
    return np.sum([x - i for i, x in enumerate(idx)])
def swap_dist(idx: List[int]):
"""
    A distance which measures how many swaps the production (logged)
    ordering needs to reach idx
Examples:
swap_dist([0, 1, 2, 4]) = 1
swap_dist([0, 1, 5, 2]) = 3
"""
assert type(idx) is list
return swap_dist_in_slate(idx) + swap_dist_out_slate(idx)
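# Quick illustrative checks of the docstring examples above: [0, 1, 2, 4] needs
# no in-slate swaps but item 4 sits one slot beyond the slate; [0, 1, 5, 2]
# needs one in-slate swap plus an out-of-slate displacement of two.
def _example_swap_dist() -> None:
    assert swap_dist([0, 1, 2, 4]) == 1
    assert swap_dist([0, 1, 5, 2]) == 3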
class Seq2SlateSimulationTrainer(Seq2SlateTrainer):
"""
Seq2Slate learned with simulation data, with the action
generated randomly and the reward computed by a reward network
"""
def __init__(
self,
seq2slate_net: Seq2SlateTransformerNet,
params: Seq2SlateParameters = field( # noqa: B008
default_factory=Seq2SlateParameters
),
baseline_net: Optional[BaselineNet] = None,
baseline_warmup_num_batches: int = 0,
policy_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
baseline_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
policy_gradient_interval: int = 1,
print_interval: int = 100,
calc_cpe: bool = False,
reward_network: Optional[nn.Module] = None,
) -> None:
super().__init__(
seq2slate_net,
params=params,
baseline_net=baseline_net,
baseline_warmup_num_batches=baseline_warmup_num_batches,
policy_optimizer=policy_optimizer,
baseline_optimizer=baseline_optimizer,
policy_gradient_interval=policy_gradient_interval,
print_interval=print_interval,
calc_cpe=calc_cpe,
reward_network=reward_network,
)
self.sim_param = params.simulation
assert self.sim_param is not None
# loaded when used
self.reward_name_and_net = nn.ModuleDict({})
self.MAX_DISTANCE = (
seq2slate_net.max_src_seq_len * (seq2slate_net.max_src_seq_len - 1) / 2
)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def _simulated_training_input(self, training_input: rlt.PreprocessedRankingInput):
device = training_input.state.float_features.device
# precision error may cause invalid actions
valid_output = False
while not valid_output:
rank_output = self.seq2slate_net(
training_input,
mode=Seq2SlateMode.RANK_MODE,
tgt_seq_len=self.seq2slate_net.max_tgt_seq_len,
greedy=False,
)
model_propensities = rank_output.ranked_per_seq_probs
model_actions_with_offset = rank_output.ranked_tgt_out_idx
model_actions = model_actions_with_offset - 2
if torch.all(model_actions >= 0):
valid_output = True
batch_size = model_actions_with_offset.shape[0]
simulated_slate_features = gather(
training_input.src_seq.float_features,
# pyre-fixme[61]: `model_actions` may not be initialized here.
model_actions,
)
if not self.reward_name_and_net:
use_gpu = True if device == torch.device("cuda") else False
self.reward_name_and_net = nn.ModuleDict(
_load_reward_net(self.sim_param.reward_name_path, use_gpu)
)
sim_slate_reward = torch.zeros(batch_size, 1, device=device)
for name, reward_net in self.reward_name_and_net.items():
weight = self.sim_param.reward_name_weight[name]
power = self.sim_param.reward_name_power[name]
sr = reward_net(
training_input.state.float_features,
training_input.src_seq.float_features,
simulated_slate_features,
training_input.src_src_mask,
model_actions_with_offset,
).detach()
assert sr.ndim == 2, f"Slate reward {name} output should be 2-D tensor"
sim_slate_reward += weight * (sr ** power)
# guard-rail reward prediction range
reward_clamp = self.sim_param.reward_clamp
if reward_clamp is not None:
sim_slate_reward = torch.clamp(
sim_slate_reward, min=reward_clamp.clamp_min, max=reward_clamp.clamp_max
)
# guard-rail sequence similarity
distance_penalty = self.sim_param.distance_penalty
if distance_penalty is not None:
sim_distance = (
torch.tensor(
# pyre-fixme[16]: `int` has no attribute `__iter__`.
[swap_dist(x.tolist()) for x in model_actions],
device=device,
)
.unsqueeze(1)
.float()
)
sim_slate_reward += distance_penalty * (self.MAX_DISTANCE - sim_distance)
assert (
len(sim_slate_reward.shape) == 2 and sim_slate_reward.shape[1] == 1
), f"{sim_slate_reward.shape}"
on_policy_input = rlt.PreprocessedRankingInput.from_input(
state=training_input.state.float_features,
candidates=training_input.src_seq.float_features,
device=device,
# pyre-fixme[6]: Expected `Optional[torch.Tensor]` for 4th param but got
# `int`.
# pyre-fixme[61]: `model_actions` may not be initialized here.
action=model_actions,
slate_reward=sim_slate_reward,
# pyre-fixme[61]: `model_propensities` may not be initialized here.
logged_propensities=model_propensities,
)
return on_policy_input
def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
assert type(batch) is rlt.PreprocessedRankingInput
training_batch = self._simulated_training_input(batch)
return super().training_step(training_batch, batch_idx)
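# A toy sketch of how the simulated slate reward above aggregates multiple
# reward networks: each network's output is raised to a configured power and
# weighted. The names "ctr" and "dwell", and all numbers, are hypothetical.
def _example_simulated_slate_reward() -> torch.Tensor:
    sim_slate_reward = torch.zeros(2, 1)
    rewards = {"ctr": torch.tensor([[0.5], [0.9]]), "dwell": torch.tensor([[2.0], [1.0]])}
    weights = {"ctr": 1.0, "dwell": 0.25}
    powers = {"ctr": 1.0, "dwell": 2.0}
    for name, sr in rewards.items():
        sim_slate_reward += weights[name] * (sr ** powers[name])
    return sim_slate_reward  # [[1.5], [1.15]]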
ReAgent | ReAgent-master/reagent/training/ranking/seq2slate_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Optional, Tuple
import reagent.core.types as rlt
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core.dataclasses import field
from reagent.core.parameters import Seq2SlateParameters
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.models.seq2slate import BaselineNet, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.ranking.helper import ips_clamp
from reagent.training.reagent_lightning_module import ReAgentLightningModule
logger = logging.getLogger(__name__)
class Seq2SlateTrainer(ReAgentLightningModule):
def __init__(
self,
seq2slate_net: Seq2SlateTransformerNet,
params: Seq2SlateParameters = field( # noqa: B008
default_factory=Seq2SlateParameters
),
baseline_net: Optional[BaselineNet] = None,
baseline_warmup_num_batches: int = 0,
policy_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
baseline_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
policy_gradient_interval: int = 1,
print_interval: int = 100,
calc_cpe: bool = False,
reward_network: Optional[nn.Module] = None,
) -> None:
super().__init__()
self.seq2slate_net = seq2slate_net
self.params = params
self.policy_gradient_interval = policy_gradient_interval
self.print_interval = print_interval
self.baseline_net = baseline_net
self.baseline_warmup_num_batches = baseline_warmup_num_batches
self.rl_opt = policy_optimizer
if self.baseline_net:
self.baseline_opt = baseline_optimizer
# use manual optimization to get more flexibility
self.automatic_optimization = False
assert not calc_cpe or reward_network is not None
self.calc_cpe = calc_cpe
self.reward_network = reward_network
def configure_optimizers(self):
optimizers = []
optimizers.append(
self.rl_opt.make_optimizer_scheduler(self.seq2slate_net.parameters())
)
if self.baseline_net:
optimizers.append(
self.baseline_opt.make_optimizer_scheduler(
self.baseline_net.parameters()
)
)
return optimizers
def _compute_impt_smpl(
self, model_propensities, logged_propensities
) -> Tuple[torch.Tensor, torch.Tensor]:
logged_propensities = logged_propensities.reshape(-1, 1)
assert (
model_propensities.shape == logged_propensities.shape
and len(model_propensities.shape) == 2
and model_propensities.shape[1] == 1
), f"{model_propensities.shape} {logged_propensities.shape}"
impt_smpl = model_propensities / logged_propensities
clamped_impt_smpl = ips_clamp(impt_smpl, self.params.ips_clamp)
return impt_smpl, clamped_impt_smpl
# pyre-fixme [14]: overrides method defined in `ReAgentLightningModule` inconsistently
def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
assert type(batch) is rlt.PreprocessedRankingInput
batch_size = batch.state.float_features.shape[0]
reward = batch.slate_reward
assert reward is not None
optimizers = self.optimizers()
if self.baseline_net:
assert len(optimizers) == 2
baseline_opt = optimizers[1]
else:
assert len(optimizers) == 1
rl_opt = optimizers[0]
if self.baseline_net:
# Train baseline
b = self.baseline_net(batch)
baseline_loss = 1.0 / batch_size * torch.sum((b - reward) ** 2)
baseline_opt.zero_grad()
self.manual_backward(baseline_loss)
baseline_opt.step()
else:
b = torch.zeros_like(reward)
baseline_loss = torch.zeros(1)
# Train Seq2Slate using REINFORCE
# log probs of tgt seqs
model_propensities = torch.exp(
self.seq2slate_net(
batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
).log_probs
)
b = b.detach()
assert (
b.shape == reward.shape == model_propensities.shape
), f"{b.shape} {reward.shape} {model_propensities.shape}"
impt_smpl, clamped_impt_smpl = self._compute_impt_smpl(
model_propensities, batch.tgt_out_probs
)
assert (
impt_smpl.shape == clamped_impt_smpl.shape == reward.shape
), f"{impt_smpl.shape} {clamped_impt_smpl.shape} {reward.shape}"
# gradient is only w.r.t model_propensities
assert (
not reward.requires_grad
# pyre-fixme[16]: `Optional` has no attribute `requires_grad`.
and not batch.tgt_out_probs.requires_grad
and impt_smpl.requires_grad
and clamped_impt_smpl.requires_grad
and not b.requires_grad
)
        # negate the objective because we perform gradient descent but want to
        # maximize the reward
batch_obj_loss = -clamped_impt_smpl * (reward - b)
obj_loss = torch.mean(batch_obj_loss)
        # Conditions for performing a policy gradient update:
        # 1. there is no baseline, or
        # 2. the baseline is present and has passed the warm-up stage;
        # additionally, the optimizer only steps once every policy_gradient_interval minibatches
if (
self.baseline_net is None
or (self.all_batches_processed + 1) >= self.baseline_warmup_num_batches
):
self.manual_backward(obj_loss)
if (self.all_batches_processed + 1) % self.policy_gradient_interval == 0:
rl_opt.step()
rl_opt.zero_grad()
else:
logger.info("Not update RL model because now is baseline warmup phase")
ips_loss = torch.mean(-impt_smpl * reward).cpu().detach().numpy()
clamped_ips_loss = (
torch.mean(-clamped_impt_smpl * reward).cpu().detach().numpy()
)
baseline_loss = baseline_loss.detach().cpu().numpy().item()
advantage = (reward - b).detach().cpu().numpy()
logged_slate_rank_probs = model_propensities.detach().cpu().numpy()
if (self.all_batches_processed + 1) % self.print_interval == 0:
logger.info(
"{} batch: ips_loss={}, clamped_ips_loss={}, baseline_loss={}, max_ips={}, mean_ips={}, grad_update={}".format(
self.all_batches_processed + 1,
ips_loss,
clamped_ips_loss,
baseline_loss,
torch.max(impt_smpl),
torch.mean(impt_smpl),
(self.all_batches_processed + 1) % self.policy_gradient_interval
== 0,
)
)
self.reporter.log(
train_ips_score=torch.tensor(ips_loss).reshape(1),
train_clamped_ips_score=torch.tensor(clamped_ips_loss).reshape(1),
train_baseline_loss=torch.tensor(baseline_loss).reshape(1),
train_logged_slate_rank_probs=torch.FloatTensor(logged_slate_rank_probs),
train_ips_ratio=impt_smpl,
train_clamped_ips_ratio=clamped_impt_smpl,
train_advantages=advantage,
)
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
seq2slate_net = self.seq2slate_net
assert seq2slate_net.training is False
logged_slate_rank_prob = torch.exp(
seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE)
.log_probs.detach()
.flatten()
.cpu()
)
eval_baseline_loss = torch.tensor([0.0]).reshape(1)
if self.baseline_net:
baseline_net = self.baseline_net
b = baseline_net(batch).detach()
eval_baseline_loss = F.mse_loss(b, batch.slate_reward).cpu().reshape(1)
else:
b = torch.zeros_like(batch.slate_reward)
eval_advantage = (
# pyre-fixme[58]: `-` is not supported for operand types
# `Optional[torch.Tensor]` and `Any`.
(batch.slate_reward - b)
.flatten()
.cpu()
)
ranked_slate_output = seq2slate_net(batch, Seq2SlateMode.RANK_MODE, greedy=True)
ranked_slate_rank_prob = ranked_slate_output.ranked_per_seq_probs.cpu()
self.reporter.log(
eval_baseline_loss=eval_baseline_loss,
eval_advantages=eval_advantage,
logged_slate_rank_probs=logged_slate_rank_prob,
ranked_slate_rank_probs=ranked_slate_rank_prob,
)
if not self.calc_cpe:
return
edp_g = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net,
self.reward_network,
batch,
eval_greedy=True,
)
edp_ng = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net,
self.reward_network,
batch,
eval_greedy=False,
)
return edp_g, edp_ng
# pyre-fixme[14]: Inconsistent override
def validation_epoch_end(
self, outputs: Optional[List[Tuple[EvaluationDataPage, EvaluationDataPage]]]
):
if self.calc_cpe:
assert outputs is not None
eval_data_pages_g, eval_data_pages_ng = None, None
for edp_g, edp_ng in outputs:
if eval_data_pages_g is None and eval_data_pages_ng is None:
eval_data_pages_g = edp_g
eval_data_pages_ng = edp_ng
else:
# pyre-fixme[16]: `Optional` has no attribute `append`
eval_data_pages_g.append(edp_g)
eval_data_pages_ng.append(edp_ng)
self.reporter.log(
eval_data_pages_g=eval_data_pages_g,
eval_data_pages_ng=eval_data_pages_ng,
)
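# A minimal numeric sketch of the REINFORCE-with-baseline objective used in
# training_step above: the loss is the negated advantage weighted by the clamped
# importance-sampling ratio, averaged over the batch (numbers are made up).
def _example_reinforce_objective() -> torch.Tensor:
    reward = torch.tensor([[1.0], [0.0]])
    baseline = torch.tensor([[0.4], [0.4]])
    clamped_impt_smpl = torch.tensor([[1.2], [0.8]])
    batch_obj_loss = -clamped_impt_smpl * (reward - baseline)
    return torch.mean(batch_obj_loss)  # tensor(-0.2000)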
ReAgent | ReAgent-master/reagent/training/ranking/helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional
import torch
from reagent.core.parameters_seq2slate import IPSClamp, IPSClampMethod
def ips_clamp(impt_smpl, ips_clamp: Optional[IPSClamp]):
if not ips_clamp:
return impt_smpl.clone()
if ips_clamp.clamp_method == IPSClampMethod.UNIVERSAL:
return torch.clamp(impt_smpl, 0, ips_clamp.clamp_max)
elif ips_clamp.clamp_method == IPSClampMethod.AGGRESSIVE:
return torch.where(
impt_smpl > ips_clamp.clamp_max, torch.zeros_like(impt_smpl), impt_smpl
)
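# Illustrative usage of ips_clamp with toy ratios, assuming IPSClamp is
# constructed with a clamp_method and a clamp_max: UNIVERSAL caps ratios at
# clamp_max, while AGGRESSIVE zeroes out any ratio that exceeds clamp_max.
def _example_ips_clamp() -> torch.Tensor:
    impt_smpl = torch.tensor([0.5, 2.0, 10.0])
    clamp = IPSClamp(clamp_method=IPSClampMethod.UNIVERSAL, clamp_max=5.0)
    return ips_clamp(impt_smpl, clamp)  # tensor([0.5000, 2.0000, 5.0000])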
ReAgent | ReAgent-master/reagent/training/ranking/seq2slate_tf_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Optional, Tuple
import reagent.core.types as rlt
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core.dataclasses import field
from reagent.core.parameters import Seq2SlateParameters
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.reagent_lightning_module import ReAgentLightningModule
logger = logging.getLogger(__name__)
class Seq2SlateTeacherForcingTrainer(ReAgentLightningModule):
"""
    Seq2Slate learned in a teacher-forcing fashion (only used if the
    ground-truth sequences are available)
"""
def __init__(
self,
seq2slate_net: Seq2SlateTransformerNet,
params: Seq2SlateParameters,
policy_optimizer: Optimizer__Union = field( # noqa: B008
default_factory=Optimizer__Union.default
),
policy_gradient_interval: int = 1,
print_interval: int = 100,
calc_cpe: bool = False,
reward_network: Optional[nn.Module] = None,
) -> None:
super().__init__()
self.params = params
self.policy_gradient_interval = policy_gradient_interval
self.print_interval = print_interval
self.seq2slate_net = seq2slate_net
self.policy_optimizer = policy_optimizer
self.kl_div_loss = nn.KLDivLoss(reduction="batchmean")
# use manual optimization to get more flexibility
self.automatic_optimization = False
assert not calc_cpe or reward_network is not None
self.calc_cpe = calc_cpe
self.reward_network = reward_network
def configure_optimizers(self):
optimizers = []
optimizers.append(
self.policy_optimizer.make_optimizer_scheduler(
self.seq2slate_net.parameters()
)
)
return optimizers
# pyre-fixme [14]: overrides method defined in `ReAgentLightningModule` inconsistently
def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
assert type(batch) is rlt.PreprocessedRankingInput
log_probs = self.seq2slate_net(
batch, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE
).log_probs
assert log_probs.requires_grad
assert batch.optim_tgt_out_idx is not None
labels = self._transform_label(batch.optim_tgt_out_idx)
assert not labels.requires_grad
loss = self.kl_div_loss(log_probs, labels)
self.manual_backward(loss)
if (self.all_batches_processed + 1) % self.policy_gradient_interval == 0:
opt = self.optimizers()[0]
opt.step()
opt.zero_grad()
loss = loss.detach().cpu().numpy()
log_probs = log_probs.detach()
if (self.all_batches_processed + 1) % self.print_interval == 0:
logger.info(f"{self.all_batches_processed + 1} batch: loss={loss}")
def _transform_label(self, optim_tgt_out_idx: torch.Tensor):
label_size = self.seq2slate_net.max_src_seq_len + 2
label = F.one_hot(optim_tgt_out_idx, label_size)
return label.float()
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):
seq2slate_net = self.seq2slate_net
assert seq2slate_net.training is False
logged_slate_rank_prob = torch.exp(
seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE)
.log_probs.detach()
.flatten()
.cpu()
)
ranked_slate_output = seq2slate_net(batch, Seq2SlateMode.RANK_MODE, greedy=True)
ranked_slate_rank_prob = ranked_slate_output.ranked_per_seq_probs.cpu()
self.reporter.log(
logged_slate_rank_probs=logged_slate_rank_prob,
ranked_slate_rank_probs=ranked_slate_rank_prob,
)
if not self.calc_cpe:
return
edp_g = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net,
self.reward_network,
batch,
eval_greedy=True,
)
edp_ng = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net,
self.reward_network,
batch,
eval_greedy=False,
)
return edp_g, edp_ng
# pyre-fixme[14]: Inconsistent override
def validation_epoch_end(
self, outputs: Optional[List[Tuple[EvaluationDataPage, EvaluationDataPage]]]
):
if self.calc_cpe:
assert outputs is not None
eval_data_pages_g, eval_data_pages_ng = None, None
for edp_g, edp_ng in outputs:
if eval_data_pages_g is None and eval_data_pages_ng is None:
eval_data_pages_g = edp_g
eval_data_pages_ng = edp_ng
else:
# pyre-fixme[16]: `Optional` has no attribute `append`
eval_data_pages_g.append(edp_g)
eval_data_pages_ng.append(edp_ng)
self.reporter.log(
eval_data_pages_g=eval_data_pages_g,
eval_data_pages_ng=eval_data_pages_ng,
)
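# A toy illustration of the label transform used for teacher forcing above:
# the target indices are turned into one-hot vectors over max_src_seq_len + 2
# slots (the +2 accounts for the two reserved symbols, consistent with the -2
# offsets used elsewhere). Here max_src_seq_len is assumed to be 4.
def _example_teacher_forcing_labels() -> torch.Tensor:
    optim_tgt_out_idx = torch.tensor([[2, 3]])  # ground-truth ordering, with offset
    label_size = 4 + 2
    return F.one_hot(optim_tgt_out_idx, label_size).float()  # shape (1, 2, 6)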
ReAgent | ReAgent-master/reagent/models/dueling_q_network.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.models.base import ModelBase
from reagent.models.critic import FullyConnectedCritic
from reagent.models.dqn import FullyConnectedDQN
logger = logging.getLogger(__name__)
INVALID_ACTION_CONSTANT = -1e10
class DuelingQNetwork(ModelBase):
def __init__(
self,
*,
shared_network: ModelBase,
advantage_network: ModelBase,
value_network: ModelBase,
) -> None:
"""
Dueling Q-Network Architecture: https://arxiv.org/abs/1511.06581
"""
super().__init__()
self.shared_network = shared_network
input_prototype = shared_network.input_prototype()
assert isinstance(
input_prototype, rlt.FeatureData
), "shared_network should expect FeatureData as input"
self.advantage_network = advantage_network
self.value_network = value_network
_check_connection(self)
self._name = "unnamed"
@classmethod
def make_fully_connected(
cls,
state_dim: int,
action_dim: int,
layers: List[int],
activations: List[str],
num_atoms: Optional[int] = None,
use_batch_norm: bool = False,
):
assert len(layers) > 0, "Must have at least one layer"
state_embedding_dim = layers[-1]
assert state_embedding_dim % 2 == 0, "The last size must be divisible by 2"
shared_network = FullyConnectedDQN(
state_dim,
state_embedding_dim,
sizes=layers[:-1],
activations=activations[:-1],
normalized_output=True,
use_batch_norm=use_batch_norm,
)
advantage_network = FullyConnectedDQN(
state_embedding_dim,
action_dim,
sizes=[state_embedding_dim // 2],
activations=activations[-1:],
num_atoms=num_atoms,
)
value_network = FullyConnectedDQN(
state_embedding_dim,
1,
sizes=[state_embedding_dim // 2],
activations=activations[-1:],
num_atoms=num_atoms,
)
return cls(
shared_network=shared_network,
advantage_network=advantage_network,
value_network=value_network,
)
def input_prototype(self):
return self.shared_network.input_prototype()
def _get_values(
self, state: rlt.FeatureData
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
shared_state = rlt.FeatureData(self.shared_network(state))
value = self.value_network(shared_state)
raw_advantage = self.advantage_network(shared_state)
reduce_over = tuple(range(1, raw_advantage.dim()))
advantage = raw_advantage - raw_advantage.mean(dim=reduce_over, keepdim=True)
q_value = value + advantage
return value, raw_advantage, advantage, q_value
def forward(
self,
state: rlt.FeatureData,
possible_actions_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
value, raw_advantage, advantage, q_value = self._get_values(state)
# TODO: export these as observable values
if SummaryWriterContext._global_step % 1000 == 0:
_log_histogram_and_mean(self._name, "value", value)
_log_histogram_and_mean(self._name, "q_value", q_value)
_log_histogram_and_mean(self._name, "raw_advantage", raw_advantage)
advantage = advantage.detach()
for i in range(advantage.shape[1]):
a = advantage[:, i]
_log_histogram_and_mean(f"{self._name}/{i}", "advantage", a)
if possible_actions_mask is not None:
# subtract huge value from impossible actions to force their probabilities to 0
q_value = (
q_value + (1 - possible_actions_mask.float()) * INVALID_ACTION_CONSTANT
)
return q_value
class ParametricDuelingQNetwork(ModelBase):
def __init__(
self,
*,
shared_network: ModelBase,
advantage_network: ModelBase,
value_network: ModelBase,
) -> None:
"""
Dueling Q-Network Architecture: https://arxiv.org/abs/1511.06581
"""
super().__init__()
advantage_network_input = advantage_network.input_prototype()
assert (
isinstance(advantage_network_input, tuple)
and len(advantage_network_input) == 2
)
assert advantage_network_input[0].has_float_features_only
self.shared_network = shared_network
self.advantage_network = advantage_network
self.value_network = value_network
_check_connection(self)
self._name = "unnamed"
@classmethod
def make_fully_connected(
cls,
state_dim: int,
action_dim: int,
layers: List[int],
activations: List[str],
use_batch_norm: bool = False,
):
state_embedding_dim = layers[-1]
shared_network = FullyConnectedDQN(
state_dim,
state_embedding_dim,
sizes=layers[:-1],
activations=activations[:-1],
normalized_output=True,
)
advantage_network = FullyConnectedCritic(
state_embedding_dim,
action_dim,
sizes=[state_embedding_dim // 2],
activations=activations[-1:],
)
value_network = FullyConnectedDQN(
state_embedding_dim,
1,
sizes=[state_embedding_dim // 2],
activations=activations[-1:],
)
return ParametricDuelingQNetwork(
shared_network=shared_network,
advantage_network=advantage_network,
value_network=value_network,
)
def input_prototype(self):
shared_network_input = self.shared_network.input_prototype()
assert isinstance(shared_network_input, rlt.FeatureData)
_state, action = self.advantage_network.input_prototype()
return (shared_network_input, action)
def _get_values(
self, state_action: Tuple[rlt.FeatureData, rlt.FeatureData]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
state, action = state_action
shared_state = rlt.FeatureData(self.shared_network(state))
value = self.value_network(shared_state)
advantage = self.advantage_network(shared_state, action)
q_value = value + advantage
return advantage, value, q_value
def forward(self, state: rlt.FeatureData, action: rlt.FeatureData) -> torch.Tensor:
advantage, value, q_value = self._get_values((state, action))
# TODO: export these as observable values
if SummaryWriterContext._global_step % 1000 == 0:
_log_histogram_and_mean(self._name, "value", value)
_log_histogram_and_mean(self._name, "q_value", q_value)
_log_histogram_and_mean(self._name, "advantage", advantage)
return q_value
def _log_histogram_and_mean(name, key, x):
SummaryWriterContext.add_histogram(
f"dueling_network/{name}/{key}", x.detach().cpu()
)
SummaryWriterContext.add_scalar(
f"dueling_network/{name}/mean_{key}", x.detach().mean().cpu()
)
def _check_connection(model):
try:
with torch.no_grad():
model.eval()
_ = model._get_values(model.input_prototype())
except Exception:
logger.error(
"The networks aren't connecting to each other; check your networks"
)
raise
finally:
model.train()
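# A toy numeric sketch of the dueling aggregation in DuelingQNetwork._get_values:
# the mean advantage is subtracted so the value/advantage decomposition is
# identifiable, and Q = V + (A - mean(A)). Numbers are made up.
def _example_dueling_aggregation() -> torch.Tensor:
    value = torch.tensor([[1.0]])
    raw_advantage = torch.tensor([[2.0, 0.0]])
    advantage = raw_advantage - raw_advantage.mean(dim=1, keepdim=True)  # [[1., -1.]]
    return value + advantage  # [[2., 0.]]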
ReAgent | ReAgent-master/reagent/models/base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from copy import deepcopy
from typing import Any, Optional
import torch.nn as nn
from reagent.core import types as rlt
# add ABCMeta once https://github.com/sphinx-doc/sphinx/issues/5995 is fixed
class ModelBase(nn.Module):
"""
A base class to support exporting through ONNX
"""
def input_prototype(self) -> Any:
"""
This function provides the input for ONNX graph tracing.
        The return value should be what is expected by `forward()`.
"""
raise NotImplementedError
def feature_config(self) -> Optional[rlt.ModelFeatureConfig]:
"""
If the model needs additional preprocessing, e.g., using sequence features,
returns the config here.
"""
return None
def get_target_network(self):
"""
        Return a copy of this network to be used as the target network.
        Subclasses should override this if the target network should share parameters
        with the network to be trained.
"""
return deepcopy(self)
def get_distributed_data_parallel_model(self):
"""
Return DistributedDataParallel version of this model
This needs to be implemented explicitly because:
1) Model with EmbeddingBag module is not compatible with vanilla DistributedDataParallel
2) Exporting logic needs structured data. DistributedDataParallel doesn't work with structured data.
"""
raise NotImplementedError
def cpu_model(self):
"""
Override this in DistributedDataParallel models
"""
# This is not ideal but makes exporting simple
return deepcopy(self).cpu()
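# A minimal illustrative subclass (hypothetical, not used anywhere in ReAgent)
# showing the ModelBase contract: input_prototype must return exactly what
# forward() expects so the model can be traced for export.
import torch  # torch itself is not imported at the top of this module


class _ExampleFeedForwardModel(ModelBase):
    def __init__(self, state_dim: int = 4) -> None:
        super().__init__()
        self.state_dim = state_dim
        self.linear = nn.Linear(state_dim, 1)

    def input_prototype(self) -> torch.Tensor:
        return torch.randn(1, self.state_dim)

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        return self.linear(state)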
ReAgent | ReAgent-master/reagent/models/actor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import math
from typing import List, Optional
import torch
from reagent.core import types as rlt
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import FullyConnectedNetwork
from torch.distributions import Dirichlet
from torch.distributions.normal import Normal
LOG_PROB_MIN = -2.0
LOG_PROB_MAX = 2.0
class StochasticActor(ModelBase):
def __init__(self, scorer, sampler):
super().__init__()
self.scorer = scorer
self.sampler = sampler
def input_prototype(self):
return self.scorer.input_prototype()
def get_distributed_data_parallel_model(self):
raise NotImplementedError()
def forward(self, state):
action_scores = self.scorer(state)
return self.sampler.sample_action(action_scores, possible_actions_mask=None)
class FullyConnectedActor(ModelBase):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
use_batch_norm: bool = False,
action_activation: str = "tanh",
exploration_variance: Optional[float] = None,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.action_activation = action_activation
self.fc = FullyConnectedNetwork(
[state_dim] + sizes + [action_dim],
activations + [self.action_activation],
use_batch_norm=use_batch_norm,
)
# Gaussian noise for exploration.
self.exploration_variance = exploration_variance
if exploration_variance is not None:
assert exploration_variance > 0
loc = torch.zeros(action_dim).float()
scale = torch.ones(action_dim).float() * exploration_variance
self.noise_dist = Normal(loc=loc, scale=scale)
def input_prototype(self):
return rlt.FeatureData(torch.randn(1, self.state_dim))
def forward(self, state: rlt.FeatureData) -> rlt.ActorOutput:
action = self.fc(state.float_features)
batch_size = action.shape[0]
assert action.shape == (
batch_size,
self.action_dim,
), f"{action.shape} != ({batch_size}, {self.action_dim})"
if self.exploration_variance is None:
log_prob = torch.zeros(batch_size).to(action.device).float().view(-1, 1)
return rlt.ActorOutput(action=action, log_prob=log_prob)
noise = self.noise_dist.sample((batch_size,))
# TODO: log prob is affected by clamping, how to handle that?
log_prob = (
self.noise_dist.log_prob(noise).to(action.device).sum(dim=1).view(-1, 1)
).clamp(LOG_PROB_MIN, LOG_PROB_MAX)
action = (action + noise.to(action.device)).clamp(
*CONTINUOUS_TRAINING_ACTION_RANGE
)
return rlt.ActorOutput(action=action, log_prob=log_prob)
class GaussianFullyConnectedActor(ModelBase):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
scale: float = 0.05,
use_batch_norm: bool = False,
use_layer_norm: bool = False,
use_l2_normalization: bool = False,
):
"""
Args:
use_l2_normalization: if True, divides action by l2 norm.
"""
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
# The last layer is mean & scale for reparameterization trick
self.fc = FullyConnectedNetwork(
[state_dim] + sizes + [action_dim * 2],
activations + ["linear"],
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
)
self.use_layer_norm = use_layer_norm
if self.use_layer_norm:
self.loc_layer_norm = torch.nn.LayerNorm(action_dim)
self.scale_layer_norm = torch.nn.LayerNorm(action_dim)
self.use_l2_normalization = use_l2_normalization
# used to calculate log-prob
self.const = math.log(math.sqrt(2 * math.pi))
self.eps = 1e-6
def input_prototype(self):
return rlt.FeatureData(torch.randn(1, self.state_dim))
def _normal_log_prob(self, r, scale_log):
"""
Compute log probability from normal distribution the same way as
torch.distributions.normal.Normal, which is:
```
-((value - loc) ** 2) / (2 * var) - log_scale - math.log(math.sqrt(2 * math.pi))
```
In the context of this class, `value = loc + r * scale`. Therefore, this
        function only takes `r` & `scale_log`; the formula reduces to the expression below.
The primary reason we don't use Normal class is that it currently
cannot be exported through ONNX.
"""
return -(r ** 2) / 2 - scale_log - self.const
def _squash_correction(self, squashed_action):
"""
Same as
https://github.com/haarnoja/sac/blob/108a4229be6f040360fcca983113df9c4ac23a6a/sac/policies/gaussian_policy.py#L133
"""
return (1 - squashed_action ** 2 + self.eps).log()
def _get_loc_and_scale_log(self, state: rlt.FeatureData):
loc_scale = self.fc(state.float_features)
loc = loc_scale[::, : self.action_dim]
scale_log = loc_scale[::, self.action_dim :]
if self.use_layer_norm:
loc = self.loc_layer_norm(loc)
scale_log = self.scale_layer_norm(scale_log)
scale_log = scale_log.clamp(LOG_PROB_MIN, LOG_PROB_MAX)
return loc, scale_log
def _squash_raw_action(self, raw_action: torch.Tensor) -> torch.Tensor:
# NOTE: without clamping to (-(1-eps), 1-eps), torch.tanh would output
# 1, and torch.atanh would map it to +inf, causing log_prob to be -inf.
squashed_action = torch.clamp(
torch.tanh(raw_action), -1.0 + self.eps, 1.0 - self.eps
)
if self.use_l2_normalization:
l2_norm = (squashed_action ** 2).sum(dim=1, keepdim=True).sqrt()
squashed_action = squashed_action / l2_norm
return squashed_action
def forward(self, state: rlt.FeatureData):
loc, scale_log = self._get_loc_and_scale_log(state)
r = torch.randn_like(scale_log, device=scale_log.device)
raw_action = loc + r * scale_log.exp()
squashed_action = self._squash_raw_action(raw_action)
squashed_loc = self._squash_raw_action(loc)
if SummaryWriterContext._global_step % 1000 == 0:
SummaryWriterContext.add_histogram("actor/forward/loc", loc.detach().cpu())
SummaryWriterContext.add_histogram(
"actor/forward/scale_log", scale_log.detach().cpu()
)
return rlt.ActorOutput(
action=squashed_action,
log_prob=self.get_log_prob(state, squashed_action),
squashed_mean=squashed_loc,
)
def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor):
"""
Action is expected to be squashed with tanh
"""
if self.use_l2_normalization:
# TODO: calculate log_prob for l2 normalization
# https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector
# http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf
pass
loc, scale_log = self._get_loc_and_scale_log(state)
raw_action = torch.atanh(squashed_action)
r = (raw_action - loc) / scale_log.exp()
log_prob = self._normal_log_prob(r, scale_log)
squash_correction = self._squash_correction(squashed_action)
if SummaryWriterContext._global_step % 1000 == 0:
SummaryWriterContext.add_histogram(
"actor/get_log_prob/loc", loc.detach().cpu()
)
SummaryWriterContext.add_histogram(
"actor/get_log_prob/scale_log", scale_log.detach().cpu()
)
SummaryWriterContext.add_histogram(
"actor/get_log_prob/log_prob", log_prob.detach().cpu()
)
SummaryWriterContext.add_histogram(
"actor/get_log_prob/squash_correction", squash_correction.detach().cpu()
)
return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)
class DirichletFullyConnectedActor(ModelBase):
# Used to prevent concentration from being 0
EPSILON = 1e-6
def __init__(self, state_dim, action_dim, sizes, activations, use_batch_norm=False):
"""
AKA the multivariate beta distribution. Used in cases where actor's action
must sum to 1.
"""
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
# The last layer gives the concentration of the distribution.
self.fc = FullyConnectedNetwork(
[state_dim] + sizes + [action_dim],
activations + ["linear"],
use_batch_norm=use_batch_norm,
)
def input_prototype(self):
return rlt.FeatureData(torch.randn(1, self.state_dim))
def _get_concentration(self, state):
"""
Get concentration of distribution.
https://stats.stackexchange.com/questions/244917/what-exactly-is-the-alpha-in-the-dirichlet-distribution
"""
return self.fc(state.float_features).exp() + self.EPSILON
@torch.no_grad()
def get_log_prob(self, state, action):
concentration = self._get_concentration(state)
log_prob = Dirichlet(concentration).log_prob(action)
return log_prob.unsqueeze(dim=1)
def forward(self, state):
concentration = self._get_concentration(state)
if self.training:
# PyTorch can't backwards pass _sample_dirichlet
action = Dirichlet(concentration).rsample()
else:
# ONNX can't export Dirichlet()
action = torch._sample_dirichlet(concentration)
log_prob = Dirichlet(concentration).log_prob(action)
return rlt.ActorOutput(action=action, log_prob=log_prob.unsqueeze(dim=1))
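# Illustrative usage sketch (not part of the original module): sample a squashed
# Gaussian action and recompute its log-probability via get_log_prob(). All
# dimensions below are arbitrary.
if __name__ == "__main__":
    _actor = GaussianFullyConnectedActor(
        state_dim=3, action_dim=2, sizes=[8], activations=["relu"]
    )
    _state = rlt.FeatureData(torch.randn(4, 3))
    _out = _actor(_state)
    _log_prob = _actor.get_log_prob(_state, _out.action)
    print(_out.action.shape, _log_prob.shape)  # (4, 2), (4, 1)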
| 11,437 | 37.641892 | 122 | py |
ReAgent | ReAgent-master/reagent/models/embedding_bag_concat.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Dict, List
import torch
from reagent.core import types as rlt
from reagent.models.base import ModelBase
class EmbeddingBagConcat(ModelBase):
"""
Concatenating embedding with float features before passing the input
to DQN
"""
def __init__(
self,
state_dim: int,
model_feature_config: rlt.ModelFeatureConfig,
embedding_dim: int,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
self.state_dim = state_dim
# for input prototype
self._id_list_feature_names: List[str] = [
config.name for config in model_feature_config.id_list_feature_configs
]
self._id_score_list_feature_names: List[str] = [
config.name for config in model_feature_config.id_score_list_feature_configs
]
self.embedding_bags = torch.nn.ModuleDict(
{
table_name: torch.nn.EmbeddingBag(
num_embeddings=id_mapping.value.table_size,
embedding_dim=embedding_dim,
mode="sum",
)
for table_name, id_mapping in model_feature_config.id_mapping_config.items()
}
)
self.feat2table: Dict[str, str] = {
feature_name: config.id_mapping_name
for feature_name, config in model_feature_config.name2config.items()
}
self._output_dim = (
state_dim
+ len(self._id_list_feature_names) * embedding_dim
+ len(self._id_score_list_feature_names) * embedding_dim
)
@property
def output_dim(self) -> int:
return self._output_dim
def input_prototype(self):
id_list_features = {
k: (torch.tensor([0], dtype=torch.long), torch.tensor([], dtype=torch.long))
for k in self._id_list_feature_names
}
id_score_list_features = {
k: (
torch.tensor([0], dtype=torch.long),
torch.tensor([], dtype=torch.long),
torch.tensor([], dtype=torch.float),
)
for k in self._id_score_list_feature_names
}
return rlt.FeatureData(
float_features=torch.randn(1, self.state_dim),
id_list_features=id_list_features,
id_score_list_features=id_score_list_features,
)
def forward(self, state: rlt.FeatureData):
# id_list is (offset, value); sum pooling
id_list_embeddings = [
self.embedding_bags[self.feat2table[feature_name]](input=v[1], offsets=v[0])
for feature_name, v in state.id_list_features.items()
]
# id_score_list is (offset, key, value); weighted sum pooling
id_score_list_embeddings = [
self.embedding_bags[self.feat2table[feature_name]](
input=v[1], offsets=v[0], per_sample_weights=v[2]
)
for feature_name, v in state.id_score_list_features.items()
]
return torch.cat(
id_list_embeddings + id_score_list_embeddings + [state.float_features],
dim=1,
)
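# Illustrative sketch (not part of the original module): the (offsets, values)
# format that forward() expects for each id-list feature, shown directly on
# nn.EmbeddingBag. Example 0 pools ids [1, 4, 2] and example 1 pools [3], so
# sum-pooling yields one embedding row per example.
if __name__ == "__main__":
    _bag = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=6, mode="sum")
    _values = torch.tensor([1, 4, 2, 3], dtype=torch.long)
    _offsets = torch.tensor([0, 3], dtype=torch.long)
    print(_bag(input=_values, offsets=_offsets).shape)  # torch.Size([2, 6])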
| 3,303 | 33.778947 | 92 | py |
ReAgent | ReAgent-master/reagent/models/convolutional_network.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import collections
import logging
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from reagent.models.fully_connected_network import FullyConnectedNetwork
logger = logging.getLogger(__name__)
CnnParameters = collections.namedtuple(
"CnnParameters",
[
"conv_dims",
"conv_height_kernels",
"conv_width_kernels",
"pool_types",
"pool_kernels_strides",
"num_input_channels",
"input_height",
"input_width",
],
)
class ConvolutionalNetwork(nn.Module):
def __init__(self, cnn_parameters, layers, activations, use_layer_norm) -> None:
super().__init__()
self.conv_dims = cnn_parameters.conv_dims
self.conv_height_kernels = cnn_parameters.conv_height_kernels
self.conv_width_kernels = cnn_parameters.conv_width_kernels
self.use_layer_norm = use_layer_norm
self.conv_layers: nn.ModuleList = nn.ModuleList()
self.pool_layers: nn.ModuleList = nn.ModuleList()
self.layer_norm_layers: nn.ModuleList = nn.ModuleList()
for i, _ in enumerate(self.conv_dims[1:]):
self.conv_layers.append(
nn.Conv2d(
self.conv_dims[i],
self.conv_dims[i + 1],
kernel_size=(
self.conv_height_kernels[i],
self.conv_width_kernels[i],
),
)
)
nn.init.kaiming_normal_(self.conv_layers[i].weight)
if cnn_parameters.pool_types[i] == "max":
self.pool_layers.append(
nn.MaxPool2d(kernel_size=cnn_parameters.pool_kernels_strides[i])
)
else:
                assert False, "Unknown pooling type: {}".format(cnn_parameters.pool_types[i])
if self.use_layer_norm:
self.layer_norm_layers.append(nn.GroupNorm(1, self.conv_dims[i + 1]))
input_size = (
cnn_parameters.num_input_channels,
cnn_parameters.input_height,
cnn_parameters.input_width,
)
conv_out = self.conv_forward(torch.ones(1, *input_size))
self.fc_input_dim = int(np.prod(conv_out.size()[1:]))
layers[0] = self.fc_input_dim
self.feed_forward = FullyConnectedNetwork(
layers, activations, use_layer_norm=use_layer_norm
)
def conv_forward(self, input):
x = input
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
if self.use_layer_norm:
x = self.layer_norm_layers[i](x)
x = F.relu(x)
x = self.pool_layers[i](x)
return x
def forward(self, input) -> torch.FloatTensor:
"""Forward pass for generic convnet DNNs. Assumes activation names
are valid pytorch activation names.
:param input image tensor
"""
x = self.conv_forward(input)
x = x.view(-1, self.fc_input_dim)
# pyre-fixme[7]: Expected `FloatTensor` but got `Tensor`.
return self.feed_forward.forward(x)
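# Illustrative sketch (not part of the original module): a single conv + max-pool
# block followed by the fully connected head. layers[0] is overwritten with the
# flattened conv output size, so its initial value is just a placeholder.
if __name__ == "__main__":
    _params = CnnParameters(
        conv_dims=[3, 8],
        conv_height_kernels=[3],
        conv_width_kernels=[3],
        pool_types=["max"],
        pool_kernels_strides=[2],
        num_input_channels=3,
        input_height=16,
        input_width=16,
    )
    _net = ConvolutionalNetwork(
        _params, layers=[-1, 32, 4], activations=["relu", "linear"], use_layer_norm=False
    )
    print(_net(torch.ones(2, 3, 16, 16)).shape)  # torch.Size([2, 4])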
| 3,256 | 32.234694 | 85 | py |
ReAgent | ReAgent-master/reagent/models/seq2slate_reward.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core import types as rlt
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import DECODER_START_SYMBOL, subsequent_mask
from reagent.models.base import ModelBase
from reagent.models.seq2slate import (
Decoder,
DecoderLayer,
Embedder,
Encoder,
EncoderLayer,
MultiHeadedAttention,
PositionalEncoding,
PositionwiseFeedForward,
)
logger = logging.getLogger(__name__)
class Seq2SlateRewardNetBase(ModelBase):
def __init__(
self,
state_dim: int,
candidate_dim: int,
dim_model: int,
num_stacked_layers: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
):
super().__init__()
self.state_dim = state_dim
self.candidate_dim = candidate_dim
self.dim_model = dim_model
self.num_stacked_layers = num_stacked_layers
self.candidate_embedder = Embedder(candidate_dim, dim_model // 2)
self.state_embedder = Embedder(state_dim, dim_model // 2)
self.max_src_seq_len = max_src_seq_len
self.max_tgt_seq_len = max_tgt_seq_len
def input_prototype(self):
return rlt.PreprocessedRankingInput.from_tensors(
state=torch.randn(1, self.state_dim),
src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim),
tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len),
tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len),
tgt_out_idx=torch.arange(self.max_tgt_seq_len).reshape(1, -1) + 2,
)
def _init_params(self):
# Initialize parameters with Glorot / fan_avg.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def _num_of_params(model):
return len(torch.cat([p.flatten() for p in model.parameters()]))
logger.info(f"Num of total params: {_num_of_params(self)}, {self._get_name()}")
class Seq2SlateGRURewardNet(Seq2SlateRewardNetBase):
def __init__(
self,
state_dim: int,
candidate_dim: int,
num_stacked_layers: int,
dim_model: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
):
super().__init__(
state_dim,
candidate_dim,
dim_model,
num_stacked_layers,
max_src_seq_len,
max_tgt_seq_len,
)
self.gru = nn.GRU(
input_size=dim_model,
hidden_size=dim_model,
num_layers=num_stacked_layers,
batch_first=True,
)
self.end_of_seq_vec = nn.Parameter(
torch.zeros(candidate_dim), requires_grad=True
)
self.proj = nn.Linear(2 * dim_model, 1)
self._init_params()
def _convert_seq2slate_to_reward_model_format(
self, input: rlt.PreprocessedRankingInput
):
device = next(self.parameters()).device
# pyre-fixme[16]: Optional type has no attribute `float_features`.
batch_size, tgt_seq_len, candidate_dim = input.tgt_out_seq.float_features.shape
src_seq_len = input.src_seq.float_features.shape[1]
assert self.max_tgt_seq_len == tgt_seq_len
assert self.max_src_seq_len == src_seq_len
# unselected_idx stores indices of items that are not included in the slate
unselected_idx = torch.ones(batch_size, src_seq_len, device=device)
unselected_idx[
torch.arange(batch_size, device=device).repeat_interleave(
torch.tensor(tgt_seq_len, device=device)
),
# pyre-fixme[16]: Optional type has no attribute `flatten`.
input.tgt_out_idx.flatten() - 2,
] = 0
# shape: batch_size, (src_seq_len - tgt_seq_len)
unselected_idx = torch.nonzero(unselected_idx, as_tuple=True)[1].reshape(
batch_size, src_seq_len - tgt_seq_len
)
# shape: batch_size, (src_seq_len - tgt_seq_len), candidate_dim
unselected_candidate_features = gather(
input.src_seq.float_features, unselected_idx
)
# shape: batch_size, src_seq_len + 1, candidate_dim
tgt_in_seq = torch.cat(
(
input.tgt_out_seq.float_features,
unselected_candidate_features,
self.end_of_seq_vec.repeat(batch_size, 1, 1),
),
dim=1,
)
return rlt.PreprocessedRankingInput.from_tensors(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_in_seq=tgt_in_seq,
)
def embed(self, state, tgt_in_seq):
batch_size = state.shape[0]
# candidate_embed: batch_size, src_seq_len + 1, dim_model/2
candidate_embed = self.candidate_embedder(tgt_in_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# transform state_embed into shape: batch_size, src_seq_len, dim_model/2
state_embed = state_embed.repeat(1, self.max_src_seq_len + 1).reshape(
batch_size, self.max_src_seq_len + 1, -1
)
# Input at each encoder step is actually concatenation of state_embed
# and candidate embed.
# shape: batch_size, src_seq_len + 1, dim_model
tgt_in_embed = torch.cat((state_embed, candidate_embed), dim=2)
return tgt_in_embed
def forward(self, input: rlt.PreprocessedRankingInput):
input = self._convert_seq2slate_to_reward_model_format(input)
state = input.state.float_features
tgt_in_seq = input.tgt_in_seq.float_features
        # shape: batch_size, src_seq_len + 1, dim_model
tgt_in_embed = self.embed(state, tgt_in_seq)
# output shape: batch_size, src_seq_len + 1, dim_model
output, hn = self.gru(tgt_in_embed)
# hn shape: batch_size, dim_model
hn = hn[-1] # top layer's hidden state
# attention, using hidden as query, outputs as keys and values
# shape: batch_size, src_seq_len + 1
attn_weights = F.softmax(
torch.bmm(
output,
hn.unsqueeze(2) / torch.sqrt(torch.tensor(self.candidate_dim).float()),
).squeeze(2),
dim=1,
)
# shape: batch_size, dim_model
context_vector = torch.bmm(attn_weights.unsqueeze(1), output).squeeze(1)
# reward prediction depends on hidden state of the last step + context vector
# shape: batch_size, 2 * dim_model
seq_embed = torch.cat((hn, context_vector), dim=1)
# shape: batch_size, 1
pred_reward = self.proj(seq_embed)
return rlt.RewardNetworkOutput(predicted_reward=pred_reward)
class Seq2SlateTransformerRewardNet(Seq2SlateRewardNetBase):
def __init__(
self,
state_dim: int,
candidate_dim: int,
num_stacked_layers: int,
num_heads: int,
dim_model: int,
dim_feedforward: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
):
"""
A reward network that predicts slate reward.
It uses a transformer-based encoder to encode the items shown in the slate.
The slate reward is predicted by attending all encoder steps' outputs.
"""
super().__init__(
state_dim,
candidate_dim,
dim_model,
num_stacked_layers,
max_src_seq_len,
max_tgt_seq_len,
)
self.num_heads = num_heads
self.dim_feedforward = dim_feedforward
c = copy.deepcopy
attn = MultiHeadedAttention(num_heads, dim_model)
ff = PositionwiseFeedForward(dim_model, dim_feedforward)
self.encoder = Encoder(
EncoderLayer(dim_model, c(attn), c(ff)), num_stacked_layers
)
self.decoder = Decoder(
DecoderLayer(dim_model, c(attn), c(attn), c(ff)), num_stacked_layers
)
self.positional_encoding_encoder = PositionalEncoding(dim_model)
self.positional_encoding_decoder = PositionalEncoding(dim_model)
self.proj = nn.Linear(dim_model, 1)
self.decoder_start_vec = nn.Parameter(
torch.zeros(candidate_dim), requires_grad=True
)
self._init_params()
def encode(self, state, src_seq, src_mask):
# state: batch_size, state_dim
# src_seq: batch_size, src_seq_len, dim_candidate
# src_src_mask shape: batch_size, src_seq_len, src_seq_len
batch_size = src_seq.shape[0]
# candidate_embed: batch_size, src_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(src_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# transform state_embed into shape: batch_size, src_seq_len, dim_model/2
state_embed = state_embed.repeat(1, self.max_src_seq_len).reshape(
batch_size, self.max_src_seq_len, -1
)
# Input at each encoder step is actually concatenation of state_embed
# and candidate embed. state_embed is replicated at each encoding step.
# src_embed shape: batch_size, src_seq_len, dim_model
src_embed = self.positional_encoding_encoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# encoder_output shape: batch_size, src_seq_len, dim_model
return self.encoder(src_embed, src_mask)
def decode(
self, memory, state, tgt_src_mask, tgt_in_seq, tgt_tgt_mask, tgt_seq_len
):
# memory is the output of the encoder, the attention of each input symbol
# memory shape: batch_size, src_seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_seq shape: batch_size, tgt_seq_len, dim_candidate
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
batch_size = tgt_in_seq.shape[0]
# candidate_embed shape: batch_size, seq_len, dim_model/2
candidate_embed = self.candidate_embedder(tgt_in_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# state_embed: batch_size, seq_len, dim_model/2
state_embed = state_embed.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, -1
)
# tgt_embed: batch_size, seq_len, dim_model
tgt_embed = self.positional_encoding_decoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# output of decoder will be later transformed into probabilities over symbols.
# shape: batch_size, seq_len, dim_model
return self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask)
def _convert_seq2slate_to_reward_model_format(
self, input: rlt.PreprocessedRankingInput
):
"""
In the reward model, the transformer decoder should see the full
sequences; while in seq2slate, the decoder only sees the sequence
before the last item.
"""
device = next(self.parameters()).device
# pyre-fixme[16]: Optional type has no attribute `float_features`.
batch_size, tgt_seq_len, candidate_dim = input.tgt_out_seq.float_features.shape
assert self.max_tgt_seq_len == tgt_seq_len
tgt_tgt_mask = subsequent_mask(tgt_seq_len + 1, device)
# shape: batch_size, tgt_seq_len + 1, candidate_dim
tgt_in_seq = torch.cat(
(
self.decoder_start_vec.repeat(batch_size, 1, 1),
input.tgt_out_seq.float_features,
),
dim=1,
)
return rlt.PreprocessedRankingInput.from_tensors(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=tgt_tgt_mask,
)
def forward(self, input: rlt.PreprocessedRankingInput):
input = self._convert_seq2slate_to_reward_model_format(input)
state, src_seq, tgt_in_seq, src_src_mask, tgt_tgt_mask = (
input.state.float_features,
input.src_seq.float_features,
input.tgt_in_seq.float_features,
input.src_src_mask,
input.tgt_tgt_mask,
)
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq, src_src_mask)
batch_size, tgt_seq_len, _ = tgt_in_seq.shape
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
tgt_src_mask = torch.ones(
batch_size, tgt_seq_len, self.max_src_seq_len, device=src_src_mask.device
)
# decoder_output shape: batch_size, tgt_seq_len, dim_model
decoder_output = self.decode(
memory=encoder_output,
state=state,
tgt_src_mask=tgt_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=tgt_tgt_mask,
tgt_seq_len=tgt_seq_len,
)
# use the decoder's last step embedding to predict the slate reward
pred_reward = self.proj(decoder_output[:, -1, :])
return rlt.RewardNetworkOutput(predicted_reward=pred_reward)
class Seq2SlateRewardNetJITWrapper(ModelBase):
def __init__(self, model: Seq2SlateRewardNetBase):
super().__init__()
self.model = model
def input_prototype(self, use_gpu=False):
input_prototype = self.model.input_prototype()
if use_gpu:
input_prototype = input_prototype.cuda()
return (
input_prototype.state.float_features,
input_prototype.src_seq.float_features,
input_prototype.tgt_out_seq.float_features,
input_prototype.src_src_mask,
input_prototype.tgt_out_idx,
)
def forward(
self,
state: torch.Tensor,
src_seq: torch.Tensor,
tgt_out_seq: torch.Tensor,
src_src_mask: torch.Tensor,
tgt_out_idx: torch.Tensor,
) -> torch.Tensor:
return self.model(
rlt.PreprocessedRankingInput(
state=rlt.FeatureData(float_features=state),
src_seq=rlt.FeatureData(float_features=src_seq),
tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
src_src_mask=src_src_mask,
tgt_out_idx=tgt_out_idx,
)
).predicted_reward
class Seq2SlateRewardNetEnsemble(ModelBase):
def __init__(self, models: List[ModelBase]):
super().__init__()
self.models = models
def forward(
self,
state: torch.Tensor,
src_seq: torch.Tensor,
tgt_out_seq: torch.Tensor,
src_src_mask: torch.Tensor,
tgt_out_idx: torch.Tensor,
) -> torch.Tensor:
agg_pred = torch.cat(
[
model(
state,
src_seq,
tgt_out_seq,
src_src_mask,
tgt_out_idx,
)
for model in self.models
],
dim=1,
)
return torch.median(agg_pred, dim=1, keepdim=True).values
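# Illustrative sketch (not part of the original module): score the module's own
# input_prototype() with the GRU-based reward net. All dimensions are arbitrary;
# the transformer variant consumes the same input format.
if __name__ == "__main__":
    _net = Seq2SlateGRURewardNet(
        state_dim=3,
        candidate_dim=4,
        num_stacked_layers=1,
        dim_model=8,
        max_src_seq_len=5,
        max_tgt_seq_len=2,
    )
    print(_net(_net.input_prototype()).predicted_reward.shape)  # torch.Size([1, 1])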
| 15,673 | 35.793427 | 87 | py |
ReAgent | ReAgent-master/reagent/models/categorical_dqn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn.functional as F
from reagent.core import types as rlt
from reagent.models.base import ModelBase
class CategoricalDQN(ModelBase):
def __init__(
self,
distributional_network: ModelBase,
*,
qmin: float,
qmax: float,
num_atoms: int
):
super().__init__()
self.distributional_network = distributional_network
self.support = torch.linspace(qmin, qmax, num_atoms)
def input_prototype(self):
return self.distributional_network.input_prototype()
def forward(self, state: rlt.FeatureData):
# pyre-fixme[16]: `Tensor` has no attribute `exp`.
dist = self.log_dist(state).exp()
q_values = (dist * self.support.to(dist.device)).sum(2)
return q_values
def log_dist(self, state: rlt.FeatureData) -> torch.Tensor:
log_dist = self.distributional_network(state)
return F.log_softmax(log_dist, -1)
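# Illustrative sketch (not part of the original module): a toy distributional
# network whose output has shape (batch, action_dim, num_atoms). CategoricalDQN
# converts it to expected Q-values by weighting softmax probabilities with the
# support. All dimensions below are arbitrary.
if __name__ == "__main__":
    class _ToyDistNet(ModelBase):
        def __init__(self, state_dim=3, action_dim=2, num_atoms=5):
            super().__init__()
            self.linear = torch.nn.Linear(state_dim, action_dim * num_atoms)
            self.action_dim = action_dim
            self.num_atoms = num_atoms
        def input_prototype(self):
            return rlt.FeatureData(torch.randn(1, 3))
        def forward(self, state: rlt.FeatureData):
            out = self.linear(state.float_features)
            return out.view(-1, self.action_dim, self.num_atoms)
    _dqn = CategoricalDQN(_ToyDistNet(), qmin=-10.0, qmax=10.0, num_atoms=5)
    print(_dqn(rlt.FeatureData(torch.randn(4, 3))).shape)  # torch.Size([4, 2])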
| 1,055 | 29.171429 | 71 | py |
ReAgent | ReAgent-master/reagent/models/dqn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional, Union
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.models.fully_connected_network import (
FloatFeatureFullyConnected,
)
INVALID_ACTION_CONSTANT = -1e10
class FullyConnectedDQN(FloatFeatureFullyConnected):
def __init__(
self,
state_dim,
action_dim,
sizes,
activations,
*,
num_atoms: Optional[int] = None,
use_batch_norm: bool = False,
dropout_ratio: float = 0.0,
normalized_output: bool = False,
use_layer_norm: bool = False,
):
super().__init__(
state_dim=state_dim,
output_dim=action_dim,
sizes=sizes,
activations=activations,
num_atoms=num_atoms,
use_batch_norm=use_batch_norm,
dropout_ratio=dropout_ratio,
normalized_output=normalized_output,
use_layer_norm=use_layer_norm,
)
self.action_dim = self.output_dim
def forward(
self,
state: rlt.FeatureData,
possible_actions_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = super().forward(state=state)
if possible_actions_mask is not None:
            # add a large negative penalty to impossible actions so they are never selected
x = x + (1 - possible_actions_mask.float()) * INVALID_ACTION_CONSTANT
return x
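# Illustrative sketch (not part of the original module): masked actions receive
# the INVALID_ACTION_CONSTANT penalty, so an argmax over Q-values never selects
# them. All dimensions below are arbitrary.
if __name__ == "__main__":
    _dqn = FullyConnectedDQN(state_dim=3, action_dim=4, sizes=[8], activations=["relu"])
    _mask = torch.tensor([[1, 0, 1, 1], [1, 1, 1, 1]])
    _q = _dqn(rlt.FeatureData(torch.randn(2, 3)), possible_actions_mask=_mask)
    print(_q.shape, bool(_q[0, 1] < -1e9))  # torch.Size([2, 4]) True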
| 1,541 | 27.555556 | 91 | py |
ReAgent | ReAgent-master/reagent/models/mlp_scorer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import reagent.core.types as rlt
import torch
from reagent.models.base import ModelBase
class MLPScorer(ModelBase):
"""
Log-space in and out
"""
def __init__(
self,
mlp: torch.nn.Module,
has_user_feat: bool = False,
) -> None:
super().__init__()
self.mlp = mlp
self.has_user_feat = has_user_feat
def forward(self, obs: rlt.FeatureData):
mlp_input = obs.get_ranking_state(self.has_user_feat)
scores = self.mlp(mlp_input)
return scores.squeeze(-1)
def input_prototype(self):
# Sample config for input
batch_size = 2
state_dim = 5
num_docs = 3
candidate_dim = 4
return rlt.FeatureData(
float_features=torch.randn((batch_size, state_dim)),
candidate_docs=rlt.DocList(
float_features=torch.randn(batch_size, num_docs, candidate_dim)
),
)
| 1,043 | 24.463415 | 79 | py |
ReAgent | ReAgent-master/reagent/models/mdn_rnn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import deque
from typing import NamedTuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as f
from reagent.core import types as rlt
from reagent.core.torch_utils import stack
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
class MDNRNN(nn.Module):
"""Mixture Density Network - Recurrent Neural Network"""
def __init__(
self, state_dim, action_dim, num_hiddens, num_hidden_layers, num_gaussians
):
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.num_hiddens = num_hiddens
self.num_hidden_layers = num_hidden_layers
self.rnn = nn.LSTM(
input_size=state_dim + action_dim,
hidden_size=num_hiddens,
num_layers=num_hidden_layers,
)
self.num_gaussians = num_gaussians
# outputs:
# 1. mu, sigma, and pi for each gaussian
# 2. non-terminal signal
# 3. reward
self.gmm_linear = nn.Linear(
num_hiddens, (2 * state_dim + 1) * num_gaussians + 2
)
def forward(self, actions: torch.Tensor, states: torch.Tensor, hidden=None):
"""Forward pass of MDN-RNN
:param actions: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor
:param states: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
:returns: parameters of the GMM prediction for the next state,
gaussian prediction of the reward and logit prediction of
non-terminality. And the RNN's outputs.
- mus: (SEQ_LEN, BATCH_SIZE, NUM_GAUSSIANS, STATE_DIM) torch tensor
- sigmas: (SEQ_LEN, BATCH_SIZE, NUM_GAUSSIANS, STATE_DIM) torch tensor
- logpi: (SEQ_LEN, BATCH_SIZE, NUM_GAUSSIANS) torch tensor
- reward: (SEQ_LEN, BATCH_SIZE) torch tensor
- not_terminal: (SEQ_LEN, BATCH_SIZE) torch tensor
- last_step_hidden_and_cell: TUPLE(
(NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE),
(NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE)
) torch tensor
- all_steps_hidden: (SEQ_LEN, BATCH_SIZE, HIDDEN_SIZE) torch tensor
"""
device = next(self.parameters()).device
actions = actions.to(device)
states = states.to(device)
seq_len, batch_size = actions.size(0), actions.size(1)
ins = torch.cat([actions, states], dim=-1)
all_steps_hidden, last_step_hidden_and_cell = self.rnn(ins, hidden)
gmm_outs = self.gmm_linear(all_steps_hidden)
stride = self.num_gaussians * self.state_dim
mus = gmm_outs[:, :, :stride].view(
seq_len, batch_size, self.num_gaussians, self.state_dim
)
sigmas = torch.exp(
gmm_outs[:, :, stride : 2 * stride].view(
seq_len, batch_size, self.num_gaussians, self.state_dim
)
)
logpi = f.log_softmax(
gmm_outs[:, :, 2 * stride : 2 * stride + self.num_gaussians].view(
seq_len, batch_size, self.num_gaussians
),
dim=-1,
)
reward = gmm_outs[:, :, -2]
not_terminal = gmm_outs[:, :, -1]
return (
mus,
sigmas,
logpi,
reward,
not_terminal,
all_steps_hidden,
last_step_hidden_and_cell,
)
def get_initial_hidden_state(self, batch_size=1):
hidden = (
torch.zeros(self.num_hidden_layers, batch_size, self.num_hiddens),
torch.zeros(self.num_hidden_layers, batch_size, self.num_hiddens),
)
return hidden
class MDNRNNMemorySample(NamedTuple):
state: np.ndarray
action: np.ndarray
next_state: np.ndarray
reward: float
not_terminal: float
# TODO(T67083129): use ReplayBuffer in circular_replay_buffer.py
class MDNRNNMemoryPool:
def __init__(self, max_replay_memory_size):
self.replay_memory = deque(maxlen=max_replay_memory_size)
self.max_replay_memory_size = max_replay_memory_size
self.accu_memory_num = 0
def deque_sample(self, indices):
for i in indices:
s = self.replay_memory[i]
yield s.state, s.action, s.next_state, s.reward, s.not_terminal
def sample_memories(self, batch_size, use_gpu=False) -> rlt.MemoryNetworkInput:
"""
:param batch_size: number of samples to return
:param use_gpu: whether to put samples on gpu
State's shape is SEQ_LEN x BATCH_SIZE x STATE_DIM, for example.
By default, MDN-RNN consumes data with SEQ_LEN as the first dimension.
"""
sample_indices = np.random.randint(self.memory_size, size=batch_size)
device = torch.device("cuda") if use_gpu else torch.device("cpu")
# state/next state shape: batch_size x seq_len x state_dim
# action shape: batch_size x seq_len x action_dim
# reward/not_terminal shape: batch_size x seq_len
state, action, next_state, reward, not_terminal = map(
lambda x: stack(x).float().to(device),
zip(*self.deque_sample(sample_indices)),
)
# make shapes seq_len x batch_size x feature_dim
state, action, next_state, reward, not_terminal = transpose(
state, action, next_state, reward, not_terminal
)
return rlt.MemoryNetworkInput(
state=rlt.FeatureData(float_features=state),
reward=reward,
time_diff=torch.ones_like(reward).float(),
action=action,
next_state=rlt.FeatureData(float_features=next_state),
not_terminal=not_terminal,
step=None,
)
def insert_into_memory(self, state, action, next_state, reward, not_terminal):
self.replay_memory.append(
MDNRNNMemorySample(
state=state,
action=action,
next_state=next_state,
reward=reward,
not_terminal=not_terminal,
)
)
self.accu_memory_num += 1
@property
def memory_size(self):
return min(self.accu_memory_num, self.max_replay_memory_size)
def transpose(*args):
res = []
for arg in args:
res.append(arg.transpose(1, 0))
return res
def gmm_loss(batch, mus, sigmas, logpi, reduce=True):
"""Computes the gmm loss.
Compute minus the log probability of batch under the GMM model described
by mus, sigmas, pi. Precisely, with bs1, bs2, ... the sizes of the batch
    dimensions (several batch dimensions are useful when you have both a batch
axis and a time step axis), gs the number of mixtures and fs the number of
features.
:param batch: (bs1, bs2, *, fs) torch tensor
:param mus: (bs1, bs2, *, gs, fs) torch tensor
:param sigmas: (bs1, bs2, *, gs, fs) torch tensor
:param logpi: (bs1, bs2, *, gs) torch tensor
:param reduce: if not reduce, the mean in the following formula is omitted
:returns:
loss(batch) = - mean_{i1=0..bs1, i2=0..bs2, ...} log(
sum_{k=1..gs} pi[i1, i2, ..., k] * N(
batch[i1, i2, ..., :] | mus[i1, i2, ..., k, :], sigmas[i1, i2, ..., k, :]))
NOTE: The loss is not reduced along the feature dimension (i.e. it should
    scale linearly with fs).
Adapted from: https://github.com/ctallec/world-models
"""
# for non-image based environment, batch's shape before unsqueeze:
# (seq_len, batch_size, fs)
batch = batch.unsqueeze(-2)
normal_dist = Normal(mus, sigmas)
g_log_probs = normal_dist.log_prob(batch)
# According to the world model paper, the prediction of next state is a
# factored Gaussian distribution (i.e., the covariance matrix of the multi-
# dimensional Gaussian distribution is a diagonal matrix). Hence we can sum
# log probability of each dimension when calculating log joint probability
g_log_probs = logpi + torch.sum(g_log_probs, dim=-1)
# log sum exp
max_log_probs = torch.max(g_log_probs, dim=-1, keepdim=True)[0]
g_log_probs = g_log_probs - max_log_probs
g_probs = torch.exp(g_log_probs)
probs = torch.sum(g_probs, dim=-1)
log_prob = max_log_probs.squeeze() + torch.log(probs)
if reduce:
return -(torch.mean(log_prob))
return -log_prob
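# Illustrative sketch (not part of the original module): run a tiny MDN-RNN on
# random (SEQ_LEN, BATCH_SIZE, *) tensors and feed its GMM outputs, together
# with the next states, into gmm_loss. All dimensions below are arbitrary.
if __name__ == "__main__":
    _seq, _bsz, _sdim, _adim, _gs = 7, 3, 2, 4, 5
    _mdnrnn = MDNRNN(
        state_dim=_sdim,
        action_dim=_adim,
        num_hiddens=8,
        num_hidden_layers=1,
        num_gaussians=_gs,
    )
    _mus, _sigmas, _logpi, _r, _nt, _, _ = _mdnrnn(
        torch.randn(_seq, _bsz, _adim), torch.randn(_seq, _bsz, _sdim)
    )
    _next_states = torch.randn(_seq, _bsz, _sdim)
    print(gmm_loss(_next_states, _mus, _sigmas, _logpi))  # scalar loss
    print(gmm_loss(_next_states, _mus, _sigmas, _logpi, reduce=False).shape)  # (7, 3)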
| 8,467 | 35.5 | 87 | py |
ReAgent | ReAgent-master/reagent/models/fully_connected_network.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.init as init
from reagent.core import types as rlt
from reagent.models.base import ModelBase
logger = logging.getLogger(__name__)
def gaussian_fill_w_gain(tensor, gain, dim_in, min_std=0.0) -> None:
"""Gaussian initialization with gain."""
init.normal_(tensor, mean=0, std=max(gain * math.sqrt(1 / dim_in), min_std))
ACTIVATION_MAP = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leaky_relu": nn.LeakyReLU,
"linear": nn.Identity,
"sigmoid": nn.Sigmoid,
}
class SlateBatchNorm1d(nn.Module):
"""
    Same as nn.BatchNorm1d if input has shape (batch_size, feat_dim).
But if input has shape (batch_size, num_candidates, item_feats), like in LearnedVM,
    we transpose it, since nn.BatchNorm1d computes batch normalization over the
    1st dimension, while we want to compute it over item_feats.
NOTE: this is different from nn.BatchNorm2d which is for CNNs, and expects 4D inputs
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.vanilla = nn.BatchNorm1d(*args, **kwargs)
def forward(self, x: torch.Tensor):
assert len(x.shape) in [2, 3], f"Invalid input shape {x.shape}"
if len(x.shape) == 2:
return self.vanilla(x)
if len(x.shape) == 3:
return self.vanilla(x.transpose(1, 2)).transpose(1, 2)
class FullyConnectedNetwork(ModelBase):
def __init__(
self,
layers,
activations,
*,
use_batch_norm: bool = False,
min_std: float = 0.0,
dropout_ratio: float = 0.0,
use_layer_norm: bool = False,
normalize_output: bool = False,
orthogonal_init: bool = False,
) -> None:
super().__init__()
self.input_dim = layers[0]
modules: List[nn.Module] = []
assert len(layers) == len(activations) + 1
for i, ((in_dim, out_dim), activation) in enumerate(
zip(zip(layers, layers[1:]), activations)
):
# Add BatchNorm1d
if use_batch_norm:
modules.append(SlateBatchNorm1d(in_dim))
# Add Linear
linear = nn.Linear(in_dim, out_dim)
# assuming activation is valid
gain = torch.nn.init.calculate_gain(activation)
if orthogonal_init:
# provably better https://openreview.net/forum?id=rkgqN1SYvr
nn.init.orthogonal_(linear.weight.data, gain=gain)
else:
# gaussian init
gaussian_fill_w_gain(
linear.weight, gain=gain, dim_in=in_dim, min_std=min_std
)
init.constant_(linear.bias, 0) # type: ignore
modules.append(linear)
# Add LayerNorm
if use_layer_norm and (normalize_output or i < len(activations) - 1):
modules.append(nn.LayerNorm(out_dim)) # type: ignore
# Add activation
if activation in ACTIVATION_MAP:
modules.append(ACTIVATION_MAP[activation]())
else:
# See if it matches any of the nn modules
modules.append(getattr(nn, activation)())
# Add Dropout
if dropout_ratio > 0.0 and (normalize_output or i < len(activations) - 1):
modules.append(nn.Dropout(p=dropout_ratio))
self.dnn = nn.Sequential(*modules) # type: ignore
def input_prototype(self):
return torch.randn(1, self.input_dim)
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Forward pass for generic feed-forward DNNs. Assumes activation names
are valid pytorch activation names.
        :param input: input tensor
"""
return self.dnn(input)
class FloatFeatureFullyConnected(ModelBase):
"""
A fully connected network that takes FloatFeatures input
and supports distributional prediction.
"""
def __init__(
self,
state_dim,
output_dim,
sizes,
activations,
*,
num_atoms: Optional[int] = None,
use_batch_norm: bool = False,
dropout_ratio: float = 0.0,
normalized_output: bool = False,
use_layer_norm: bool = False,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert output_dim > 0, "output_dim must be > 0, got {}".format(output_dim)
self.state_dim = state_dim
self.output_dim = output_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.num_atoms = num_atoms
self.fc = FullyConnectedNetwork(
[state_dim] + sizes + [output_dim * (num_atoms or 1)],
activations + ["linear"],
use_batch_norm=use_batch_norm,
dropout_ratio=dropout_ratio,
normalize_output=normalized_output,
use_layer_norm=use_layer_norm,
)
def input_prototype(self):
return rlt.FeatureData(self.fc.input_prototype())
def forward(
self,
state: rlt.FeatureData,
) -> torch.Tensor:
float_features = state.float_features
x = self.fc(float_features)
if self.num_atoms is not None:
            x = x.view(float_features.shape[0], self.output_dim, self.num_atoms)
return x
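# Illustrative sketch (not part of the original module): a small MLP exercised
# through both the raw-tensor interface and the FeatureData wrapper defined
# above. The sizes and activations below are arbitrary.
if __name__ == "__main__":
    _net = FullyConnectedNetwork([10, 16, 2], ["relu", "linear"])
    print(_net(torch.randn(5, 10)).shape)  # torch.Size([5, 2])
    _ffc = FloatFeatureFullyConnected(
        state_dim=10, output_dim=2, sizes=[16], activations=["relu"]
    )
    print(_ffc(rlt.FeatureData(torch.randn(5, 10))).shape)  # torch.Size([5, 2])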
| 5,648 | 31.653179 | 88 | py |
ReAgent | ReAgent-master/reagent/models/seq2reward_model.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from reagent.core import types as rlt
from reagent.models.base import ModelBase
class Seq2RewardNetwork(ModelBase):
def __init__(self, state_dim, action_dim, num_hiddens, num_hidden_layers):
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.num_hiddens = num_hiddens
self.num_hidden_layers = num_hidden_layers
self.rnn = nn.LSTM(
input_size=action_dim, hidden_size=num_hiddens, num_layers=num_hidden_layers
)
self.lstm_linear = nn.Linear(num_hiddens, 1)
self.map_linear = nn.Linear(state_dim, self.num_hiddens)
def input_prototype(self):
return (
rlt.FeatureData(torch.randn(1, 1, self.state_dim)),
rlt.FeatureData(torch.randn(1, 1, self.action_dim)),
)
def forward(
self,
state: rlt.FeatureData,
action: rlt.FeatureData,
valid_reward_len: Optional[torch.Tensor] = None,
):
"""Forward pass of Seq2Reward
        Takes in the current state and uses it as the initial hidden state.
        The input sequence consists of actions only.
        Outputs the predicted reward after each time step.
:param actions: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor
:param states: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor
:param valid_reward_len: (BATCH_SIZE,) torch tensor
        :returns: predicted accumulated reward at the last step for the given sequence
- acc_reward: (BATCH_SIZE, 1) torch tensor
"""
states = state.float_features
actions = action.float_features
batch_size = states.shape[1]
hidden = self.get_initial_hidden_state(
states[0][None, :, :], batch_size=batch_size
)
# all_steps_hidden shape: seq_len, batch_size, hidden_size
all_steps_hidden, _ = self.rnn(actions, hidden)
if valid_reward_len is None:
acc_reward = self.lstm_linear(all_steps_hidden[-1])
else:
valid_step_hidden = all_steps_hidden[
valid_reward_len - 1, torch.arange(batch_size)
]
acc_reward = self.lstm_linear(valid_step_hidden)
return rlt.Seq2RewardOutput(acc_reward=acc_reward)
def get_initial_hidden_state(self, state, batch_size=1):
# state embedding with linear mapping
# repeat state to fill num_hidden_layers at first dimension
state = state.repeat(self.num_hidden_layers, 1, 1)
state_embed = self.map_linear(state)
        # hidden = (hidden, cell) where hidden is initialized with a linear map
# of input state and cell is 0.
# hidden :
# TUPLE(
# (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE),
# (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE)
# ) torch tensor
hidden = (
state_embed,
torch.zeros(self.num_hidden_layers, batch_size, self.num_hiddens).to(
state.device
),
)
return hidden
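# Illustrative sketch (not part of the original module): roll a random action
# sequence (seq_len=5, batch=2) through the network, seeding the LSTM hidden
# state from the first state, and print the predicted accumulated reward shape.
if __name__ == "__main__":
    _net = Seq2RewardNetwork(
        state_dim=3, action_dim=4, num_hiddens=8, num_hidden_layers=1
    )
    _state = rlt.FeatureData(torch.randn(5, 2, 3))
    _action = rlt.FeatureData(torch.randn(5, 2, 4))
    print(_net(_state, _action).acc_reward.shape)  # torch.Size([2, 1])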
| 3,177 | 33.923077 | 88 | py |
ReAgent | ReAgent-master/reagent/models/cem_planner.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
A network which implements a cross entropy method-based planner
The planner plans the best next action based on simulation data generated by
an ensemble of world models.
The idea is inspired by: https://arxiv.org/abs/1805.12114
"""
import itertools
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import scipy.stats as stats
import torch
import torch.nn as nn
from reagent.core import types as rlt
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.models.base import ModelBase
from reagent.models.world_model import MemoryNetwork
from reagent.training.utils import rescale_actions
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
class CEMPlannerNetwork(nn.Module):
def __init__(
self,
mem_net_list: List[MemoryNetwork],
cem_num_iterations: int,
cem_population_size: int,
ensemble_population_size: int,
num_elites: int,
plan_horizon_length: int,
state_dim: int,
action_dim: int,
discrete_action: bool,
terminal_effective: bool,
gamma: float,
alpha: float = 0.25,
epsilon: float = 0.001,
action_upper_bounds: Optional[np.ndarray] = None,
action_lower_bounds: Optional[np.ndarray] = None,
):
"""
:param mem_net_list: A list of world models used to simulate trajectories
:param cem_num_iterations: The maximum number of iterations for
searching the best action
:param cem_population_size: The number of candidate solutions to
evaluate in each CEM iteration
:param ensemble_population_size: The number of trajectories to be
sampled to evaluate a CEM solution
:param num_elites: The number of elites kept to refine solutions
in each iteration
:param plan_horizon_length: The number of steps to plan ahead
:param state_dim: state dimension
:param action_dim: action dimension
:param discrete_action: If actions are discrete or continuous
        :param terminal_effective: If True, planning will stop after a
            predicted terminal signal
:param gamma: The reward discount factor
:param alpha: The CEM solution update rate
:param epsilon: The planning will stop early when the solution
variance drops below epsilon
:param action_upper_bounds: Upper bound of each action dimension.
Only effective when discrete_action=False.
:param action_lower_bounds: Lower bound of each action dimension.
Only effective when discrete_action=False.
"""
super().__init__()
self.mem_net_list = nn.ModuleList(mem_net_list)
self.cem_num_iterations = cem_num_iterations
self.cem_pop_size = cem_population_size
self.ensemble_pop_size = ensemble_population_size
self.num_elites = num_elites
self.plan_horizon_length = plan_horizon_length
self.state_dim = state_dim
self.action_dim = action_dim
self.terminal_effective = terminal_effective
self.gamma = gamma
self.alpha = alpha
self.epsilon = epsilon
self.discrete_action = discrete_action
if not discrete_action:
assert (
(action_upper_bounds is not None)
and (action_lower_bounds is not None)
and (
action_upper_bounds.shape
== action_lower_bounds.shape
== (action_dim,)
)
)
assert np.all(action_upper_bounds >= action_lower_bounds)
self.action_upper_bounds = np.tile(
action_upper_bounds, self.plan_horizon_length
)
self.action_lower_bounds = np.tile(
action_lower_bounds, self.plan_horizon_length
)
self.orig_action_upper = torch.tensor(action_upper_bounds)
self.orig_action_lower = torch.tensor(action_lower_bounds)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def forward(self, state: rlt.FeatureData):
assert state.float_features.shape == (1, self.state_dim)
if self.discrete_action:
return self.discrete_planning(state)
return self.continuous_planning(state)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def acc_rewards_of_one_solution(
self, init_state: torch.Tensor, solution: torch.Tensor, solution_idx: int
):
"""
ensemble_pop_size trajectories will be sampled to evaluate a
CEM solution. Each trajectory is generated by one world model
:param init_state: its shape is (state_dim, )
:param solution: its shape is (plan_horizon_length, action_dim)
:param solution_idx: the index of the solution
:return reward: Reward of each of ensemble_pop_size trajectories
"""
reward_matrix = np.zeros((self.ensemble_pop_size, self.plan_horizon_length))
for i in range(self.ensemble_pop_size):
state = init_state
mem_net_idx = np.random.randint(0, len(self.mem_net_list))
for j in range(self.plan_horizon_length):
# state shape:
# (1, 1, state_dim)
# action shape:
# (1, 1, action_dim)
(
reward,
next_state,
not_terminal,
not_terminal_prob,
) = self.sample_reward_next_state_terminal(
state=rlt.FeatureData(state.reshape((1, 1, self.state_dim))),
action=rlt.FeatureData(
solution[j, :].reshape((1, 1, self.action_dim))
),
mem_net=self.mem_net_list[mem_net_idx],
)
reward_matrix[i, j] = reward * (self.gamma ** j)
if not not_terminal:
logger.debug(
f"Solution {solution_idx}: predict terminal at step {j}"
f" with prob. {1.0 - not_terminal_prob}"
)
if not not_terminal:
break
state = next_state
return np.sum(reward_matrix, axis=1)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def acc_rewards_of_all_solutions(
self, state: rlt.FeatureData, solutions: torch.Tensor
    ) -> np.ndarray:
"""
Calculate accumulated rewards of solutions.
:param state: the input which contains the starting state
:param solutions: its shape is (cem_pop_size, plan_horizon_length, action_dim)
:returns: a vector of size cem_pop_size, which is the reward of each solution
"""
acc_reward_vec = np.zeros(self.cem_pop_size)
init_state = state.float_features
for i in range(self.cem_pop_size):
if i % (self.cem_pop_size // 10) == 0:
logger.debug(f"Simulating the {i}-th solution...")
acc_reward_vec[i] = self.acc_rewards_of_one_solution(
init_state, solutions[i], i
)
return acc_reward_vec
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def sample_reward_next_state_terminal(
self, state: rlt.FeatureData, action: rlt.FeatureData, mem_net: MemoryNetwork
):
"""Sample one-step dynamics based on the provided world model"""
wm_output = mem_net(state, action)
num_mixtures = wm_output.logpi.shape[2]
mixture_idx = (
Categorical(torch.exp(wm_output.logpi.view(num_mixtures)))
.sample()
.long()
.item()
)
next_state = Normal(
wm_output.mus[0, 0, mixture_idx], wm_output.sigmas[0, 0, mixture_idx]
).sample()
reward = wm_output.reward[0, 0]
if self.terminal_effective:
not_terminal_prob = torch.sigmoid(wm_output.not_terminal[0, 0])
not_terminal = Bernoulli(not_terminal_prob).sample().long().item()
else:
not_terminal_prob = 1.0
not_terminal = 1
return reward, next_state, not_terminal, not_terminal_prob
def constrained_variance(self, mean, var):
lb_dist, ub_dist = (
mean - self.action_lower_bounds,
self.action_upper_bounds - mean,
)
return np.minimum(np.minimum((lb_dist / 2) ** 2, (ub_dist / 2) ** 2), var)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def continuous_planning(self, state: rlt.FeatureData) -> torch.Tensor:
# TODO: Warmstarts means and vars using previous solutions (T48841404)
mean = (self.action_upper_bounds + self.action_lower_bounds) / 2
var = (self.action_upper_bounds - self.action_lower_bounds) ** 2 / 16
normal_sampler = stats.truncnorm(
-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(mean)
)
for i in range(self.cem_num_iterations):
logger.debug(f"{i}-th cem iteration.")
const_var = self.constrained_variance(mean, var)
solutions = (
normal_sampler.rvs(
size=[self.cem_pop_size, self.action_dim * self.plan_horizon_length]
)
* np.sqrt(const_var)
+ mean
)
action_solutions = torch.from_numpy(
solutions.reshape(
(self.cem_pop_size, self.plan_horizon_length, self.action_dim)
)
).float()
acc_rewards = self.acc_rewards_of_all_solutions(state, action_solutions)
elites = solutions[np.argsort(acc_rewards)][-self.num_elites :]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = self.alpha * mean + (1 - self.alpha) * new_mean
var = self.alpha * var + (1 - self.alpha) * new_var
if np.max(var) <= self.epsilon:
break
# Pick the first action of the optimal solution
solution = mean[: self.action_dim]
raw_action = solution.reshape(-1)
low = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0])
high = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1])
# rescale to range (-1, 1) as per canonical output range of continuous agents
return rescale_actions(
torch.tensor(raw_action),
new_min=low,
new_max=high,
prev_min=self.orig_action_lower,
prev_max=self.orig_action_upper,
)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def discrete_planning(self, state: rlt.FeatureData) -> Tuple[int, np.ndarray]:
        # For discrete actions, we use random shooting to get the best next action
random_action_seqs = list(
itertools.product(range(self.action_dim), repeat=self.plan_horizon_length)
)
random_action_seqs = random.choices(random_action_seqs, k=self.cem_pop_size)
action_solutions = torch.zeros(
self.cem_pop_size, self.plan_horizon_length, self.action_dim
)
for i, action_seq in enumerate(random_action_seqs):
for j, act_idx in enumerate(action_seq):
action_solutions[i, j, act_idx] = 1
acc_rewards = self.acc_rewards_of_all_solutions(state, action_solutions)
first_action_tally = np.zeros(self.action_dim)
reward_tally = np.zeros(self.action_dim)
for action_seq, acc_reward in zip(random_action_seqs, acc_rewards):
first_action = action_seq[0]
first_action_tally[first_action] += 1
reward_tally[first_action] += acc_reward
best_next_action_idx = np.nanargmax(reward_tally / first_action_tally)
best_next_action_one_hot = torch.zeros(self.action_dim).float()
best_next_action_one_hot[best_next_action_idx] = 1
logger.debug(
f"Choose action {best_next_action_idx}."
f"Stats: {reward_tally} / {first_action_tally}"
f" = {reward_tally/first_action_tally} "
)
return best_next_action_idx, best_next_action_one_hot
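# Illustrative sketch (not part of the original module): one isolated CEM update
# step in numpy, mirroring the elite selection and the alpha-smoothed mean and
# variance refit used by continuous_planning() above. All numbers are arbitrary.
if __name__ == "__main__":
    _alpha, _num_elites = 0.25, 2
    _mean, _var = np.zeros(3), np.ones(3)
    _solutions = np.random.randn(6, 3) * np.sqrt(_var) + _mean  # 6 candidate plans
    _rewards = np.random.randn(6)  # stand-in for simulated returns
    _elites = _solutions[np.argsort(_rewards)][-_num_elites:]
    _mean = _alpha * _mean + (1 - _alpha) * np.mean(_elites, axis=0)
    _var = _alpha * _var + (1 - _alpha) * np.var(_elites, axis=0)
    print(_mean.shape, _var.shape)  # (3,) (3,)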
| 13,047 | 40.686901 | 88 | py |
ReAgent | ReAgent-master/reagent/models/bcq.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from reagent.models.base import ModelBase
class BatchConstrainedDQN(ModelBase):
def __init__(self, state_dim, q_network, imitator_network, bcq_drop_threshold):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
self.state_dim = state_dim
self.q_network = q_network
self.imitator_network = imitator_network
self.invalid_action_penalty = -1e10
self.bcq_drop_threshold = bcq_drop_threshold
def input_prototype(self):
return self.q_network.input_prototype()
def forward(self, state):
q_values = self.q_network(state)
imitator_outputs = self.imitator_network(state.float_features)
imitator_probs = torch.nn.functional.softmax(imitator_outputs, dim=1)
filter_values = imitator_probs / imitator_probs.max(keepdim=True, dim=1)[0]
invalid_actions = (filter_values < self.bcq_drop_threshold).float()
invalid_action_penalty = self.invalid_action_penalty * invalid_actions
constrained_q_values = q_values + invalid_action_penalty
return constrained_q_values
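# Illustrative sketch (not part of the original module): a toy Q-network and
# imitator showing how low-probability actions receive the -1e10 penalty. The
# imitator consumes raw float features while the Q-network consumes FeatureData,
# matching forward() above. All dimensions are arbitrary.
if __name__ == "__main__":
    from reagent.core import types as rlt
    class _ToyQ(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(3, 4)
        def input_prototype(self):
            return rlt.FeatureData(torch.randn(1, 3))
        def forward(self, state):
            return self.linear(state.float_features)
    _bcq = BatchConstrainedDQN(
        state_dim=3,
        q_network=_ToyQ(),
        imitator_network=torch.nn.Linear(3, 4),
        bcq_drop_threshold=0.1,
    )
    print(_bcq(rlt.FeatureData(torch.randn(2, 3))).shape)  # torch.Size([2, 4])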
| 1,239 | 40.333333 | 83 | py |
ReAgent | ReAgent-master/reagent/models/seq2slate.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import Optional, NamedTuple
import torch
import torch.nn as nn
import torch.nn.modules.transformer as transformer
from reagent.core import types as rlt
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import (
DECODER_START_SYMBOL,
PADDING_SYMBOL,
Seq2SlateMode,
Seq2SlateOutputArch,
attention,
pytorch_decoder_mask,
clones,
mask_logits_by_idx,
per_symbol_to_per_seq_probs,
print_model_info,
)
from reagent.models.base import ModelBase
from torch.nn.parallel.distributed import DistributedDataParallel
logger = logging.getLogger(__name__)
class Generator(nn.Module):
"""Candidate generation"""
def forward(self, probs: torch.Tensor, greedy: bool):
"""
Decode one-step
:param probs: probability distributions of decoder.
Shape: batch_size, tgt_seq_len, candidate_size
:param greedy: whether to greedily pick or sample the next symbol
"""
batch_size = probs.shape[0]
# get the last step probs shape: batch_size, candidate_size
prob = probs[:, -1, :]
if greedy:
_, next_candidate = torch.max(prob, dim=1)
else:
next_candidate = torch.multinomial(prob, num_samples=1, replacement=False)
next_candidate = next_candidate.reshape(batch_size, 1)
# next_candidate: the decoded symbols for the latest step
# shape: batch_size x 1
# prob: generative probabilities of the latest step
# shape: batch_size x candidate_size
return next_candidate, prob
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
"""
def __init__(self, dim_model):
super().__init__()
self.norm = nn.LayerNorm(dim_model)
def forward(self, x, sublayer):
return x + sublayer(self.norm(x))
class Encoder(nn.Module):
"Core encoder is a stack of num_layers layers"
def __init__(self, layer, num_layers):
super().__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.dim_model)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class EncoderLayer(nn.Module):
"""Encoder is made up of self-attn and feed forward"""
def __init__(self, dim_model, self_attn, feed_forward):
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(dim_model), 2)
self.dim_model = dim_model
def forward(self, src_embed, src_mask):
# src_embed shape: batch_size, seq_len, dim_model
# src_src_mask shape: batch_size, seq_len, seq_len
def self_attn_layer(x):
return self.self_attn(x, x, x, src_mask)
# attn_output shape: batch_size, seq_len, dim_model
attn_output = self.sublayer[0](src_embed, self_attn_layer)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[1](attn_output, self.feed_forward)
class Decoder(nn.Module):
"""Generic num_layers layer decoder with masking."""
def __init__(self, layer, num_layers):
super().__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x, memory, tgt_src_mask, tgt_tgt_mask):
# each layer is one DecoderLayer
for layer in self.layers:
x = layer(x, memory, tgt_src_mask, tgt_tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
"""Decoder is made of self-attn, src-attn, and feed forward"""
def __init__(self, size, self_attn, src_attn, feed_forward):
super().__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size), 3)
def forward(self, x, m, tgt_src_mask, tgt_tgt_mask):
# x is target embedding or the output of previous decoder layer
# x shape: batch_size, seq_len, dim_model
# m is the output of the last encoder layer
# m shape: batch_size, seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
def self_attn_layer_tgt(x):
return self.self_attn(query=x, key=x, value=x, mask=tgt_tgt_mask)
def self_attn_layer_src(x):
return self.src_attn(query=x, key=m, value=m, mask=tgt_src_mask)
x = self.sublayer[0](x, self_attn_layer_tgt)
x = self.sublayer[1](x, self_attn_layer_src)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[2](x, self.feed_forward)
class EncoderPyTorch(nn.Module):
"""Transformer-based encoder based on PyTorch official implementation"""
def __init__(self, dim_model, num_heads, dim_feedforward, num_layers):
super().__init__()
encoder_layer = nn.TransformerEncoderLayer(
d_model=dim_model,
dim_feedforward=dim_feedforward,
nhead=num_heads,
dropout=0.0,
)
self.transformer_encoder = nn.TransformerEncoder(
encoder_layer, num_layers=num_layers
)
def forward(self, src):
# Adapt to PyTorch format (batch_size as second dim)
src = src.transpose(0, 1)
# not using mask currently since we do not deal with paddings
out = self.transformer_encoder(src)
return out.transpose(0, 1)
class DecoderLastLayerPytorch(transformer.TransformerDecoderLayer):
"""
The last layer of Decoder.
Modified from PyTorch official code: instead of attention embedding,
return attention weights which can be directly used to sample items
"""
def forward(
self,
tgt,
memory,
tgt_mask,
memory_mask,
):
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
_, attn_weights = self.multihead_attn(
tgt,
memory,
memory,
attn_mask=memory_mask,
)
assert attn_weights is not None
return attn_weights
class DecoderPyTorch(nn.Module):
"""Transformer-based decoder based on PyTorch official implementation"""
def __init__(self, dim_model, num_heads, dim_feedforward, num_layers):
super().__init__()
assert num_layers >= 1
self.layers = nn.ModuleList(
[
transformer.TransformerDecoderLayer(
d_model=dim_model,
nhead=num_heads,
dim_feedforward=dim_feedforward,
dropout=0.0,
)
for _ in range(num_layers - 1)
]
+ [
DecoderLastLayerPytorch(
d_model=dim_model,
nhead=num_heads,
dim_feedforward=dim_feedforward,
dropout=0.0,
)
]
)
self.num_layers = num_layers
def forward(self, tgt_embed, memory, tgt_src_mask, tgt_tgt_mask):
# tgt_embed shape: batch_size, tgt_seq_len, dim_model
# memory shape: batch_size, src_seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
batch_size, tgt_seq_len, _ = tgt_embed.shape
# Adapt to PyTorch format
tgt_embed = tgt_embed.transpose(0, 1)
memory = memory.transpose(0, 1)
output = tgt_embed
for mod in self.layers:
output = mod(
output,
memory,
tgt_mask=tgt_tgt_mask,
memory_mask=tgt_src_mask,
)
probs_for_placeholders = torch.zeros(
batch_size, tgt_seq_len, 2, device=tgt_embed.device
)
probs = torch.cat((probs_for_placeholders, output), dim=2)
return probs
class MultiHeadedAttention(nn.Module):
def __init__(self, num_heads, dim_model):
"""Take in model size and number of heads"""
super().__init__()
assert dim_model % num_heads == 0
# We assume d_v always equals d_k
self.d_k = dim_model // num_heads
self.num_heads = num_heads
self.linears = clones(nn.Linear(dim_model, dim_model), 4)
def forward(self, query, key, value, mask=None):
if mask is not None:
# Same mask applied to all num_heads heads.
# mask shape: batch_size, 1, seq_len, seq_len
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from dim_model => num_heads x d_k
# self.linear[0, 1, 2] is query weight matrix, key weight matrix, and
# value weight matrix, respectively.
# l(x) represents the transformed query matrix, key matrix and value matrix
# l(x) has shape (batch_size, seq_len, dim_model). You can think l(x) as
# the matrices from a one-head attention; or you can think
# l(x).view(...).transpose(...) as the matrices of num_heads attentions,
# each attention has d_k dimension.
query, key, value = [
l(x).view(nbatches, -1, self.num_heads, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
# x shape: batch_size, num_heads, seq_len, d_k
x, _ = attention(query, key, value, mask, self.d_k)
# 3) "Concat" using a view and apply a final linear.
# each attention's output is d_k dimension. Concat num_heads attention's outputs
# x shape: batch_size, seq_len, dim_model
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.num_heads * self.d_k)
return self.linears[-1](x)
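# Editor-added illustrative sketch (not original code): self-attention with 2 heads over
# a length-5 sequence; the all-ones mask leaves every position visible. The sizes are
# arbitrary assumptions; dim_model must be divisible by num_heads.
def _multi_headed_attention_shape_sketch():
    mha = MultiHeadedAttention(num_heads=2, dim_model=8)
    x = torch.randn(2, 5, 8)
    out = mha(x, x, x, mask=torch.ones(2, 5, 5))
    assert out.shape == (2, 5, 8)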
class PositionwiseFeedForward(nn.Module):
def __init__(self, dim_model, dim_feedforward):
super().__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(dim_model, dim_feedforward),
torch.nn.ReLU(),
torch.nn.Linear(dim_feedforward, dim_model),
)
def forward(self, x):
return self.net(x)
class Embedder(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.linear = nn.Linear(self.dim_in, self.dim_out)
def forward(self, x):
# x: raw input features. Shape: batch_size, seq_len, dim_in
output = self.linear(x) * math.sqrt(self.dim_out)
# output shape: batch_size, seq_len, dim_out
return output
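# Editor-added illustrative sketch (not original code): Embedder is a linear projection
# whose output is scaled by sqrt(dim_out), mirroring the embedding scaling used in the
# Transformer paper. The dimensions below are arbitrary assumptions.
def _embedder_shape_sketch():
    embedder = Embedder(dim_in=3, dim_out=8)
    out = embedder(torch.randn(2, 5, 3))
    assert out.shape == (2, 5, 8)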
class PositionalEncoding(nn.Module):
"""
A simple positional encoding for handling variable (possibly longer) input lengths.
We append the ordinal position as one additional dimension of the input embeddings,
and then project back to the original number of dimensions with a learnable linear layer
"""
def __init__(self, dim_model):
super().__init__()
self.pos_embed = nn.Linear(dim_model + 1, dim_model)
self.activation = nn.ReLU()
def forward(self, x):
device = x.device
batch_size, seq_len, _ = x.shape
position_idx = (
torch.arange(0, seq_len, device=device)
.unsqueeze(0)
.repeat(batch_size, 1)
.reshape(batch_size, seq_len, 1)
)
# shape: batch_size, seq_len, dim_model + 1
x_pos = torch.cat((x, position_idx), dim=2)
# project back to shape: batch_size, seq_len, dim_model
return self.activation(self.pos_embed(x_pos))
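# Editor-added illustrative sketch (not original code): this positional encoding keeps
# the tensor shape unchanged -- the ordinal position is appended as one extra feature
# and projected back to dim_model. The sizes below are arbitrary assumptions.
def _positional_encoding_shape_sketch():
    pos_encoding = PositionalEncoding(dim_model=8)
    out = pos_encoding(torch.randn(2, 5, 8))
    assert out.shape == (2, 5, 8)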
class BaselineNet(nn.Module):
def __init__(self, state_dim, dim_feedforward, num_stacked_layers):
super().__init__()
nn_blocks = [nn.Linear(state_dim, dim_feedforward), nn.ReLU()]
assert num_stacked_layers >= 1
for _ in range(num_stacked_layers - 1):
nn_blocks.extend([nn.Linear(dim_feedforward, dim_feedforward), nn.ReLU()])
nn_blocks.append(nn.Linear(dim_feedforward, 1))
self.mlp = nn.Sequential(*nn_blocks)
def forward(self, input: rlt.PreprocessedRankingInput):
x = input.state.float_features
return self.mlp(x)
class Seq2SlateTransformerOutput(NamedTuple):
ranked_per_symbol_probs: Optional[torch.Tensor]
ranked_per_seq_probs: Optional[torch.Tensor]
ranked_tgt_out_idx: Optional[torch.Tensor]
per_symbol_log_probs: Optional[torch.Tensor]
per_seq_log_probs: Optional[torch.Tensor]
encoder_scores: Optional[torch.Tensor]
class Seq2SlateTransformerModel(nn.Module):
"""
A Seq2Slate network with Transformer. The network is essentially an
encoder-decoder structure. The encoder inputs a sequence of candidate feature
vectors and a state feature vector, and the decoder outputs an ordered
list of candidate indices. The output order is learned through REINFORCE
algorithm to optimize sequence-wise reward.
One application example is to rank candidate feeds to a specific user such
that the final list of feeds as a whole optimizes the user's engagement.
Seq2Slate paper: https://arxiv.org/abs/1810.02019
Transformer paper: https://arxiv.org/abs/1706.03762
The model architecture can also adapt to some variations.
(1) The decoder can be autoregressive
(2) The decoder can take encoder scores and perform iterative softmax (aka frechet sort)
(3) No decoder and the output order is solely based on encoder scores
"""
def __init__(
self,
state_dim: int,
candidate_dim: int,
num_stacked_layers: int,
num_heads: int,
dim_model: int,
dim_feedforward: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
output_arch: Seq2SlateOutputArch,
temperature: float = 1.0,
state_embed_dim: Optional[int] = None,
):
"""
:param state_dim: state feature dimension
:param candidate_dim: candidate feature dimension
:param num_stacked_layers: number of stacked layers in Transformer
:param num_heads: number of attention heads used in Transformer
:param dim_model: number of attention dimensions in Transformer
:param dim_feedforward: number of hidden units in FeedForward layers
in Transformer
:param max_src_seq_len: the maximum length of input sequences
:param max_tgt_seq_len: the maximum length of output sequences
:param output_arch: determines seq2slate output architecture
:param temperature: temperature used in decoder sampling
:param state_embed_dim: embedding dimension of state features.
by default (if not specified), state_embed_dim = dim_model / 2
"""
super().__init__()
self.state_dim = state_dim
self.candidate_dim = candidate_dim
self.num_stacked_layers = num_stacked_layers
self.num_heads = num_heads
self.dim_model = dim_model
self.dim_feedforward = dim_feedforward
self.max_src_seq_len = max_src_seq_len
self.max_tgt_seq_len = max_tgt_seq_len
self.output_arch = output_arch
self._DECODER_START_SYMBOL = DECODER_START_SYMBOL
self._PADDING_SYMBOL = PADDING_SYMBOL
self._RANK_MODE = Seq2SlateMode.RANK_MODE.value
self._PER_SYMBOL_LOG_PROB_DIST_MODE = (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE.value
)
self._PER_SEQ_LOG_PROB_MODE = Seq2SlateMode.PER_SEQ_LOG_PROB_MODE.value
self._DECODE_ONE_STEP_MODE = Seq2SlateMode.DECODE_ONE_STEP_MODE.value
self._ENCODER_SCORE_MODE = Seq2SlateMode.ENCODER_SCORE_MODE.value
self._OUTPUT_PLACEHOLDER = torch.zeros(1)
self.encoder = EncoderPyTorch(
dim_model, num_heads, dim_feedforward, num_stacked_layers
)
# Compute score at each encoder step
self.encoder_scorer = nn.Linear(dim_model, 1)
self.generator = Generator()
self.decoder = DecoderPyTorch(
dim_model, num_heads, dim_feedforward, num_stacked_layers
)
self.positional_encoding_decoder = PositionalEncoding(dim_model)
if state_embed_dim is None:
state_embed_dim = dim_model // 2
candidate_embed_dim = dim_model - state_embed_dim
self.state_embedder = Embedder(state_dim, state_embed_dim)
self.candidate_embedder = Embedder(candidate_dim, candidate_embed_dim)
# Initialize parameters with Glorot / fan_avg.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
print_model_info(self)
__constants__ = [
"state_dim",
"candidate_dim",
"num_stacked_layers",
"num_heads",
"dim_model",
"dim_feedforward",
"max_src_seq_len",
"max_tgt_seq_len",
"output_path",
"temperature",
"state_embed_dim",
"_DECODER_START_SYMBOL",
"_PADDING_SYMBOL",
"_RANK_MODE",
"_PER_SYMBOL_LOG_PROB_DIST_MODE",
"_PER_SEQ_LOG_PROB_MODE",
"_DECODE_ONE_STEP_MODE",
"_ENCODER_SCORE_MODE",
]
def forward(
self,
mode: str,
state: torch.Tensor,
src_seq: torch.Tensor,
tgt_in_idx: Optional[torch.Tensor] = None,
tgt_out_idx: Optional[torch.Tensor] = None,
tgt_in_seq: Optional[torch.Tensor] = None,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
) -> Seq2SlateTransformerOutput:
"""
:param state: state feature vector. Shape: batch_size, state_dim
:param src_seq: candidate feature vectors. Shape: batch_size, src_seq_len, candidate_dim
:param mode: a string indicating which mode to perform.
"rank": return ranked actions and their generative probabilities.
"per_seq_log_probs": return generative log probabilities of given
tgt sequences (used for REINFORCE training)
"per_symbol_log_probs": return generative log probabilties of each
symbol in given tgt sequences (used in TEACHER FORCING training)
:param tgt_seq_len: the length of output sequence to be decoded. Only used
in rank mode
:param greedy: whether to sample based on softmax distribution or greedily
when decoding. Only used in rank mode
"""
if mode == self._RANK_MODE:
if tgt_seq_len is None:
tgt_seq_len = self.max_tgt_seq_len
assert greedy is not None
return self._rank(
state=state,
src_seq=src_seq,
tgt_seq_len=tgt_seq_len,
greedy=greedy,
)
elif mode in (self._PER_SEQ_LOG_PROB_MODE, self._PER_SYMBOL_LOG_PROB_DIST_MODE):
assert tgt_in_seq is not None
assert tgt_in_idx is not None
assert tgt_out_idx is not None
return self._log_probs(
state=state,
src_seq=src_seq,
tgt_in_seq=tgt_in_seq,
tgt_in_idx=tgt_in_idx,
tgt_out_idx=tgt_out_idx,
mode=mode,
)
elif mode == self._ENCODER_SCORE_MODE:
assert self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE
assert tgt_out_idx is not None
return self.encoder_output_to_scores(
state=state,
src_seq=src_seq,
tgt_out_idx=tgt_out_idx,
)
else:
raise NotImplementedError()
def _rank(
self, state: torch.Tensor, src_seq: torch.Tensor, tgt_seq_len: int, greedy: bool
) -> Seq2SlateTransformerOutput:
"""Decode sequences based on given inputs"""
device = src_seq.device
batch_size, src_seq_len, candidate_dim = src_seq.shape
candidate_size = src_seq_len + 2
# candidate_features is used as look-up table for candidate features.
# the second dim is src_seq_len + 2 because we also want to include
# features of start symbol and padding symbol
candidate_features = torch.zeros(
batch_size, src_seq_len + 2, candidate_dim, device=device
)
# TODO: T62502977 create learnable feature vectors for start symbol
# and padding symbol
candidate_features[:, 2:, :] = src_seq
# memory shape: batch_size, src_seq_len, dim_model
memory = self.encode(state, src_seq)
if self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
tgt_out_idx, ranked_per_symbol_probs = self._encoder_rank(
memory, tgt_seq_len
)
elif self.output_arch == Seq2SlateOutputArch.FRECHET_SORT and greedy:
# greedy decoding for non-autoregressive decoder
tgt_out_idx, ranked_per_symbol_probs = self._greedy_rank(
state, memory, candidate_features, tgt_seq_len
)
else:
assert greedy is not None
# autoregressive decoding
tgt_out_idx, ranked_per_symbol_probs = self._autoregressive_rank(
state, memory, candidate_features, tgt_seq_len, greedy
)
# ranked_per_symbol_probs shape: batch_size, tgt_seq_len, candidate_size
# ranked_per_seq_probs shape: batch_size, 1
ranked_per_seq_probs = per_symbol_to_per_seq_probs(
ranked_per_symbol_probs, tgt_out_idx
)
# tgt_out_idx shape: batch_size, tgt_seq_len
return Seq2SlateTransformerOutput(
ranked_per_symbol_probs=ranked_per_symbol_probs,
ranked_per_seq_probs=ranked_per_seq_probs,
ranked_tgt_out_idx=tgt_out_idx,
per_symbol_log_probs=self._OUTPUT_PLACEHOLDER,
per_seq_log_probs=self._OUTPUT_PLACEHOLDER,
encoder_scores=self._OUTPUT_PLACEHOLDER,
)
def _greedy_rank(
self,
state: torch.Tensor,
memory: torch.Tensor,
candidate_features: torch.Tensor,
tgt_seq_len: int,
):
"""Using the first step decoder scores to greedily sort items"""
# candidate_features shape: batch_size, src_seq_len + 2, candidate_dim
batch_size, candidate_size, _ = candidate_features.shape
device = candidate_features.device
# Only one step input to the decoder
tgt_in_idx = torch.full(
(batch_size, 1), self._DECODER_START_SYMBOL, dtype=torch.long, device=device
)
tgt_in_seq = gather(candidate_features, tgt_in_idx)
# shape: batch_size, candidate_size
probs = self.decode(
memory=memory,
state=state,
tgt_in_idx=tgt_in_idx,
tgt_in_seq=tgt_in_seq,
)[:, -1, :]
# tgt_out_idx shape: batch_size, tgt_seq_len
tgt_out_idx = torch.argsort(probs, dim=1, descending=True)[:, :tgt_seq_len]
# since it is greedy ranking, we set selected items' probs to 1
ranked_per_symbol_probs = torch.zeros(
batch_size, tgt_seq_len, candidate_size, device=device
).scatter(2, tgt_out_idx.unsqueeze(2), 1.0)
return tgt_out_idx, ranked_per_symbol_probs
def _autoregressive_rank(
self,
state: torch.Tensor,
memory: torch.Tensor,
candidate_features: torch.Tensor,
tgt_seq_len: int,
greedy: bool,
):
batch_size, candidate_size, _ = candidate_features.shape
device = candidate_features.device
tgt_in_idx = torch.full(
(batch_size, 1), self._DECODER_START_SYMBOL, dtype=torch.long, device=device
)
ranked_per_symbol_probs = torch.zeros(
batch_size, tgt_seq_len, candidate_size, device=device
)
for step in torch.arange(tgt_seq_len, device=device):
tgt_in_seq = gather(candidate_features, tgt_in_idx)
# shape batch_size, step + 1, candidate_size
probs = self.decode(
memory=memory,
state=state,
tgt_in_idx=tgt_in_idx,
tgt_in_seq=tgt_in_seq,
)
# next candidate shape: batch_size, 1
# prob shape: batch_size, candidate_size
next_candidate, next_candidate_sample_prob = self.generator(probs, greedy)
ranked_per_symbol_probs[:, step, :] = next_candidate_sample_prob
tgt_in_idx = torch.cat([tgt_in_idx, next_candidate], dim=1)
# remove the decoder start symbol
# tgt_out_idx shape: batch_size, tgt_seq_len
tgt_out_idx = tgt_in_idx[:, 1:]
return tgt_out_idx, ranked_per_symbol_probs
def _encoder_rank(self, memory: torch.Tensor, tgt_seq_len: int):
batch_size, src_seq_len, _ = memory.shape
candidate_size = src_seq_len + 2
device = memory.device
ranked_per_symbol_probs = torch.zeros(
batch_size, tgt_seq_len, candidate_size, device=device
)
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
tgt_out_idx = torch.argsort(encoder_scores, dim=1, descending=True)[
:, :tgt_seq_len
]
# +2 to account for start symbol and padding symbol
tgt_out_idx += 2
# every position has propensity of 1 because we are just using argsort
ranked_per_symbol_probs = ranked_per_symbol_probs.scatter(
2, tgt_out_idx.unsqueeze(2), 1.0
)
return tgt_out_idx, ranked_per_symbol_probs
def _log_probs(
self,
state: torch.Tensor,
src_seq: torch.Tensor,
tgt_in_seq: torch.Tensor,
tgt_in_idx: torch.Tensor,
tgt_out_idx: torch.Tensor,
mode: str,
) -> Seq2SlateTransformerOutput:
"""
Compute log of generative probabilities of given tgt sequences
(used for REINFORCE training)
"""
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq)
tgt_seq_len = tgt_in_seq.shape[1]
src_seq_len = src_seq.shape[1]
assert tgt_seq_len <= src_seq_len
# decoder_probs shape: batch_size, tgt_seq_len, candidate_size
decoder_probs = self.decode(
memory=encoder_output,
state=state,
tgt_in_idx=tgt_in_idx,
tgt_in_seq=tgt_in_seq,
)
# log_probs shape:
# if mode == PER_SEQ_LOG_PROB_MODE: batch_size, 1
# if mode == PER_SYMBOL_LOG_PROB_DIST_MODE: batch_size, tgt_seq_len, candidate_size
if mode == self._PER_SYMBOL_LOG_PROB_DIST_MODE:
per_symbol_log_probs = torch.log(torch.clamp(decoder_probs, min=1e-40))
return Seq2SlateTransformerOutput(
ranked_per_symbol_probs=None,
ranked_per_seq_probs=None,
ranked_tgt_out_idx=None,
per_symbol_log_probs=per_symbol_log_probs,
per_seq_log_probs=None,
encoder_scores=None,
)
per_seq_log_probs = torch.log(
per_symbol_to_per_seq_probs(decoder_probs, tgt_out_idx)
)
return Seq2SlateTransformerOutput(
ranked_per_symbol_probs=None,
ranked_per_seq_probs=None,
ranked_tgt_out_idx=None,
per_symbol_log_probs=None,
per_seq_log_probs=per_seq_log_probs,
encoder_scores=None,
)
def encoder_output_to_scores(
self, state: torch.Tensor, src_seq: torch.Tensor, tgt_out_idx: torch.Tensor
) -> Seq2SlateTransformerOutput:
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq)
# encoder_output shape: batch_size, src_seq_len, dim_model
# tgt_out_idx shape: batch_size, tgt_seq_len
batch_size, tgt_seq_len = tgt_out_idx.shape
# order encoder_output by tgt_out_idx
# slate_encoder_output shape: batch_size, tgt_seq_len, dim_model
slate_encoder_output = gather(encoder_output, tgt_out_idx - 2)
# encoder_scores shape: batch_size, tgt_seq_len
encoder_scores = self.encoder_scorer(slate_encoder_output).squeeze()
return Seq2SlateTransformerOutput(
ranked_per_symbol_probs=None,
ranked_per_seq_probs=None,
ranked_tgt_out_idx=None,
per_symbol_log_probs=None,
per_seq_log_probs=None,
encoder_scores=encoder_scores,
)
def encode(self, state, src_seq):
# state: batch_size, state_dim
# src_seq: batch_size, src_seq_len, dim_candidate
batch_size, max_src_seq_len, _ = src_seq.shape
# candidate_embed: batch_size, src_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(src_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# transform state_embed into shape: batch_size, src_seq_len, dim_model/2
state_embed = state_embed.repeat(1, max_src_seq_len).reshape(
batch_size, max_src_seq_len, -1
)
# Input at each encoder step is actually concatenation of state_embed
# and candidate embed. state_embed is replicated at each encoding step.
# src_embed shape: batch_size, src_seq_len, dim_model
src_embed = torch.cat((state_embed, candidate_embed), dim=2)
# encoder_output shape: batch_size, src_seq_len, dim_model
return self.encoder(src_embed)
def decode(self, memory, state, tgt_in_idx, tgt_in_seq):
# memory is the output of the encoder, the attention of each input symbol
# memory shape: batch_size, src_seq_len, dim_model
# tgt_in_idx shape: batch_size, tgt_seq_len
# tgt_in_seq shape: batch_size, tgt_seq_len, dim_candidate
batch_size, src_seq_len, _ = memory.shape
_, tgt_seq_len = tgt_in_idx.shape
candidate_size = src_seq_len + 2
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
logits = torch.zeros(batch_size, tgt_seq_len, candidate_size).to(
encoder_scores.device
)
logits[:, :, :2] = float("-inf")
logits[:, :, 2:] = encoder_scores.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, src_seq_len
)
logits = mask_logits_by_idx(logits, tgt_in_idx)
probs = torch.softmax(logits, dim=2)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
# candidate_embed shape: batch_size, tgt_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(tgt_in_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# state_embed: batch_size, tgt_seq_len, dim_model/2
state_embed = state_embed.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, -1
)
# tgt_embed: batch_size, tgt_seq_len, dim_model
tgt_embed = self.positional_encoding_decoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len
# tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len
tgt_tgt_mask, tgt_src_mask = pytorch_decoder_mask(
memory, tgt_in_idx, self.num_heads
)
# output of decoder is probabilities over symbols.
# shape: batch_size, tgt_seq_len, candidate_size
probs = self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask)
else:
raise NotImplementedError()
return probs
@dataclass
class Seq2SlateNet(ModelBase):
__hash__ = param_hash
state_dim: int
candidate_dim: int
num_stacked_layers: int
dim_model: int
max_src_seq_len: int
max_tgt_seq_len: int
output_arch: Seq2SlateOutputArch
temperature: float
def __post_init_post_parse__(self) -> None:
super().__init__()
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
self.seq2slate = self._build_model()
def _build_model(self):
return None
def input_prototype(self):
return rlt.PreprocessedRankingInput.from_tensors(
state=torch.randn(1, self.state_dim),
src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim),
tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
slate_reward=torch.randn(1),
)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: Seq2SlateMode,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
if mode == Seq2SlateMode.RANK_MODE:
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
res = self.seq2slate(
mode=mode.value,
state=input.state.float_features,
src_seq=input.src_seq.float_features,
tgt_seq_len=tgt_seq_len,
greedy=greedy,
)
return rlt.RankingOutput(
ranked_per_symbol_probs=res.ranked_per_symbol_probs,
ranked_per_seq_probs=res.ranked_per_seq_probs,
ranked_tgt_out_idx=res.ranked_tgt_out_idx,
)
elif mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
assert input.tgt_in_seq is not None
assert input.tgt_in_idx is not None
assert input.tgt_out_idx is not None
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
res = self.seq2slate(
mode=mode.value,
state=input.state.float_features,
src_seq=input.src_seq.float_features,
tgt_in_seq=input.tgt_in_seq.float_features,
tgt_in_idx=input.tgt_in_idx,
tgt_out_idx=input.tgt_out_idx,
)
if res.per_symbol_log_probs is not None:
log_probs = res.per_symbol_log_probs
else:
log_probs = res.per_seq_log_probs
return rlt.RankingOutput(log_probs=log_probs)
elif mode == Seq2SlateMode.ENCODER_SCORE_MODE:
assert input.tgt_out_idx is not None
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
res = self.seq2slate(
mode=mode.value,
state=input.state.float_features,
src_seq=input.src_seq.float_features,
tgt_out_idx=input.tgt_out_idx,
)
return rlt.RankingOutput(encoder_scores=res.encoder_scores)
else:
raise NotImplementedError()
def get_distributed_data_parallel_model(self):
return _DistributedSeq2SlateNet(self)
@dataclass
class Seq2SlateTransformerNet(Seq2SlateNet):
__hash__ = param_hash
num_heads: int
dim_feedforward: int
state_embed_dim: Optional[int] = None
def _build_model(self):
return Seq2SlateTransformerModel(
state_dim=self.state_dim,
candidate_dim=self.candidate_dim,
num_stacked_layers=self.num_stacked_layers,
num_heads=self.num_heads,
dim_model=self.dim_model,
dim_feedforward=self.dim_feedforward,
max_src_seq_len=self.max_src_seq_len,
max_tgt_seq_len=self.max_tgt_seq_len,
output_arch=self.output_arch,
temperature=self.temperature,
state_embed_dim=self.state_embed_dim,
)
class _DistributedSeq2SlateNet(ModelBase):
def __init__(self, seq2slate_net: Seq2SlateNet):
super().__init__()
current_device = torch.cuda.current_device()
self.data_parallel = DistributedDataParallel(
seq2slate_net.seq2slate,
device_ids=[current_device],
output_device=current_device,
)
self.seq2slate_net = seq2slate_net
def input_prototype(self):
return self.seq2slate_net.input_prototype()
def cpu_model(self):
return self.seq2slate_net.cpu_model()
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: Seq2SlateMode,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
return self.seq2slate_net(input, mode, tgt_seq_len, greedy)
| 37,382 | 37.029502 | 92 | py |
ReAgent | ReAgent-master/reagent/models/critic.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import torch
from reagent.core import types as rlt
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import FullyConnectedNetwork
class FullyConnectedCritic(ModelBase):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
use_batch_norm: bool = False,
use_layer_norm: bool = False,
output_dim: int = 1,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.fc = FullyConnectedNetwork(
[state_dim + action_dim] + sizes + [output_dim],
activations + ["linear"],
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
)
def input_prototype(self):
# for inference: (batchsize, feature_dim)
return (
rlt.FeatureData(torch.randn(1, self.state_dim)),
rlt.FeatureData(torch.randn(1, self.action_dim)),
)
def forward(self, state: rlt.FeatureData, action: rlt.FeatureData):
assert (
len(state.float_features.shape) == len(action.float_features.shape)
and len(action.float_features.shape) == 2
and (state.float_features.shape[0] == action.float_features.shape[0])
), (
f"state shape: {state.float_features.shape}; action shape: "
f"{action.float_features.shape} not equal to (batch_size, feature_dim)"
)
cat_input = torch.cat((state.float_features, action.float_features), dim=-1)
return self.fc(cat_input)
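# Editor-added illustrative sketch (not part of the module): a rough shape check for
# FullyConnectedCritic. The dimensions are arbitrary assumptions; with the default
# output_dim=1 the critic returns one Q-value per (state, action) row.
def _fully_connected_critic_sketch():
    critic = FullyConnectedCritic(
        state_dim=4, action_dim=2, sizes=[8], activations=["relu"]
    )
    q_values = critic(
        rlt.FeatureData(torch.randn(3, 4)),
        rlt.FeatureData(torch.randn(3, 2)),
    )
    assert q_values.shape == (3, 1)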
| 2,106 | 35.327586 | 84 | py |
ReAgent | ReAgent-master/reagent/models/containers.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch.nn as nn
from reagent.models.base import ModelBase
class Sequential(
nn.Sequential, # type: ignore
ModelBase,
):
"""
Use this instead of torch.nn.Sequential to automate model tracing
"""
def input_prototype(self):
first = self[0]
assert isinstance(
first, ModelBase
), "The first module of Sequential has to be ModelBase"
return first.input_prototype()
| 535 | 23.363636 | 71 | py |
ReAgent | ReAgent-master/reagent/models/synthetic_reward.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core import parameters as rlp
from reagent.core import types as rlt
from reagent.models import convolutional_network
from reagent.models import fully_connected_network
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import ACTIVATION_MAP
logger = logging.getLogger(__name__)
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
class Concat(nn.Module):
def forward(self, state: torch.Tensor, action: torch.Tensor):
return torch.cat((state, action), dim=-1)
# pyre-fixme[11]: Annotation `Sequential` is not defined as a type.
class SequentialMultiArguments(nn.Sequential):
"""Sequential which can take more than 1 argument in forward function"""
def forward(self, *inputs):
for module in self._modules.values():
if type(inputs) == tuple:
inputs = module(*inputs)
else:
inputs = module(inputs)
return inputs
class ResidualBlock(nn.Module):
def __init__(self, d_model=64, dim_feedforward=128):
super(ResidualBlock, self).__init__()
self.relu = nn.ReLU()
self.fc_residual = nn.Sequential(
nn.Linear(d_model, dim_feedforward),
nn.ReLU(),
nn.Linear(dim_feedforward, d_model),
)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(x + self.fc_residual(x))
class PositionalEncoding(nn.Module):
def __init__(self, feature_dim=128, dropout=0.0, max_len=100):
"""
This module injects some information about the relative or absolute position of the tokens in the sequence.
The generated positional encodings are added to the input features.
Args:
feature_dim: dimension of the input features
"""
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, feature_dim, requires_grad=False)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, feature_dim, 2).float() * (-math.log(10000.0) / feature_dim)
)
pe[:, 0::2] = torch.sin(position * div_term) # max_len * feature_dim // 2
pe[:, 1::2] = torch.cos(position * div_term)
# pe dimension: (max_len, 1, feature_dim)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
# x dimension: (L, B, E)
# batch_size, seq_len, d_model
seq_len = x.shape[0]
pos_encoding = self.pe[:seq_len, :]
x = x + pos_encoding
return self.dropout(x)
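# Editor-added illustrative sketch (not original code): the sinusoidal table is added to
# the input, so shapes are preserved. The expected layout is (seq_len, batch, feature_dim)
# and feature_dim must be even; the sizes below are arbitrary assumptions.
def _positional_encoding_shape_sketch():
    pos_encoding = PositionalEncoding(feature_dim=16, max_len=20)
    out = pos_encoding(torch.randn(5, 3, 16))
    assert out.shape == (5, 3, 16)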
class PETransformerEncoderLayer(nn.Module):
"""PETransformerEncoderLayer is made up of Positional Encoding (PE), residual connections, self-attn and feedforward network.
Major differences between this implementation and the pytorch official torch.nn.TransformerEncoderLayer are:
1. Augment input data with positional encoding. hat{x} = x + PE{x}
2. Two parallel residual blocks are applied to the raw input data (x) and encoded input data (hat{x}), respectively, i.e. z = Residual(x), hat{z} = Residual(hat{x})
3. Treat z as the Value input, and hat{z} as the Query and Key input to feed a self-attention block.
Main Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``.
max_len: argument passed to the Positional Encoding module, see more details in the PositionalEncoding class.
"""
__constants__ = ["batch_first"]
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.0,
activation="relu",
layer_norm_eps=1e-5,
max_len=100,
use_ff=True,
pos_weight=0.5,
batch_first=False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(PETransformerEncoderLayer, self).__init__()
self.use_ff = use_ff
self.pos_weight = pos_weight
self.self_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=batch_first, **factory_kwargs
)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
# Customized implementation: to map Query & Key, Value with different embeddings.
self.qk_residual = ResidualBlock(d_model, dim_feedforward)
self.v_residual = ResidualBlock(d_model, dim_feedforward)
self.pos_encoder = PositionalEncoding(d_model, dropout=dropout, max_len=max_len)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if "activation" not in state:
state["activation"] = F.relu
super(PETransformerEncoderLayer, self).__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
encoded_src = self.pos_encoder(src)
query = self.qk_residual(encoded_src)
# do not involve pos_encoding info into the value
src = self.v_residual(src)
src2 = self.self_attn(
query, # query
query, # key = query as the input
src, # value
attn_mask=src_mask,
key_padding_mask=src_key_padding_mask,
)[0]
# add transformer related residual
src = src + self.dropout1(src2)
src = self.norm1(src)
# add another ff layer
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
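# Editor-added illustrative sketch (not original code): a single PETransformerEncoderLayer
# used directly, outside nn.TransformerEncoder. Inputs are (seq_len, batch, d_model)
# because batch_first defaults to False, and d_model must be divisible by nhead.
# The sizes below are arbitrary assumptions.
def _pe_transformer_encoder_layer_sketch():
    layer = PETransformerEncoderLayer(
        d_model=16, nhead=2, dim_feedforward=32, max_len=20
    )
    out = layer(torch.randn(10, 4, 16))
    assert out.shape == (10, 4, 16)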
def ngram(input: torch.Tensor, context_size: int, ngram_padding: torch.Tensor):
# input shape: seq_len, batch_size, state_dim + action_dim
seq_len, batch_size, feature_dim = input.shape
shifted_list = []
for i in range(context_size):
offset = i - context_size // 2
if offset < 0:
shifted = torch.cat(
(
# pyre-fixme[16]: `Tensor` has no attribute `tile`.
ngram_padding.tile((-offset, batch_size, 1)),
# pyre-fixme[16]: `Tensor` has no attribute `narrow`.
input.narrow(0, 0, seq_len + offset),
),
dim=0,
)
elif offset > 0:
shifted = torch.cat(
(
input.narrow(0, offset, seq_len - offset),
ngram_padding.tile(offset, batch_size, 1),
),
dim=0,
)
else:
shifted = input
shifted_list.append(shifted)
# shape: seq_len, batch_size, feature_dim * context_size
return torch.cat(shifted_list, dim=-1)
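# Editor-added illustrative sketch (not original code): with context_size=3, every step is
# concatenated with its immediate left and right neighbors (zero-padded at the boundaries),
# so the feature dimension grows by a factor of context_size. Sizes are arbitrary assumptions.
def _ngram_shape_sketch():
    seq_len, batch_size, feature_dim, context_size = 4, 2, 3, 3
    padding = torch.zeros(1, 1, feature_dim)
    out = ngram(torch.randn(seq_len, batch_size, feature_dim), context_size, padding)
    assert out.shape == (seq_len, batch_size, feature_dim * context_size)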
def _gen_mask(valid_step: torch.Tensor, batch_size: int, seq_len: int):
"""
Mask for dealing with different lengths of MDPs
Example:
valid_step = [[1], [2], [3]], batch_size=3, seq_len = 4
mask = [
[0, 0, 0, 1],
[0, 0, 1, 1],
[0, 1, 1, 1],
]
"""
assert valid_step.shape == (batch_size, 1)
assert (1 <= valid_step).all()
assert (valid_step <= seq_len).all()
device = valid_step.device
mask = torch.arange(seq_len, device=device).repeat(batch_size, 1)
mask = (mask >= (seq_len - valid_step)).float()
return mask
class SyntheticRewardNet(ModelBase):
"""
This base class provides basic operations to consume inputs and call a synthetic reward net
A synthetic reward net (self.net) assumes the input contains only torch.Tensors.
Expected input shape:
state: seq_len, batch_size, state_dim
action: seq_len, batch_size, action_dim
Expected output shape:
reward: batch_size, seq_len
"""
def __init__(self, net: nn.Module):
super().__init__()
self.net = net
def forward(self, training_batch: rlt.MemoryNetworkInput):
# state shape: seq_len, batch_size, state_dim
state = training_batch.state.float_features
# action shape: seq_len, batch_size, action_dim
action = training_batch.action
# shape: batch_size, 1
valid_step = training_batch.valid_step
seq_len, batch_size, _ = training_batch.action.shape
# output shape: batch_size, seq_len
output = self.net(state, action)
assert valid_step is not None
mask = _gen_mask(valid_step, batch_size, seq_len)
output_masked = output * mask
pred_reward = output_masked.sum(dim=1, keepdim=True)
return rlt.RewardNetworkOutput(predicted_reward=pred_reward)
def export_mlp(self):
"""
Export the inner PyTorch nn.Module to feed to a predictor wrapper.
"""
return self.net
class SingleStepSyntheticRewardNet(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
last_layer_activation: str,
use_batch_norm: bool = False,
use_layer_norm: bool = False,
):
"""
Decompose rewards at the last step to individual steps.
"""
super().__init__()
modules: List[nn.Module] = [Concat()]
prev_layer_size = state_dim + action_dim
for size, activation in zip(sizes, activations):
if use_batch_norm:
modules.append(nn.BatchNorm1d(prev_layer_size))
modules.append(nn.Linear(prev_layer_size, size))
if use_layer_norm:
modules.append(nn.LayerNorm(size))
modules.append(ACTIVATION_MAP[activation]())
prev_layer_size = size
# last layer
modules.append(nn.Linear(prev_layer_size, 1))
modules.append(ACTIVATION_MAP[last_layer_activation]())
self.dnn = SequentialMultiArguments(*modules)
def forward(self, state: torch.Tensor, action: torch.Tensor):
# pyre-fixme[29]: `SequentialMultiArguments` is not a function.
# shape: batch_size, seq_len
return self.dnn(state, action).squeeze(2).transpose(0, 1)
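# Editor-added illustrative sketch (not original code): the single-step net scores each
# (state, action) pair independently and returns one value per step, transposed to
# (batch_size, seq_len). The dimensions and activations below are arbitrary assumptions.
def _single_step_reward_net_sketch():
    net = SingleStepSyntheticRewardNet(
        state_dim=4,
        action_dim=2,
        sizes=[8],
        activations=["relu"],
        last_layer_activation="leaky_relu",
    )
    # inputs shaped (seq_len, batch_size, dim)
    out = net(torch.randn(5, 3, 4), torch.randn(5, 3, 2))
    assert out.shape == (3, 5)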
class NGramConvolutionalNetwork(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
last_layer_activation: str,
context_size: int,
conv_net_params: rlp.ConvNetParameters,
use_layer_norm: bool = False,
) -> None:
assert context_size % 2 == 1, f"Context size is not odd: {context_size}"
super().__init__()
self.context_size = context_size
self.input_width = state_dim + action_dim
self.input_height = context_size
self.num_input_channels = 1
num_conv_layers = len(conv_net_params.conv_height_kernels)
conv_width_kernels = [self.input_width] + [1] * (num_conv_layers - 1)
cnn_parameters = convolutional_network.CnnParameters(
conv_dims=[self.num_input_channels] + conv_net_params.conv_dims,
conv_height_kernels=conv_net_params.conv_height_kernels,
conv_width_kernels=conv_width_kernels,
pool_types=conv_net_params.pool_types,
pool_kernels_strides=conv_net_params.pool_kernel_sizes,
num_input_channels=self.num_input_channels,
input_height=self.input_height,
input_width=self.input_width,
)
self.conv_net = convolutional_network.ConvolutionalNetwork(
cnn_parameters,
[-1] + sizes + [1],
activations + [last_layer_activation],
use_layer_norm=use_layer_norm,
)
self.ngram_padding = torch.zeros(1, 1, state_dim + action_dim)
def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
"""Forward pass NGram conv net.
:param input shape: seq_len, batch_size, feature_dim
"""
# shape: seq_len, batch_size, state_dim + action_dim
input = torch.cat((state, action), dim=-1)
# shape: seq_len, batch_size, (state_dim + action_dim) * context_size
ngram_input = ngram(
input, self.context_size, self.ngram_padding.to(input.device)
)
seq_len, batch_size, _ = ngram_input.shape
# shape: seq_len * batch_size, 1, context_size, state_dim + action_dim
reshaped = ngram_input.reshape(-1, 1, self.input_height, self.input_width)
# shape: batch_size, seq_len
output = self.conv_net(reshaped).reshape(seq_len, batch_size).transpose(0, 1)
return output
class NGramFullyConnectedNetwork(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
last_layer_activation: str,
context_size: int,
use_layer_norm: bool = False,
) -> None:
assert context_size % 2 == 1, f"Context size is not odd: {context_size}"
super().__init__()
self.context_size = context_size
self.ngram_padding = torch.zeros(1, 1, state_dim + action_dim)
self.fc = fully_connected_network.FullyConnectedNetwork(
[(state_dim + action_dim) * context_size] + sizes + [1],
activations + [last_layer_activation],
use_layer_norm=use_layer_norm,
)
def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
"""Forward pass NGram conv net.
:param input shape: seq_len, batch_size, feature_dim
"""
input = torch.cat((state, action), dim=-1)
# shape: seq_len, batch_size, (state_dim + action_dim) * context_size
ngram_input = ngram(
input, self.context_size, self.ngram_padding.to(input.device)
)
# shape: batch_size, seq_len
return self.fc(ngram_input).transpose(0, 1).squeeze(2)
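# Editor-added illustrative sketch (not original code): same input/output contract as the
# other synthetic reward nets -- (seq_len, batch, dim) in, (batch, seq_len) out -- with an
# odd context_size. The dimensions and activations below are arbitrary assumptions.
def _ngram_fc_reward_net_sketch():
    net = NGramFullyConnectedNetwork(
        state_dim=4,
        action_dim=2,
        sizes=[8],
        activations=["relu"],
        last_layer_activation="leaky_relu",
        context_size=3,
    )
    out = net(torch.randn(5, 3, 4), torch.randn(5, 3, 2))
    assert out.shape == (3, 5)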
class SequenceSyntheticRewardNet(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
lstm_hidden_size: int,
lstm_num_layers: int,
lstm_bidirectional: bool,
last_layer_activation: str,
):
"""
Decompose rewards at the last step to individual steps.
"""
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.lstm_hidden_size = lstm_hidden_size
self.lstm_num_layers = lstm_num_layers
self.lstm_bidirectional = lstm_bidirectional
self.lstm = nn.LSTM(
input_size=self.state_dim + self.action_dim,
hidden_size=self.lstm_hidden_size,
num_layers=self.lstm_num_layers,
bidirectional=self.lstm_bidirectional,
)
if self.lstm_bidirectional:
self.fc_out = nn.Linear(self.lstm_hidden_size * 2, 1)
else:
self.fc_out = nn.Linear(self.lstm_hidden_size, 1)
self.output_activation = ACTIVATION_MAP[last_layer_activation]()
def forward(self, state: torch.Tensor, action: torch.Tensor):
# shape: seq_len, batch_size, state_dim + action_dim
cat_input = torch.cat((state, action), dim=-1)
# output shape: seq_len, batch_size, self.hidden_size
output, _ = self.lstm(cat_input)
# output shape: seq_len, batch_size, 1
output = self.fc_out(output)
# output shape: batch_size, seq_len
output = self.output_activation(output).squeeze(2).transpose(0, 1)
return output
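# Editor-added illustrative sketch (not original code): the LSTM consumes the whole
# (state, action) sequence, so each per-step score can depend on earlier steps, unlike
# the single-step net. The dimensions below are arbitrary assumptions.
def _sequence_reward_net_sketch():
    net = SequenceSyntheticRewardNet(
        state_dim=4,
        action_dim=2,
        lstm_hidden_size=8,
        lstm_num_layers=1,
        lstm_bidirectional=False,
        last_layer_activation="leaky_relu",
    )
    out = net(torch.randn(5, 3, 4), torch.randn(5, 3, 2))
    assert out.shape == (3, 5)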
class TransformerSyntheticRewardNet(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
d_model: int,
nhead: int = 2,
num_encoder_layers: int = 2,
dim_feedforward: int = 128,
dropout: float = 0.0,
activation: str = "relu",
last_layer_activation: str = "leaky_relu",
layer_norm_eps: float = 1e-5,
max_len: int = 10,
):
"""
Decompose rewards at the last step to individual steps using transformer modules.
Args:
nhead: the number of heads in the multiheadattention models (default=2).
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=2).
dim_feedforward: the dimension of the feedforward network model (default=128).
dropout: the dropout value (default=0.0).
activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
"""
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
# d_model: dimension of transformer input
self.d_model = d_model
self.nhead = nhead
self.num_encoder_layers = num_encoder_layers
self.dim_feedforward = dim_feedforward
self.dropout = dropout
self.activation = activation
self.layer_norm_eps = layer_norm_eps
self.max_len = max_len
# map input features to higher latent space before sending to transformer
self.fc_in = nn.Sequential(
nn.Linear(self.state_dim + self.action_dim, self.d_model),
nn.ReLU(),
)
# use transformer encoder to get reward logits for each step
encoder_layer = PETransformerEncoderLayer(
self.d_model,
nhead,
dim_feedforward,
dropout,
activation,
layer_norm_eps,
max_len=self.max_len,
batch_first=False,
)
self.transformer = nn.TransformerEncoder(
encoder_layer,
num_encoder_layers,
)
self.fc_out = nn.Linear(self.d_model, 1)
self.output_activation = ACTIVATION_MAP[last_layer_activation]()
def forward(self, state: torch.Tensor, action: torch.Tensor):
# shape: seq_len (L), batch_size (B), state_dim + action_dim
cat_input = torch.cat((state, action), dim=-1)
# latent_input shape: (L,B,E)
latent_input = self.fc_in(cat_input)
# output shape: (L, B, E)
output = self.transformer(latent_input)
output = self.fc_out(output)
# output shape: seq_len, batch_size, 1
output = self.output_activation(output).squeeze(2).transpose(0, 1)
# output shape: batch_size, seq_len
return output
| 19,664 | 36.033898 | 167 | py |
ReAgent | ReAgent-master/reagent/models/no_soft_update_embedding.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import torch.nn as nn
class NoSoftUpdateEmbedding(nn.Embedding):
"""
Use this instead of vanilla Embedding module to avoid soft-updating the embedding
table in the target network.
"""
def __deepcopy__(self, memo):
return copy.copy(self)
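# Editor-added illustrative sketch (not original code): because __deepcopy__ falls back to
# a shallow copy, deep-copying the module (as target-network construction does) reuses the
# same underlying embedding table instead of cloning it. Sizes are arbitrary assumptions.
def _no_soft_update_embedding_sketch():
    embedding = NoSoftUpdateEmbedding(10, 4)
    embedding_copy = copy.deepcopy(embedding)
    assert embedding_copy.weight is embedding.weight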
| 377 | 21.235294 | 85 | py |
ReAgent | ReAgent-master/reagent/models/world_model.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from reagent.core import types as rlt
from reagent.models.base import ModelBase
from reagent.models.mdn_rnn import MDNRNN
class MemoryNetwork(ModelBase):
def __init__(
self, state_dim, action_dim, num_hiddens, num_hidden_layers, num_gaussians
):
super().__init__()
self.mdnrnn = MDNRNN(
state_dim=state_dim,
action_dim=action_dim,
num_hiddens=num_hiddens,
num_hidden_layers=num_hidden_layers,
num_gaussians=num_gaussians,
)
self.state_dim = state_dim
self.action_dim = action_dim
self.num_hiddens = num_hiddens
self.num_hidden_layers = num_hidden_layers
self.num_gaussians = num_gaussians
def input_prototype(self):
return (
rlt.FeatureData(torch.randn(1, 1, self.state_dim)),
rlt.FeatureData(torch.randn(1, 1, self.action_dim)),
)
def forward(self, state: rlt.FeatureData, action: rlt.FeatureData):
(
mus,
sigmas,
logpi,
rewards,
not_terminals,
all_steps_hidden,
last_step_hidden_and_cell,
) = self.mdnrnn(action.float_features, state.float_features)
return rlt.MemoryNetworkOutput(
mus=mus,
sigmas=sigmas,
logpi=logpi,
reward=rewards,
not_terminal=not_terminals,
last_step_lstm_hidden=last_step_hidden_and_cell[0],
last_step_lstm_cell=last_step_hidden_and_cell[1],
all_steps_lstm_hidden=all_steps_hidden,
)
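# Editor-added illustrative sketch (not original code): input_prototype returns correctly
# shaped dummy (state, action) features, so it doubles as a minimal smoke test of the
# forward pass. The constructor arguments below are arbitrary assumptions.
def _memory_network_sketch():
    mem_net = MemoryNetwork(
        state_dim=4, action_dim=2, num_hiddens=8, num_hidden_layers=1, num_gaussians=3
    )
    out = mem_net(*mem_net.input_prototype())
    assert isinstance(out, rlt.MemoryNetworkOutput)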
| 1,722 | 30.907407 | 82 | py |
ReAgent | ReAgent-master/reagent/test/mab/test_ucb.py | import unittest
from io import BytesIO
import numpy as np
import numpy.testing as npt
import torch
from parameterized import parameterized
from reagent.mab.ucb import (
UCBTunedBernoulli,
MetricUCB,
UCBTuned,
UCB1,
_get_arm_indices,
_place_values_at_indices,
)
class TestUCButils(unittest.TestCase):
def test_get_arm_indices_happy_case(self):
ids_of_all_arms = ["a", "b", "c", "z", "4"]
ids_of_arms_in_batch = ["z", "4", "b"]
idxs = _get_arm_indices(ids_of_all_arms, ids_of_arms_in_batch)
self.assertListEqual(idxs, [3, 4, 1])
def test_get_arm_indices_fail(self):
ids_of_all_arms = ["a", "b", "c", "z", "4"]
ids_of_arms_in_batch = ["z", "4", "b", "o"]
with self.assertRaises(ValueError):
_get_arm_indices(ids_of_all_arms, ids_of_arms_in_batch)
def test_place_values_at_indices(self):
values = torch.tensor([3, 7, 11], dtype=torch.float)
idxs = [2, 3, 5]
len_ = 7
result = _place_values_at_indices(values, idxs, len_)
expected_result = torch.Tensor([0, 0, 3, 7, 0, 11, 0])
npt.assert_array_equal(result.numpy(), expected_result.numpy())
class TestUCB(unittest.TestCase):
@parameterized.expand(
[
["UCBTunedBernoulli", UCBTunedBernoulli],
["MetricUCB", MetricUCB],
["UCBTuned", UCBTuned],
["UCB1", UCB1],
]
)
def test_batch_training(self, name, cls):
n_arms = 5
b = cls(n_arms=n_arms)
total_obs_per_arm = torch.zeros(n_arms)
total_success_per_arm = torch.zeros(n_arms)
for _ in range(10):
n_obs_per_arm = torch.randint(0, 50, size=(n_arms,)).float()
n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm
total_obs_per_arm += n_obs_per_arm
total_success_per_arm += n_success_per_arm
if cls == UCBTuned:
# UCBTuned requires additional input
b.add_batch_observations(
n_obs_per_arm, n_success_per_arm, n_success_per_arm
)
else:
b.add_batch_observations(n_obs_per_arm, n_success_per_arm)
npt.assert_array_equal(
b.total_n_obs_per_arm.numpy(), total_obs_per_arm.numpy()
) # observation counters are correct
npt.assert_array_equal(
b.total_sum_reward_per_arm.numpy(), total_success_per_arm.numpy()
) # success counters are correct
if issubclass(cls, UCBTuned):
# we keep track of squared rewards only for UCBTuned
npt.assert_array_equal(
b.total_sum_reward_squared_per_arm.numpy(),
total_success_per_arm.numpy(),
) # squared rewards equal to rewards for Bernoulli bandit
self.assertEqual(
b.total_n_obs_all_arms, total_obs_per_arm.sum().item()
) # total observation counter correct
ucb_scores = b.get_ucb_scores()
# UCB scores shape and type are correct
self.assertEqual(ucb_scores.shape, (n_arms,))
self.assertIsInstance(ucb_scores, torch.Tensor)
avg_rewards = total_success_per_arm / total_obs_per_arm
npt.assert_array_almost_equal(
b.get_avg_reward_values().numpy(), avg_rewards.numpy()
) # avg rewards computed correctly
npt.assert_array_less(
avg_rewards,
np.where(b.total_n_obs_per_arm.numpy() > 0, ucb_scores.numpy(), np.nan),
) # UCB scores greater than avg rewards
@parameterized.expand(
[
["UCBTunedBernoulli", UCBTunedBernoulli],
["MetricUCB", MetricUCB],
["UCBTuned", UCBTuned],
["UCB1", UCB1],
]
)
def test_class_method(self, name, cls):
n_arms = 5
n_obs_per_arm = torch.randint(0, 50, size=(n_arms,)).float()
n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm
if cls == UCBTuned:
ucb_scores = cls.get_ucb_scores_from_batch(
n_obs_per_arm, n_success_per_arm, n_success_per_arm
)
else:
ucb_scores = cls.get_ucb_scores_from_batch(n_obs_per_arm, n_success_per_arm)
# UCB scores shape and type are correct
self.assertEqual(ucb_scores.shape, (n_arms,))
self.assertIsInstance(ucb_scores, torch.Tensor)
avg_rewards = n_success_per_arm / n_obs_per_arm
npt.assert_array_less(
avg_rewards.numpy(),
np.where(n_obs_per_arm.numpy() > 0, ucb_scores.numpy(), np.nan),
) # UCB scores greater than avg rewards
@parameterized.expand(
[
["UCBTunedBernoulli", UCBTunedBernoulli],
["MetricUCB", MetricUCB],
["UCBTuned", UCBTuned],
["UCB1", UCB1],
]
)
def test_online_training(self, name, cls):
n_arms = 5
total_n_obs = 100
b = cls(n_arms=n_arms)
total_obs_per_arm = torch.zeros(n_arms)
total_success_per_arm = torch.zeros(n_arms)
true_ctrs = torch.rand(size=(n_arms,))
for _ in range(total_n_obs):
chosen_arm = b.get_action()
reward = torch.bernoulli(true_ctrs[chosen_arm])
b.add_single_observation(chosen_arm, reward)
total_obs_per_arm[chosen_arm] += 1
total_success_per_arm[chosen_arm] += reward
online_ucb_scores = b.get_ucb_scores()
if cls == UCBTuned:
offline_ucb_scores = cls.get_ucb_scores_from_batch(
total_obs_per_arm, total_success_per_arm, total_success_per_arm
)
else:
offline_ucb_scores = cls.get_ucb_scores_from_batch(
total_obs_per_arm, total_success_per_arm
)
npt.assert_array_equal(
online_ucb_scores.numpy(), offline_ucb_scores.numpy()
) # UCB scores computed by online and offline algorithms match
@parameterized.expand(
[
["UCBTunedBernoulli", UCBTunedBernoulli],
["MetricUCB", MetricUCB],
["UCBTuned", UCBTuned],
["UCB1", UCB1],
]
)
def test_save_load(self, name, cls):
n_arms = 5
b = cls(n_arms=n_arms)
n_obs_per_arm = torch.randint(0, 100, size=(n_arms,)).float()
n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm
if cls == UCBTuned:
# UCBTuned requires additional input
b.add_batch_observations(
n_obs_per_arm, n_success_per_arm, n_success_per_arm
)
else:
b.add_batch_observations(n_obs_per_arm, n_success_per_arm)
ucb_scores_before_save = b.get_ucb_scores()
f_write = BytesIO()
torch.save(b, f_write)
f_write.seek(0)
f_read = BytesIO(f_write.read())
f_write.close()
b_loaded = torch.load(f_read)
f_read.close()
ucb_scores_after_load = b_loaded.get_ucb_scores()
npt.assert_array_equal(
ucb_scores_before_save.numpy(), ucb_scores_after_load.numpy()
) # UCB scores are same before saving and after loading
self.assertListEqual(b.arm_ids, b_loaded.arm_ids)
@parameterized.expand(
[
["UCBTunedBernoulli", UCBTunedBernoulli],
["MetricUCB", MetricUCB],
["UCBTuned", UCBTuned],
["UCB1", UCB1],
]
)
def test_custom_arm_ids(self, name, cls):
# arm 0 earns no rewards, so we specify arm_ids 1,...,N explicitly
n_arms = 5
b = cls(n_arms=n_arms)
n_obs_per_arm = torch.randint(0, 100, size=(n_arms - 1,)).float()
n_success_per_arm = torch.rand(size=(n_arms - 1,)) * n_obs_per_arm
if cls == UCBTuned:
# UCBTuned requires additional input
b.add_batch_observations(
n_obs_per_arm,
n_success_per_arm,
n_success_per_arm,
arm_ids=list(range(1, n_arms)),
)
else:
b.add_batch_observations(
n_obs_per_arm, n_success_per_arm, arm_ids=list(range(1, n_arms))
)
self.assertEqual(b.total_n_obs_per_arm[0], 0)
npt.assert_array_equal(n_obs_per_arm.numpy(), b.total_n_obs_per_arm[1:].numpy())
npt.assert_array_equal(
n_success_per_arm.numpy(), b.total_sum_reward_per_arm[1:].numpy()
)
if issubclass(cls, UCBTuned):
npt.assert_array_equal(
n_success_per_arm.numpy(),
b.total_sum_reward_squared_per_arm[1:].numpy(),
)
| 8,809 | 34.959184 | 88 | py |
ReAgent | ReAgent-master/reagent/test/evaluation/test_evaluation_data_page.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from reagent.core import types as rlt
from reagent.evaluation.doubly_robust_estimator import DoublyRobustEstimator
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.ope_adapter import OPEstimatorAdapter
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.ope.estimators.contextual_bandits_estimators import (
SwitchDREstimator,
SwitchEstimator,
)
logger = logging.getLogger(__name__)
class FakeSeq2SlateRewardNetwork(nn.Module):
def __init__(self):
super().__init__()
self.fake_parms = nn.Linear(1, 1)
def forward(
self,
state: torch.Tensor,
src_seq: torch.Tensor,
tgt_out_seq: torch.Tensor,
src_src_mask: torch.Tensor,
tgt_out_idx: torch.Tensor,
):
batch_size = state.shape[0]
rewards = []
for i in range(batch_size):
rewards.append(self._forward(state[i], tgt_out_idx[i]))
return torch.tensor(rewards).unsqueeze(1)
def _forward(self, state: torch.Tensor, tgt_out_idx: torch.Tensor):
if (state == torch.tensor([1.0, 0.0, 0.0])).all():
if (tgt_out_idx == torch.tensor([2, 3])).all():
return 1.0
else:
return 2.0
elif (state == torch.tensor([0.0, 1.0, 0.0])).all():
if (tgt_out_idx == torch.tensor([2, 3])).all():
return 3.0
else:
return 4.0
elif (state == torch.tensor([0.0, 0.0, 1.0])).all():
if (tgt_out_idx == torch.tensor([2, 3])).all():
return 5.0
else:
return 6.0
class FakeSeq2SlateTransformerNet(nn.Module):
def __init__(self):
super().__init__()
self.fake_parms = nn.Linear(1, 1)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
greedy: Optional[bool] = None,
):
# The creation of evaluation data pages only uses these specific arguments
assert mode in (Seq2SlateMode.RANK_MODE, Seq2SlateMode.PER_SEQ_LOG_PROB_MODE)
if mode == Seq2SlateMode.RANK_MODE:
assert greedy
return rlt.RankingOutput(
ranked_tgt_out_idx=torch.tensor([[2, 3], [3, 2], [2, 3]]).long()
)
return rlt.RankingOutput(
log_probs=torch.log(torch.tensor([0.4, 0.3, 0.7]).unsqueeze(1))
)
class TestEvaluationDataPage(unittest.TestCase):
def test_seq2slate_eval_data_page(self):
"""
Create 3 slate ranking logs and evaluate using Direct Method, Inverse
Propensity Scores, and Doubly Robust.
The logs are as follows:
state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
indices in logged slates: [3, 2], [3, 2], [3, 2]
model output indices: [2, 3], [3, 2], [2, 3]
logged reward: 4, 5, 7
logged propensities: 0.2, 0.5, 0.4
predicted rewards on logged slates: 2, 4, 6
predicted rewards on model outputted slates: 1, 4, 5
predicted propensities: 0.4, 0.3, 0.7
When eval_greedy=True:
Direct Method uses the predicted rewards on model outputted slates.
Thus the result is expected to be (1 + 4 + 5) / 3
Inverse Propensity Scores would scale the reward by 1.0 / logged propensities
whenever the model output slate matches with the logged slate.
Since only the second log matches with the model output, the IPS result
is expected to be 5 / 0.5 / 3
Doubly Robust is the sum of the direct method result and propensity-scaled
reward difference; the latter is defined as:
1.0 / logged_propensities * (logged reward - predicted reward on logged slate)
* Indicator(model slate == logged slate)
Since only the second logged slate matches with the model outputted slate,
the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3
When eval_greedy=False:
Only Inverse Propensity Scores would be accurate. Because it would be too
expensive to compute all possible slates' propensities and predicted rewards
for Direct Method.
The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
"""
batch_size = 3
state_dim = 3
src_seq_len = 2
tgt_seq_len = 2
candidate_dim = 2
reward_net = FakeSeq2SlateRewardNetwork()
seq2slate_net = FakeSeq2SlateTransformerNet()
src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
tgt_out_seq = src_seq[
torch.arange(batch_size).repeat_interleave(tgt_seq_len),
tgt_out_idx.flatten() - 2,
].reshape(batch_size, tgt_seq_len, candidate_dim)
ptb = rlt.PreprocessedRankingInput(
state=rlt.FeatureData(float_features=torch.eye(state_dim)),
src_seq=rlt.FeatureData(float_features=src_seq),
tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
tgt_out_idx=tgt_out_idx,
tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
slate_reward=torch.tensor([4.0, 5.0, 7.0]),
extras=rlt.ExtraData(
sequence_number=torch.tensor([0, 0, 0]),
mdp_id=np.array(["0", "1", "2"]),
),
)
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=True
)
logger.info("---------- Start evaluating eval_greedy=True -----------------")
doubly_robust_estimator = DoublyRobustEstimator()
(
direct_method,
inverse_propensity,
doubly_robust,
) = doubly_robust_estimator.estimate(edp)
switch_estimator, switch_dr_estimator = (
OPEstimatorAdapter(SwitchEstimator()),
OPEstimatorAdapter(SwitchDREstimator()),
)
# Verify that Switch with low exponent is equivalent to IPS
switch_ips = switch_estimator.estimate(edp, exp_base=1)
# Verify that Switch with no candidates is equivalent to DM
switch_dm = switch_estimator.estimate(edp, candidates=0)
# Verify that SwitchDR with low exponent is equivalent to DR
switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
# Verify that SwitchDR with no candidates is equivalent to DM
switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)
logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")
avg_logged_reward = (4 + 5 + 7) / 3
self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
self.assertAlmostEqual(
direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
self.assertAlmostEqual(
doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6
)
self.assertAlmostEqual(
doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6)
self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
logger.info("---------- Finish evaluating eval_greedy=True -----------------")
logger.info("---------- Start evaluating eval_greedy=False -----------------")
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=False
)
doubly_robust_estimator = DoublyRobustEstimator()
_, inverse_propensity, _ = doubly_robust_estimator.estimate(edp)
self.assertAlmostEqual(
inverse_propensity.raw,
(0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
delta=1e-6,
)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
logger.info("---------- Finish evaluating eval_greedy=False -----------------")
| 8,858 | 38.726457 | 87 | py |
ReAgent | ReAgent-master/reagent/test/evaluation/test_ope_integration.py | import logging
import random
import unittest
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.ope_adapter import (
OPEstimatorAdapter,
SequentialOPEstimatorAdapter,
)
from reagent.ope.estimators.contextual_bandits_estimators import (
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
SwitchDREstimator,
SwitchEstimator,
)
from reagent.ope.estimators.sequential_estimators import (
DoublyRobustEstimator as SeqDREstimator,
EpsilonGreedyRLPolicy,
RandomRLPolicy,
RLEstimatorInput,
)
from reagent.ope.estimators.types import Action, ActionSpace
from reagent.ope.test.envs import PolicyLogGenerator
from reagent.ope.test.gridworld import GridWorld, NoiseGridWorldModel
from reagent.ope.trainers.rl_tabular_trainers import (
DPTrainer,
DPValueFunction,
TabularPolicy,
)
from reagent.test.evaluation.test_evaluation_data_page import (
FakeSeq2SlateRewardNetwork,
FakeSeq2SlateTransformerNet,
)
logger = logging.getLogger(__name__)
def rlestimator_input_to_edp(
input: RLEstimatorInput, num_actions: int
) -> EvaluationDataPage:
mdp_ids = []
logged_propensities = []
logged_rewards = []
action_mask = []
model_propensities = []
model_values = []
for mdp in input.log:
mdp_id = len(mdp_ids)
for t in mdp:
mdp_ids.append(mdp_id)
logged_propensities.append(t.action_prob)
logged_rewards.append(t.reward)
assert t.action is not None
action_mask.append(
[1 if x == t.action.value else 0 for x in range(num_actions)]
)
assert t.last_state is not None
model_propensities.append(
[
input.target_policy(t.last_state)[Action(x)]
for x in range(num_actions)
]
)
assert input.value_function is not None
model_values.append(
[
input.value_function(t.last_state, Action(x))
for x in range(num_actions)
]
)
return EvaluationDataPage(
mdp_id=torch.tensor(mdp_ids).reshape(len(mdp_ids), 1),
logged_propensities=torch.tensor(logged_propensities).reshape(
(len(logged_propensities), 1)
),
logged_rewards=torch.tensor(logged_rewards).reshape((len(logged_rewards), 1)),
action_mask=torch.tensor(action_mask),
model_propensities=torch.tensor(model_propensities),
model_values=torch.tensor(model_values),
sequence_number=torch.tensor([]),
model_rewards=torch.tensor([]),
model_rewards_for_logged_action=torch.tensor([]),
)
class TestOPEModuleAlgs(unittest.TestCase):
GAMMA = 0.9
CPE_PASS_BAR = 1.0
CPE_MAX_VALUE = 2.0
MAX_HORIZON = 1000
NOISE_EPSILON = 0.3
EPISODES = 2
def test_gridworld_sequential_adapter(self):
"""
        Creates a gridworld environment, a logging policy, and a target policy.
        Evaluates the target policy using the direct OPE sequential doubly robust estimator,
        then transforms the log into an evaluation data page which is passed to the OPE adapter.
This test is meant to verify the adaptation of EDPs into RLEstimatorInputs as employed
by ReAgent since ReAgent provides EDPs to Evaluators. Going from EDP -> RLEstimatorInput
is more involved than RLEstimatorInput -> EDP since the EDP does not store the state
at each timestep in each MDP, only the corresponding logged outputs & model outputs.
Thus, the adapter must do some tricks to represent these timesteps as states so the
ope module can extract the correct outputs.
Note that there is some randomness in the model outputs since the model is purposefully
noisy. However, the same target policy is being evaluated on the same logged walks through
the gridworld, so the two results should be close in value (within 1).
"""
random.seed(0)
np.random.seed(0)
torch.random.manual_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else None
gridworld = GridWorld.from_grid(
[
["s", "0", "0", "0", "0"],
["0", "0", "0", "W", "0"],
["0", "0", "0", "0", "0"],
["0", "W", "0", "0", "0"],
["0", "0", "0", "0", "g"],
],
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
action_space = ActionSpace(4)
opt_policy = TabularPolicy(action_space)
trainer = DPTrainer(gridworld, opt_policy)
value_func = trainer.train(gamma=TestOPEModuleAlgs.GAMMA)
        behavior_policy = RandomRLPolicy(action_space)
target_policy = EpsilonGreedyRLPolicy(
opt_policy, TestOPEModuleAlgs.NOISE_EPSILON
)
model = NoiseGridWorldModel(
gridworld,
action_space,
epsilon=TestOPEModuleAlgs.NOISE_EPSILON,
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
value_func = DPValueFunction(target_policy, model, TestOPEModuleAlgs.GAMMA)
ground_truth = DPValueFunction(
target_policy, gridworld, TestOPEModuleAlgs.GAMMA
)
log = []
        log_generator = PolicyLogGenerator(gridworld, behavior_policy)
num_episodes = TestOPEModuleAlgs.EPISODES
for state in gridworld.states:
for _ in range(num_episodes):
log.append(log_generator.generate_log(state))
estimator_input = RLEstimatorInput(
gamma=TestOPEModuleAlgs.GAMMA,
log=log,
target_policy=target_policy,
value_function=value_func,
ground_truth=ground_truth,
)
edp = rlestimator_input_to_edp(estimator_input, len(model.action_space))
dr_estimator = SeqDREstimator(
weight_clamper=None, weighted=False, device=device
)
module_results = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(
dr_estimator.evaluate(estimator_input)
)
adapter_results = SequentialOPEstimatorAdapter(
dr_estimator, TestOPEModuleAlgs.GAMMA, device=device
).estimate(edp)
        self.assertAlmostEqual(
            adapter_results.raw,
            module_results.raw,
            delta=TestOPEModuleAlgs.CPE_PASS_BAR,
            msg=f"OPE adapter results differed too much from underlying module "
            f"(Diff: {abs(adapter_results.raw - module_results.raw)} > {TestOPEModuleAlgs.CPE_PASS_BAR})",
        )
        self.assertLess(
            adapter_results.raw,
            TestOPEModuleAlgs.CPE_MAX_VALUE,
            msg=f"OPE adapter results are too large ({adapter_results.raw} > {TestOPEModuleAlgs.CPE_MAX_VALUE})",
        )
def test_seq2slate_eval_data_page(self):
"""
Create 3 slate ranking logs and evaluate using Direct Method, Inverse
Propensity Scores, and Doubly Robust.
The logs are as follows:
state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
indices in logged slates: [3, 2], [3, 2], [3, 2]
model output indices: [2, 3], [3, 2], [2, 3]
logged reward: 4, 5, 7
logged propensities: 0.2, 0.5, 0.4
predicted rewards on logged slates: 2, 4, 6
predicted rewards on model outputted slates: 1, 4, 5
predicted propensities: 0.4, 0.3, 0.7
When eval_greedy=True:
Direct Method uses the predicted rewards on model outputted slates.
Thus the result is expected to be (1 + 4 + 5) / 3
Inverse Propensity Scores would scale the reward by 1.0 / logged propensities
whenever the model output slate matches with the logged slate.
Since only the second log matches with the model output, the IPS result
is expected to be 5 / 0.5 / 3
Doubly Robust is the sum of the direct method result and propensity-scaled
reward difference; the latter is defined as:
1.0 / logged_propensities * (logged reward - predicted reward on logged slate)
* Indicator(model slate == logged slate)
Since only the second logged slate matches with the model outputted slate,
the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3
When eval_greedy=False:
Only Inverse Propensity Scores would be accurate. Because it would be too
expensive to compute all possible slates' propensities and predicted rewards
for Direct Method.
The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
"""
batch_size = 3
state_dim = 3
src_seq_len = 2
tgt_seq_len = 2
candidate_dim = 2
reward_net = FakeSeq2SlateRewardNetwork()
seq2slate_net = FakeSeq2SlateTransformerNet()
src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
tgt_out_seq = src_seq[
torch.arange(batch_size).repeat_interleave(tgt_seq_len),
tgt_out_idx.flatten() - 2,
].reshape(batch_size, tgt_seq_len, candidate_dim)
ptb = rlt.PreprocessedRankingInput(
state=rlt.FeatureData(float_features=torch.eye(state_dim)),
src_seq=rlt.FeatureData(float_features=src_seq),
tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
tgt_out_idx=tgt_out_idx,
tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
slate_reward=torch.tensor([4.0, 5.0, 7.0]),
extras=rlt.ExtraData(
sequence_number=torch.tensor([0, 0, 0]),
mdp_id=np.array(["0", "1", "2"]),
),
)
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=True
)
logger.info("---------- Start evaluating eval_greedy=True -----------------")
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
switch_estimator = OPEstimatorAdapter(SwitchEstimator())
switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
# Verify that Switch with low exponent is equivalent to IPS
switch_ips = switch_estimator.estimate(edp, exp_base=1)
# Verify that Switch with no candidates is equivalent to DM
switch_dm = switch_estimator.estimate(edp, candidates=0)
# Verify that SwitchDR with low exponent is equivalent to DR
switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
# Verify that SwitchDR with no candidates is equivalent to DM
switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)
logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")
avg_logged_reward = (4 + 5 + 7) / 3
self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
self.assertAlmostEqual(
direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
self.assertAlmostEqual(
doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6
)
self.assertAlmostEqual(
doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6)
self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
logger.info("---------- Finish evaluating eval_greedy=True -----------------")
logger.info("---------- Start evaluating eval_greedy=False -----------------")
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=False
)
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
self.assertAlmostEqual(
inverse_propensity.raw,
(0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
delta=1e-6,
)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
logger.info("---------- Finish evaluating eval_greedy=False -----------------")
| 13,472 | 39.338323 | 165 | py |
ReAgent | ReAgent-master/reagent/test/core/aggregators_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.aggregators import ActionCountAggregator
class ActionCountAggregatorTest(unittest.TestCase):
def setUp(self):
self.actions = ["A", "B", "C"]
key = "logged_action"
self.aggregator = ActionCountAggregator(key, self.actions)
logged_actions = [
[
torch.tensor([0, 0, 1, 2, 2]).unsqueeze(1),
torch.tensor([0, 1, 1, 2, 2]).unsqueeze(1),
],
[
torch.tensor([1, 0, 1, 2, 0]).unsqueeze(1),
torch.tensor([0, 1, 1, 0, 2]).unsqueeze(1),
],
]
for x in logged_actions:
self.aggregator(key, x)
def test_get_distributions(self):
distr = self.aggregator.get_distributions()
self.assertEqual(len(distr), 3)
self.assertEqual(distr["A"], [0.3, 0.4])
self.assertEqual(distr["B"], [0.3, 0.4])
self.assertEqual(distr["C"], [0.4, 0.2])
def test_get_cumulative_distributions(self):
distr = self.aggregator.get_cumulative_distributions()
self.assertEqual(len(distr), 3)
self.assertEqual(distr["A"], 0.35)
self.assertEqual(distr["B"], 0.35)
self.assertEqual(distr["C"], 0.3)
| 1,360 | 31.404762 | 71 | py |
ReAgent | ReAgent-master/reagent/test/training/test_ars_optimizer.py | #!/usr/bin/env python3
import unittest
import numpy as np
import torch
from reagent.training.gradient_free.ars_util import ARSOptimizer
class TestARSOptimizer(unittest.TestCase):
def metric(self, x):
# Ackley Function
# https://www.sfu.ca/~ssurjano/ackley.html
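        # The Ackley function attains its global minimum of 0 at x = 0, so a successful
        # optimizer should drive this metric toward 0 (asserted to be <= 1e-15 below).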
x *= 100
return (
-20 * np.exp(-0.2 * np.sqrt(np.inner(x, x) / x.size))
- np.exp(np.cos(2 * np.pi * x).sum() / x.size)
+ 20
+ np.e
)
def test_ars_optimizer(self):
dim = 10
n_generations = 30
X = torch.Tensor([[i] for i in range(dim)])
y = torch.ones(dim)
n_pert = 100
feature_dim = 2
np.random.seed(seed=123456)
ars_opt = ARSOptimizer(feature_dim, n_pert, rand_ars_params=True)
for i in range(n_generations):
perturbed_params = ars_opt.sample_perturbed_params()
rewards = []
for idx in range(0, len(perturbed_params)):
pos_param, neg_param = perturbed_params[idx]
pos_weight = torch.sigmoid(
torch.matmul(torch.column_stack((X, y)), pos_param)
)
# ARSOptimizer works in an ascent manner,
# thus a neg sign for minimizing objectives.
r_pos = -self.metric(pos_weight.numpy())
rewards.append(r_pos)
neg_weight = torch.sigmoid(
torch.matmul(torch.column_stack((X, y)), neg_param)
)
r_neg = -self.metric(neg_weight.numpy())
rewards.append(r_neg)
ars_opt.update_ars_params(torch.Tensor(rewards))
new_weight = torch.sigmoid(
torch.matmul(
torch.column_stack((X, y)),
torch.from_numpy(ars_opt.ars_params).float(),
)
)
perf = self.metric(new_weight.numpy())
print(f"gen {i}: perf {perf}")
self.assertLessEqual(perf, 1e-15)
| 2,041 | 33.610169 | 73 | py |
ReAgent | ReAgent-master/reagent/test/training/test_qrdqn.py | #!/usr/bin/env python3
import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.parameters import QRDQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer
from reagent.workflow.types import RewardOptions
class TestQRDQN(unittest.TestCase):
def setUp(self):
# preparing various components for qr-dqn trainer initialization
self.params = QRDQNTrainerParameters(actions=["1", "2"], num_atoms=11)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.state_dim = 10
self.action_dim = 2
self.sizes = [20, 20]
self.num_atoms = 11
self.activations = ["relu", "relu"]
self.dropout_ratio = 0
self.q_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
num_atoms=self.num_atoms,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q_network_target = self.q_network.get_target_network()
self.x = FeatureData(float_features=torch.rand(5, 10))
self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True)
self.num_output_nodes = (len(self.metrics_to_score) + 1) * len(
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`.
self.params.actions
)
self.reward_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe_target = self.q_network_cpe.get_target_network()
def _construct_trainer(self, new_params=None, no_cpe=False):
reward_network = self.reward_network
q_network_cpe = self.q_network_cpe
q_network_cpe_target = self.q_network_cpe_target
evaluation = self.eval_parameters
params = self.params
if new_params is not None:
params = new_params
if no_cpe:
reward_network = q_network_cpe = q_network_cpe_target = None
evaluation = EvaluationParameters(calc_cpe_in_training=False)
return QRDQNTrainer(
q_network=self.q_network,
q_network_target=self.q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=self.metrics_to_score,
evaluation=evaluation,
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`.
**params.asdict()
)
def test_init(self):
trainer = self._construct_trainer()
quantiles = (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms)
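        # With num_atoms=11, these are the quantile midpoints (0.5 + k) / 11 for k = 0..10,
        # i.e. approximately 0.045, 0.136, ..., 0.955.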
self.assertTrue((torch.isclose(trainer.quantiles, quantiles)).all())
self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all())
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(reward_boost={"1": 1, "2": 2}),
)
reward_boost_trainer = self._construct_trainer(new_params=param_copy)
self.assertTrue(
(
torch.isclose(
reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0])
)
).all()
)
def test_train_step_gen(self):
inp = DiscreteDqnInput(
state=FeatureData(float_features=torch.rand(3, 10)),
next_state=FeatureData(float_features=torch.rand(3, 10)),
reward=torch.ones(3, 1),
time_diff=torch.ones(3, 1) * 2,
step=torch.ones(3, 1) * 2,
not_terminal=torch.ones(3, 1), # todo: check terminal behavior
action=torch.tensor([[0, 1], [1, 0], [0, 1]]),
next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]),
possible_actions_mask=torch.ones(3, 2),
possible_next_actions_mask=torch.ones(3, 2),
extras=ExtraData(),
)
mse_backward_type = type(
torch.nn.functional.mse_loss(
torch.tensor([1.0], requires_grad=True), torch.zeros(1)
).grad_fn
)
add_backward_type = type(
(
torch.tensor([1.0], requires_grad=True)
+ torch.tensor([1.0], requires_grad=True)
).grad_fn
)
mean_backward_type = type(
torch.tensor([1.0, 2.0], requires_grad=True).mean().grad_fn
)
# vanilla
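        # The four yielded losses follow the order of train_step_yield_order in
        # test_configure_optimizers below (q_network, reward_network, q_network_cpe,
        # q_network); the final one presumably drives the target-network soft update.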
trainer = self._construct_trainer()
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
self.assertEqual(type(losses[0].grad_fn), mean_backward_type)
self.assertEqual(type(losses[1].grad_fn), mse_backward_type)
self.assertEqual(type(losses[2].grad_fn), mse_backward_type)
self.assertEqual(type(losses[3].grad_fn), add_backward_type)
# no CPE
trainer = self._construct_trainer(no_cpe=True)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 2)
# seq_num
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(use_seq_num_diff_as_time_diff=True),
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# multi_steps
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(multi_steps=2)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# non_max_q
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(maxq_learning=False)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
def test_configure_optimizers(self):
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 4)
train_step_yield_order = [
trainer.q_network,
trainer.reward_network,
trainer.q_network_cpe,
trainer.q_network,
]
for i in range(len(train_step_yield_order)):
opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0]
loss_param = list(train_step_yield_order[i].parameters())[0]
self.assertTrue(torch.all(torch.isclose(opt_param, loss_param)))
trainer = self._construct_trainer(no_cpe=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 2)
def test_get_detached_model_outputs(self):
trainer = self._construct_trainer()
q_out, q_target = trainer.get_detached_model_outputs(self.x)
        # The detached model outputs and target outputs should have matching shapes.
        self.assertEqual(q_out.shape[0], q_target.shape[0])
        self.assertEqual(q_out.shape[1], q_target.shape[1])
| 8,027 | 39.14 | 88 | py |
ReAgent | ReAgent-master/reagent/test/training/test_synthetic_reward_training.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import pytorch_lightning as pl
import torch
from reagent.core import parameters as rlp
from reagent.core import types as rlt
from reagent.models.synthetic_reward import (
SyntheticRewardNet,
SingleStepSyntheticRewardNet,
TransformerSyntheticRewardNet,
NGramFullyConnectedNetwork,
NGramConvolutionalNetwork,
SequenceSyntheticRewardNet,
)
from reagent.optimizer.union import Optimizer__Union
from reagent.optimizer.union import classes
from reagent.reporting.reward_network_reporter import RewardNetworkReporter
from reagent.training import RewardNetTrainer
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
def create_data(state_dim, action_dim, seq_len, batch_size, num_batches):
SCALE = 2
# reward is a linear function of (state, action)
weight = SCALE * torch.randn(state_dim + action_dim)
data = [None for _ in range(num_batches)]
for i in range(num_batches):
state = SCALE * torch.randn(seq_len, batch_size, state_dim)
action = SCALE * torch.randn(seq_len, batch_size, action_dim)
# random valid step
valid_step = torch.randint(1, seq_len + 1, (batch_size, 1))
# reward_matrix shape: batch_size x seq_len
reward_matrix = torch.matmul(
torch.cat((state, action), dim=2), weight
).transpose(0, 1)
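        # The mask below keeps only the last `valid_step` positions of each sequence, so the
        # observed reward is the sum of the per-step linear rewards over those valid steps.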
mask = torch.arange(seq_len).repeat(batch_size, 1)
mask = (mask >= (seq_len - valid_step)).float()
reward = (reward_matrix * mask).sum(dim=1).reshape(-1, 1)
data[i] = rlt.MemoryNetworkInput(
state=rlt.FeatureData(state),
action=action,
valid_step=valid_step,
reward=reward,
# the rest fields will not be used
next_state=torch.tensor([]),
step=torch.tensor([]),
not_terminal=torch.tensor([]),
time_diff=torch.tensor([]),
)
return weight, data
def create_sequence_data(state_dim, action_dim, seq_len, batch_size, num_batches):
SCALE = 2
weight = SCALE * torch.randn(state_dim + action_dim)
data = [None for _ in range(num_batches)]
for i in range(num_batches):
state = SCALE * torch.randn(seq_len, batch_size, state_dim)
action = SCALE * torch.randn(seq_len, batch_size, action_dim)
# random valid step
valid_step = torch.randint(1, seq_len + 1, (batch_size, 1))
feature_mask = torch.arange(seq_len).repeat(batch_size, 1)
feature_mask = (feature_mask >= (seq_len - valid_step)).float()
assert feature_mask.shape == (batch_size, seq_len), feature_mask.shape
feature_mask = feature_mask.transpose(0, 1).unsqueeze(-1)
assert feature_mask.shape == (seq_len, batch_size, 1), feature_mask.shape
feature = torch.cat((state, action), dim=2)
masked_feature = feature * feature_mask
# seq_len, batch_size, state_dim + action_dim
left_shifted = torch.cat(
(
masked_feature.narrow(0, 1, seq_len - 1),
torch.zeros(1, batch_size, state_dim + action_dim),
),
dim=0,
)
# seq_len, batch_size, state_dim + action_dim
right_shifted = torch.cat(
(
torch.zeros(1, batch_size, state_dim + action_dim),
masked_feature.narrow(0, 0, seq_len - 1),
),
dim=0,
)
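        # Summing the left- and right-shifted features makes the reward at each step a
        # function of the features at the previous and next steps (a context window),
        # which a single-step reward model cannot represent.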
# reward_matrix shape: batch_size x seq_len
reward_matrix = torch.matmul(left_shifted + right_shifted, weight).transpose(
0, 1
)
mask = torch.arange(seq_len).repeat(batch_size, 1)
mask = (mask >= (seq_len - valid_step)).float()
reward = (reward_matrix * mask).sum(dim=1).reshape(-1, 1)
data[i] = rlt.MemoryNetworkInput(
state=rlt.FeatureData(state),
action=action,
valid_step=valid_step,
reward=reward,
# the rest fields will not be used
next_state=torch.tensor([]),
step=torch.tensor([]),
not_terminal=torch.tensor([]),
time_diff=torch.tensor([]),
)
return weight, data
def train_and_eval(trainer, data, num_eval_batches=100, max_epochs=1):
train_dataloader = DataLoader(data[:-num_eval_batches], collate_fn=lambda x: x[0])
eval_data = data[-num_eval_batches:]
# disable logging in tests
pl_trainer = pl.Trainer(max_epochs=max_epochs, logger=False)
pl_trainer.fit(trainer, train_dataloader)
total_loss = 0
for i, batch in enumerate(eval_data):
loss = trainer.validation_step(batch, batch_idx=i)
total_loss += loss
return total_loss / num_eval_batches
class TestSyntheticRewardTraining(unittest.TestCase):
def setUp(self):
pl.seed_everything(123)
def test_linear_reward_parametric_reward_success(self):
avg_eval_loss = self._test_linear_reward_parametric_reward(
ground_truth_reward_from_multiple_steps=False
)
threshold = 0.1
assert avg_eval_loss < threshold
def test_linear_reward_parametric_reward_fail(self):
avg_eval_loss = self._test_linear_reward_parametric_reward(
ground_truth_reward_from_multiple_steps=True
)
# fail to learn
threshold = 100.0
assert avg_eval_loss > threshold
def _test_linear_reward_parametric_reward(
self, ground_truth_reward_from_multiple_steps=False
):
"""
Reward at each step is a linear function of present state and action.
        However, we can only observe the aggregated reward at the last step.
This model will fail to learn when ground-truth reward is a function of
multiple steps' states and actions.
"""
state_dim = 10
action_dim = 2
seq_len = 5
batch_size = 512
num_batches = 5000
sizes = [256, 128]
activations = ["relu", "relu"]
last_layer_activation = "linear"
reward_net = SyntheticRewardNet(
SingleStepSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
)
)
optimizer = Optimizer__Union(Adam=classes["Adam"]())
trainer = RewardNetTrainer(reward_net, optimizer)
trainer.set_reporter(
RewardNetworkReporter(
trainer.loss_type,
str(reward_net),
)
)
if ground_truth_reward_from_multiple_steps:
weight, data = create_sequence_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
else:
weight, data = create_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
avg_eval_loss = train_and_eval(trainer, data)
return avg_eval_loss
def test_ngram_fc_parametric_reward(self):
"""
Reward at each step is a linear function of states and actions in a
context window around the step.
However, we can only observe aggregated reward at the last step
"""
state_dim = 10
action_dim = 2
seq_len = 5
batch_size = 512
num_batches = 10000
sizes = [256, 128]
activations = ["relu", "relu"]
last_layer_activation = "linear"
reward_net = SyntheticRewardNet(
NGramFullyConnectedNetwork(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
context_size=3,
)
)
optimizer = Optimizer__Union(Adam=classes["Adam"]())
trainer = RewardNetTrainer(reward_net, optimizer)
trainer.set_reporter(
RewardNetworkReporter(
trainer.loss_type,
str(reward_net),
)
)
weight, data = create_sequence_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
threshold = 0.2
avg_eval_loss = train_and_eval(trainer, data)
assert avg_eval_loss < threshold
def test_ngram_conv_net_parametric_reward(self):
"""
Reward at each step is a linear function of states and actions in a
context window around the step.
However, we can only observe aggregated reward at the last step
"""
state_dim = 10
action_dim = 2
seq_len = 5
batch_size = 512
num_batches = 5000
sizes = [128, 64]
activations = ["relu", "relu"]
last_layer_activation = "linear"
conv_net_params = rlp.ConvNetParameters(
conv_dims=[128],
conv_height_kernels=[1],
pool_types=["max"],
pool_kernel_sizes=[1],
)
reward_net = SyntheticRewardNet(
NGramConvolutionalNetwork(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
context_size=3,
conv_net_params=conv_net_params,
)
)
optimizer = Optimizer__Union(Adam=classes["Adam"]())
trainer = RewardNetTrainer(reward_net, optimizer)
trainer.set_reporter(
RewardNetworkReporter(
trainer.loss_type,
str(reward_net),
)
)
weight, data = create_sequence_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
threshold = 0.2
avg_eval_loss = train_and_eval(trainer, data)
assert avg_eval_loss < threshold, "loss = {} larger than threshold {}".format(
avg_eval_loss, threshold
)
def test_lstm_parametric_reward(self):
"""
Reward at each step is a linear function of states and actions in a
context window around the step.
However, we can only observe aggregated reward at the last step
"""
state_dim = 10
action_dim = 2
seq_len = 5
batch_size = 512
num_batches = 5000
last_layer_activation = "linear"
reward_net = SyntheticRewardNet(
SequenceSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
lstm_hidden_size=128,
lstm_num_layers=2,
lstm_bidirectional=True,
last_layer_activation=last_layer_activation,
)
)
optimizer = Optimizer__Union(Adam=classes["Adam"]())
trainer = RewardNetTrainer(reward_net, optimizer)
trainer.set_reporter(
RewardNetworkReporter(
trainer.loss_type,
str(reward_net),
)
)
weight, data = create_sequence_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
threshold = 0.2
avg_eval_loss = train_and_eval(trainer, data)
assert avg_eval_loss < threshold
def test_transformer_parametric_reward(self):
"""
Reward at each step is a linear function of states and actions in a
context window around the step.
However, we can only observe aggregated reward at the last step
"""
state_dim = 10
action_dim = 2
seq_len = 5
batch_size = 512
num_batches = 10000
d_model = 64
nhead = 8
num_encoder_layers = 1
dim_feedforward = 64
last_layer_activation = "linear"
max_len = seq_len + 1
reward_net = SyntheticRewardNet(
TransformerSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
d_model=d_model,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
dim_feedforward=dim_feedforward,
dropout=0.0,
activation="relu",
last_layer_activation=last_layer_activation,
layer_norm_eps=1e-5,
max_len=max_len,
)
)
optimizer = Optimizer__Union(Adam=classes["Adam"]())
trainer = RewardNetTrainer(reward_net, optimizer)
trainer.set_reporter(
RewardNetworkReporter(
trainer.loss_type,
str(reward_net),
)
)
weight, data = create_sequence_data(
state_dim, action_dim, seq_len, batch_size, num_batches
)
threshold = 0.25
avg_eval_loss = train_and_eval(trainer, data)
assert (
avg_eval_loss < threshold
), "loss = {:.4f} larger than threshold {}".format(avg_eval_loss, threshold)
| 13,181 | 33.781003 | 86 | py |
ReAgent | ReAgent-master/reagent/test/training/test_multi_stage_trainer.py | #!/usr/bin/env python3
import unittest
from typing import List
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.optim as optim
from reagent.reporting import ReporterBase, CompoundReporter
from reagent.training import ReAgentLightningModule, MultiStageTrainer
from torch.utils.data import TensorDataset, DataLoader
class DummyReporter(ReporterBase):
def __init__(self, name: str, expected_epochs: List[int]):
super().__init__({}, {})
self.name = name
self.expected_epochs = expected_epochs
self._log_count = 0
self._flush_count = 0
self._testing = False
def log(self, **kwargs) -> None:
self._log_count += 1
def flush(self, epoch: int):
if not self._testing:
assert epoch in self.expected_epochs, f"{epoch} {self.expected_epochs}"
self._flush_count += 1
class DummyTrainer(ReAgentLightningModule):
def __init__(
self,
name: str,
input_dim: int,
expected_epochs: List[int],
validation_keys: List[str],
test_keys: List[str],
):
super().__init__()
self.name = name
self.linear1 = nn.Linear(input_dim, 1)
self.linear2 = nn.Linear(input_dim, 1)
self.loss_fn = nn.BCEWithLogitsLoss()
self._call_count = {
"train": 0,
"validation": 0,
"test": 0,
}
self.expected_epochs = expected_epochs
self.validation_keys = validation_keys
self.test_keys = test_keys
def configure_optimizers(self):
return [
optim.SGD(self.linear1.parameters(), lr=1e2),
optim.SGD(self.linear2.parameters(), lr=1e2),
]
def on_test_start(self):
self.reporter._testing = True
def on_test_end(self):
self.reporter._testing = False
def train_step_gen(self, training_batch, batch_idx: int):
print(f"train_step_gen {self.name}")
assert (
self.current_epoch in self.expected_epochs
), f"{self.current_epoch} {self.expected_epochs}"
self._call_count["train"] += 1
x, label = training_batch
self.reporter.log()
y = self.linear1(x)
yield self.loss_fn(y, label)
y = self.linear2(x)
yield self.loss_fn(y, label)
def validation_step(self, batch, batch_idx: int):
print(f"validation_step {self.name}")
self._call_count["validation"] += 1
assert self.current_epoch in self.expected_epochs
return {k: torch.ones(2, 3) for k in self.validation_keys}
def validation_epoch_end(self, outputs):
print(f"validation_step_end {self.name}")
print(outputs)
for output in outputs:
assert set(output.keys()) == set(self.validation_keys)
def test_step(self, batch, batch_idx: int):
print(f"test_step {self.name}")
self._call_count["test"] += 1
return {k: torch.ones(2, 3) for k in self.test_keys}
def test_epoch_end(self, outputs):
print(f"test_epoch_end {self.name}")
print(outputs)
for output in outputs:
assert set(output.keys()) == set(self.test_keys)
def make_dataset(input_dim, size):
return TensorDataset(
torch.randn(size, input_dim),
torch.randint(0, 2, (size, 1), dtype=torch.float32),
)
def _merge_report(reporters):
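    # No-op merge: the individual DummyReporters are inspected directly in the test.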
pass
class TestMultiStageTrainer(unittest.TestCase):
def test_multi_stage_trainer(self):
input_dim = 5
stage1 = DummyTrainer(
"stage1",
input_dim,
expected_epochs=[0, 1, 2],
validation_keys=["a", "b", "c"],
test_keys=["d", "e"],
)
stage2 = DummyTrainer(
"stage2",
input_dim,
expected_epochs=[3, 4, 5],
validation_keys=["x", "y", "z"],
test_keys=["u", "v"],
)
multi_stage_trainer = MultiStageTrainer(
[stage1, stage2],
epochs=[3, 3],
)
reporters = [
DummyReporter("stage1", expected_epochs=[0, 1, 2]),
DummyReporter("stage2", expected_epochs=[3, 4, 5]),
]
compound_reporter = CompoundReporter(reporters, _merge_report)
multi_stage_trainer.set_reporter(compound_reporter)
training_size = 100
validation_size = 20
train_dataloader = DataLoader(
make_dataset(input_dim, training_size), batch_size=5
)
validation_dataloader = DataLoader(
make_dataset(input_dim, validation_size),
batch_size=5,
)
trainer = pl.Trainer(max_epochs=6, min_epochs=6)
trainer.fit(multi_stage_trainer, train_dataloader, validation_dataloader)
test_size = 20
test_dataloader = DataLoader(
make_dataset(input_dim, test_size),
batch_size=5,
)
trainer.test(test_dataloaders=test_dataloader)
print(f"stage1 {stage1._call_count}")
print(f"stage2 {stage2._call_count}")
self.assertEqual(stage1._call_count["train"], 60)
        # It seems that Lightning calls validation 2 times at the beginning
self.assertEqual(stage1._call_count["validation"], 14)
self.assertEqual(stage1._call_count["test"], 4)
self.assertEqual(stage2._call_count["train"], 60)
self.assertEqual(stage2._call_count["validation"], 12)
self.assertEqual(stage2._call_count["test"], 4)
for reporter, t in zip(reporters, [stage1, stage2]):
print(f"{reporter.name} {reporter._log_count} {reporter._flush_count}")
self.assertEqual(reporter._log_count, t._call_count["train"])
# flush got called in train & validation 3 times each.
# In stage1, there is an additional call to validation at the beginning
self.assertEqual(reporter._flush_count, 8 if t == stage1 else 7)
| 5,966 | 31.606557 | 83 | py |
ReAgent | ReAgent-master/reagent/test/training/test_crr.py | import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.actor import FullyConnectedActor
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.discrete_crr_trainer import DiscreteCRRTrainer
from reagent.training.parameters import CRRTrainerParameters
from reagent.workflow.types import RewardOptions
class TestCRR(unittest.TestCase):
def setUp(self):
        # preparing various components for CRR trainer initialization
self.batch_size = 3
self.state_dim = 10
self.action_dim = 2
self.num_layers = 2
self.sizes = [20 for _ in range(self.num_layers)]
self.num_atoms = 11
self.activations = ["relu" for _ in range(self.num_layers)]
self.dropout_ratio = 0
self.exploration_variance = 1e-10
self.actions = [str(i) for i in range(self.action_dim)]
self.params = CRRTrainerParameters(actions=self.actions)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.actor_network = FullyConnectedActor(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
activations=self.activations,
exploration_variance=self.exploration_variance,
)
self.actor_network_target = self.actor_network.get_target_network()
self.q1_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q1_network_target = self.q1_network.get_target_network()
self.q2_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q2_network_target = self.q2_network.get_target_network()
self.num_output_nodes = (len(self.metrics_to_score) + 1) * len(
self.params.actions
)
self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True)
self.reward_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe_target = self.q_network_cpe.get_target_network()
self.inp = DiscreteDqnInput(
state=FeatureData(
float_features=torch.rand(self.batch_size, self.state_dim)
),
next_state=FeatureData(
float_features=torch.rand(self.batch_size, self.state_dim)
),
reward=torch.ones(self.batch_size, 1),
time_diff=torch.ones(self.batch_size, 1) * 2,
step=torch.ones(self.batch_size, 1) * 2,
not_terminal=torch.ones(
self.batch_size, 1
), # todo: check terminal behavior
action=torch.tensor([[0, 1], [1, 0], [0, 1]]),
next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]),
possible_actions_mask=torch.ones(self.batch_size, self.action_dim),
possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim),
extras=ExtraData(action_probability=torch.ones(self.batch_size, 1)),
)
@staticmethod
def dummy_log(*args, **kwargs):
        # replaces calls to self.log() which otherwise require the PyTorch Lightning trainer to be initialized
return None
def _construct_trainer(self, new_params=None, no_cpe=False, no_q2=False):
trainer = DiscreteCRRTrainer(
actor_network=self.actor_network,
actor_network_target=self.actor_network_target,
q1_network=self.q1_network,
q1_network_target=self.q1_network_target,
q2_network=(None if no_q2 else self.q2_network),
q2_network_target=(None if no_q2 else self.q2_network_target),
reward_network=(None if no_cpe else self.reward_network),
q_network_cpe=(None if no_cpe else self.q_network_cpe),
q_network_cpe_target=(None if no_cpe else self.q_network_cpe_target),
metrics_to_score=self.metrics_to_score,
evaluation=EvaluationParameters(
calc_cpe_in_training=(False if no_cpe else True)
),
            # pyre-fixme[16]: `CRRTrainerParameters` has no attribute `asdict`.
**(new_params if new_params is not None else self.params).asdict()
)
trainer.log = self.dummy_log
return trainer
def test_init(self):
trainer = self._construct_trainer()
self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all())
param_copy = CRRTrainerParameters(
actions=self.actions,
rl=RLParameters(reward_boost={i: int(i) + 1 for i in self.actions}),
)
reward_boost_trainer = self._construct_trainer(new_params=param_copy)
self.assertTrue(
(
torch.isclose(
reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0])
)
).all()
)
def test_train_step_gen(self):
mse_backward_type = type(
torch.nn.functional.mse_loss(
torch.tensor([1.0], requires_grad=True), torch.zeros(1)
).grad_fn
)
add_backward_type = type(
(
torch.tensor([1.0], requires_grad=True)
+ torch.tensor([1.0], requires_grad=True)
).grad_fn
)
# vanilla
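        # The six yielded losses follow the order of train_step_yield_order in
        # test_configure_optimizers below (q1_network, q2_network, actor_network,
        # reward_network, q_network_cpe, q1_network); the final one presumably drives
        # the target-network soft update.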
trainer = self._construct_trainer()
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 6)
self.assertEqual(type(losses[0].grad_fn), mse_backward_type)
self.assertEqual(type(losses[1].grad_fn), mse_backward_type)
self.assertEqual(type(losses[2].grad_fn), add_backward_type)
self.assertEqual(type(losses[3].grad_fn), mse_backward_type)
self.assertEqual(type(losses[4].grad_fn), mse_backward_type)
self.assertEqual(type(losses[5].grad_fn), add_backward_type)
# no CPE
trainer = self._construct_trainer(no_cpe=True)
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# no q2 net
trainer = self._construct_trainer(no_q2=True)
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 5)
# use_target_actor
params_copy = CRRTrainerParameters(actions=self.actions, use_target_actor=True)
trainer = self._construct_trainer(new_params=params_copy)
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 6)
# delayed policy update
params_copy = CRRTrainerParameters(
actions=self.actions, delayed_policy_update=2
)
trainer = self._construct_trainer(new_params=params_copy)
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 6)
self.assertEqual(losses[2], None)
# entropy
params_copy = CRRTrainerParameters(actions=self.actions, entropy_coeff=1.0)
trainer = self._construct_trainer(new_params=params_copy)
loss_gen = trainer.train_step_gen(self.inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 6)
def test_q_network_property(self):
trainer = self._construct_trainer()
self.assertEqual(trainer.q_network, trainer.q1_network)
def test_configure_optimizers(self):
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 6)
train_step_yield_order = [
trainer.q1_network,
trainer.q2_network,
trainer.actor_network,
trainer.reward_network,
trainer.q_network_cpe,
trainer.q1_network,
]
for i in range(len(train_step_yield_order)):
opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0]
loss_param = list(train_step_yield_order[i].parameters())[0]
self.assertTrue(torch.all(torch.isclose(opt_param, loss_param)))
trainer = self._construct_trainer(no_cpe=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 4)
trainer = self._construct_trainer(no_q2=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 5)
def test_get_detached_model_outputs(self):
trainer = self._construct_trainer()
action_scores, _ = trainer.get_detached_model_outputs(
FeatureData(float_features=torch.rand(self.batch_size, self.state_dim))
)
self.assertEqual(action_scores.shape[0], self.batch_size)
self.assertEqual(action_scores.shape[1], self.action_dim)
def test_validation_step(self):
trainer = self._construct_trainer()
edp = trainer.validation_step(self.inp, batch_idx=1)
out = trainer.actor_network(self.inp.state)
# Note: in current code EDP assumes policy induced by q-net instead of actor
self.assertTrue(torch.all(torch.isclose(edp.optimal_q_values, out.action)))
| 10,153 | 41.13278 | 108 | py |
ReAgent | ReAgent-master/reagent/test/training/test_ppo.py | import unittest
from collections import defaultdict
from unittest import mock
import torch
from reagent.core.types import PolicyGradientInput
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.models.dueling_q_network import DuelingQNetwork
from reagent.models.fully_connected_network import FloatFeatureFullyConnected
from reagent.training.parameters import PPOTrainerParameters
from reagent.training.ppo_trainer import PPOTrainer
from reagent.workflow.types import RewardOptions
class TestPPO(unittest.TestCase):
def setUp(self):
        # preparing various components for PPO trainer initialization
self.batch_size = 3
self.state_dim = 10
self.action_dim = 2
self.num_layers = 2
self.sizes = [20 for _ in range(self.num_layers)]
self.activations = ["relu" for _ in range(self.num_layers)]
self.use_layer_norm = False
self.softmax_temperature = 1
self.actions = [str(i) for i in range(self.action_dim)]
self.params = PPOTrainerParameters(actions=self.actions, normalize=False)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.policy_network = DuelingQNetwork.make_fully_connected(
state_dim=self.state_dim,
action_dim=self.action_dim,
layers=self.sizes,
activations=self.activations,
)
self.sampler = SoftmaxActionSampler(temperature=self.softmax_temperature)
self.policy = Policy(scorer=self.policy_network, sampler=self.sampler)
self.value_network = FloatFeatureFullyConnected(
state_dim=self.state_dim,
output_dim=1,
sizes=self.sizes,
activations=self.activations,
use_layer_norm=self.use_layer_norm,
)
def _construct_trainer(self, new_params=None, use_value_net=True):
value_network = self.value_network if use_value_net else None
params = new_params if new_params else self.params
trainer = PPOTrainer(
policy=self.policy, value_net=value_network, **params.asdict()
)
trainer.optimizers = mock.Mock(return_value=[0, 0])
return trainer
def test_init(self):
trainer = self._construct_trainer()
self.assertEqual(
type(trainer.value_loss_fn), type(torch.nn.MSELoss(reduction="mean"))
)
with self.assertRaises(AssertionError):
new_params = PPOTrainerParameters(ppo_epsilon=-1)
self._construct_trainer(new_params)
with self.assertRaises(AssertionError):
new_params = PPOTrainerParameters(ppo_epsilon=2)
self._construct_trainer(new_params)
with self.assertRaises(AssertionError):
params = PPOTrainerParameters(actions=["1", "2"], normalize=True)
trainer = self._construct_trainer(new_params=params)
def test__trajectory_to_losses(self):
inp = PolicyGradientInput.input_prototype(
batch_size=self.batch_size,
action_dim=self.action_dim,
state_dim=self.state_dim,
)
# Normalize + offset clamp min
params = PPOTrainerParameters(
actions=["1", "2"], normalize=True, offset_clamp_min=True
)
trainer = self._construct_trainer(new_params=params, use_value_net=False)
losses = trainer._trajectory_to_losses(inp)
self.assertEqual(len(losses), 1)
self.assertTrue("ppo_loss" in losses)
trainer = self._construct_trainer()
losses = trainer._trajectory_to_losses(inp)
self.assertEqual(len(losses), 2)
self.assertTrue("ppo_loss" in losses and "value_net_loss" in losses)
# entropy weight should always lower ppo_loss
trainer.entropy_weight = 1.0
entropy_losses = trainer._trajectory_to_losses(inp)
self.assertTrue(entropy_losses["ppo_loss"] < losses["ppo_loss"])
def test_configure_optimizers(self):
# Ordering is value then policy
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertTrue(
torch.all(
torch.isclose(
optimizers[0]["optimizer"].param_groups[0]["params"][0],
list(trainer.value_net.fc.dnn[0].parameters())[0],
)
)
)
self.assertTrue(
torch.all(
torch.isclose(
optimizers[1]["optimizer"].param_groups[0]["params"][0],
list(trainer.scorer.shared_network.fc.dnn[0].parameters())[0],
)
)
)
def test_get_optimizers(self):
# ordering covered in test_configure_optimizers
trainer = self._construct_trainer()
optimizers = trainer.get_optimizers()
self.assertIsNotNone(optimizers[0])
trainer = self._construct_trainer(use_value_net=False)
optimizers = trainer.get_optimizers()
self.assertIsNone(optimizers[0])
def test_training_step(self):
trainer = self._construct_trainer()
inp = defaultdict(lambda: torch.ones(1, 5))
trainer.update_model = mock.Mock()
trainer.training_step(inp, batch_idx=1)
trainer.update_model.assert_called_with()
trainer.update_freq = 10
trainer.update_model = mock.Mock()
trainer.training_step(inp, batch_idx=1)
trainer.update_model.assert_not_called()
def test_update_model(self):
trainer = self._construct_trainer()
# can't update empty model
with self.assertRaises(AssertionError):
trainer.update_model()
        # _update_model is called with shuffled mini-batches of the traj_buffer contents, update_epochs times
trainer = self._construct_trainer(
new_params=PPOTrainerParameters(
ppo_batch_size=1,
update_epochs=2,
update_freq=2,
normalize=False,
)
)
trainer.traj_buffer = [1, 2]
trainer._update_model = mock.Mock()
trainer.update_model()
calls = [mock.call([1]), mock.call([2]), mock.call([1]), mock.call([2])]
trainer._update_model.assert_has_calls(calls, any_order=True)
# trainer empties buffer
self.assertEqual(trainer.traj_buffer, [])
# _update_model
trainer = self._construct_trainer()
value_net_opt_mock = mock.Mock()
ppo_opt_mock = mock.Mock()
trainer.get_optimizers = mock.Mock(
return_value=[value_net_opt_mock, ppo_opt_mock]
)
trainer._trajectory_to_losses = mock.Mock(
side_effect=[
{"ppo_loss": torch.tensor(1), "value_net_loss": torch.tensor(2)},
{"ppo_loss": torch.tensor(3), "value_net_loss": torch.tensor(4)},
]
)
trainer.manual_backward = mock.Mock()
inp1 = PolicyGradientInput.input_prototype(
batch_size=1, action_dim=1, state_dim=1
)
inp2 = PolicyGradientInput.input_prototype(
batch_size=1, action_dim=1, state_dim=1
)
trainer._update_model([inp1, inp2])
trainer._trajectory_to_losses.assert_has_calls(
[mock.call(inp1), mock.call(inp2)]
)
value_net_opt_mock.zero_grad.assert_called()
value_net_opt_mock.step.assert_called()
ppo_opt_mock.zero_grad.assert_called()
ppo_opt_mock.step.assert_called()
trainer.manual_backward.assert_has_calls(
[mock.call(torch.tensor(6)), mock.call(torch.tensor(4))]
)
| 7,870 | 37.583333 | 92 | py |
ReAgent | ReAgent-master/reagent/test/world_model/test_seq2reward.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import random
import unittest
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from parameterized import parameterized
from reagent.core import types as rlt
from reagent.core.parameters import (
NormalizationData,
NormalizationParameters,
ProblemDomain,
Seq2RewardTrainerParameters,
)
from reagent.gym.envs import Gym
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.net_builder.value.fully_connected import FullyConnected
from reagent.prediction.predictor_wrapper import (
Seq2RewardWithPreprocessor,
Seq2RewardPlanShortSeqWithPreprocessor,
FAKE_STATE_ID_LIST_FEATURES,
FAKE_STATE_ID_SCORE_LIST_FEATURES,
)
from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.training.utils import gen_permutations
from reagent.training.world_model.compress_model_trainer import CompressModelTrainer
from reagent.training.world_model.seq2reward_trainer import get_Q, Seq2RewardTrainer
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
SEED = 0
STRING_GAME_TESTS = [(False,), (True,)]
class FakeStepPredictionNetwork(nn.Module):
def __init__(self, look_ahead_steps):
super().__init__()
self.look_ahead_steps = look_ahead_steps
def forward(self, state: torch.Tensor):
"""
Given the current state, predict the probability of
        experiencing the next n steps (1 <= n <= look_ahead_steps).
        For testing purposes, it outputs fixed fake numbers.
"""
batch_size, _ = state.shape
return torch.ones(batch_size, self.look_ahead_steps).float()
class FakeSeq2RewardNetwork(nn.Module):
def forward(
self,
state: rlt.FeatureData,
action: rlt.FeatureData,
valid_reward_len: Optional[torch.Tensor] = None,
):
"""
Mimic I/O of Seq2RewardNetwork but return fake reward
Reward is the concatenation of action indices, independent
of state.
For example, when seq_len = 3, batch_size = 1, action_num = 2,
acc_reward = tensor(
[[ 0.],
[ 1.],
[ 10.],
[ 11.],
[100.],
[101.],
[110.],
[111.]]
)
Input action shape: seq_len, batch_size, num_action
Output acc_reward shape: batch_size, 1
"""
# pyre-fixme[9]: action has type `FeatureData`; used as `Tensor`.
action = action.float_features.transpose(0, 1)
action_indices = torch.argmax(action, dim=2).tolist()
acc_reward = torch.tensor(
list(map(lambda x: float("".join(map(str, x))), action_indices))
).reshape(-1, 1)
logger.info(f"acc_reward: {acc_reward}")
return rlt.Seq2RewardOutput(acc_reward=acc_reward)
def create_string_game_data(
dataset_size=10000, training_data_ratio=0.9, filter_short_sequence=False
):
SEQ_LEN = 6
NUM_ACTION = 2
NUM_MDP_PER_BATCH = 5
env = Gym(env_name="StringGame-v0", set_max_steps=SEQ_LEN)
df = create_df_from_replay_buffer(
env=env,
problem_domain=ProblemDomain.DISCRETE_ACTION,
desired_size=dataset_size,
multi_steps=None,
ds="2020-10-10",
)
if filter_short_sequence:
batch_size = NUM_MDP_PER_BATCH
time_diff = torch.ones(SEQ_LEN, batch_size)
valid_step = SEQ_LEN * torch.ones(batch_size, dtype=torch.int64)[:, None]
not_terminal = torch.Tensor(
[0 if i == SEQ_LEN - 1 else 1 for i in range(SEQ_LEN)]
)
not_terminal = torch.transpose(not_terminal.tile(NUM_MDP_PER_BATCH, 1), 0, 1)
else:
batch_size = NUM_MDP_PER_BATCH * SEQ_LEN
time_diff = torch.ones(SEQ_LEN, batch_size)
valid_step = torch.arange(SEQ_LEN, 0, -1).tile(NUM_MDP_PER_BATCH)[:, None]
not_terminal = torch.transpose(
torch.tril(torch.ones(SEQ_LEN, SEQ_LEN), diagonal=-1).tile(
NUM_MDP_PER_BATCH, 1
),
0,
1,
)
num_batches = int(dataset_size / SEQ_LEN / NUM_MDP_PER_BATCH)
batches = [None for _ in range(num_batches)]
batch_count, batch_seq_count = 0, 0
batch_reward = torch.zeros(SEQ_LEN, batch_size)
batch_action = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION)
batch_state = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION)
for mdp_id in sorted(set(df.mdp_id)):
mdp = df[df["mdp_id"] == mdp_id].sort_values("sequence_number", ascending=True)
if len(mdp) != SEQ_LEN:
continue
all_step_reward = torch.Tensor(list(mdp["reward"]))
all_step_state = torch.Tensor([list(s.values()) for s in mdp["state_features"]])
all_step_action = torch.zeros_like(all_step_state)
all_step_action[torch.arange(SEQ_LEN), [int(a) for a in mdp["action"]]] = 1.0
for j in range(SEQ_LEN):
if filter_short_sequence and j > 0:
break
reward = torch.zeros_like(all_step_reward)
reward[: SEQ_LEN - j] = all_step_reward[-(SEQ_LEN - j) :]
batch_reward[:, batch_seq_count] = reward
state = torch.zeros_like(all_step_state)
state[: SEQ_LEN - j] = all_step_state[-(SEQ_LEN - j) :]
batch_state[:, batch_seq_count] = state
action = torch.zeros_like(all_step_action)
action[: SEQ_LEN - j] = all_step_action[-(SEQ_LEN - j) :]
batch_action[:, batch_seq_count] = action
batch_seq_count += 1
if batch_seq_count == batch_size:
batches[batch_count] = rlt.MemoryNetworkInput(
reward=batch_reward,
action=batch_action,
state=rlt.FeatureData(float_features=batch_state),
next_state=rlt.FeatureData(
float_features=torch.zeros_like(batch_state)
), # fake, not used anyway
not_terminal=not_terminal,
time_diff=time_diff,
valid_step=valid_step,
step=None,
)
batch_count += 1
batch_seq_count = 0
batch_reward = torch.zeros_like(batch_reward)
batch_action = torch.zeros_like(batch_action)
batch_state = torch.zeros_like(batch_state)
assert batch_count == num_batches
num_training_batches = int(training_data_ratio * num_batches)
training_data = DataLoader(
batches[:num_training_batches], collate_fn=lambda x: x[0]
)
eval_data = DataLoader(batches[num_training_batches:], collate_fn=lambda x: x[0])
return training_data, eval_data
def train_seq2reward_model(training_data, learning_rate=0.01, num_epochs=5):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(training_data)).action.shape
assert SEQ_LEN == 6 and NUM_ACTION == 2
seq2reward_network = Seq2RewardNetwork(
state_dim=NUM_ACTION,
action_dim=NUM_ACTION,
num_hiddens=64,
num_hidden_layers=2,
)
trainer_param = Seq2RewardTrainerParameters(
learning_rate=learning_rate,
multi_steps=SEQ_LEN,
action_names=["0", "1"],
gamma=1.0,
view_q_value=True,
)
trainer = Seq2RewardTrainer(
seq2reward_network=seq2reward_network, params=trainer_param
)
pl.seed_everything(SEED)
pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True)
pl_trainer.fit(trainer, training_data)
return trainer
def eval_seq2reward_model(eval_data, seq2reward_trainer):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.shape
initial_state = torch.Tensor([[0, 0]])
initial_state_q_values = torch.squeeze(
get_Q(
seq2reward_trainer.seq2reward_network,
initial_state,
seq2reward_trainer.all_permut,
)
)
total_mse_loss = 0
total_q_values = torch.zeros(NUM_ACTION)
total_action_distribution = torch.zeros(NUM_ACTION)
for idx, batch in enumerate(eval_data):
(
mse_loss,
_,
q_values,
action_distribution,
) = seq2reward_trainer.validation_step(batch, idx)
total_mse_loss += mse_loss
total_q_values += torch.tensor(q_values)
total_action_distribution += torch.tensor(action_distribution)
N_eval = len(eval_data)
eval_mse_loss = total_mse_loss / N_eval
eval_q_values = total_q_values / N_eval
eval_action_distribution = total_action_distribution / N_eval
return (
initial_state_q_values,
eval_mse_loss,
eval_q_values,
eval_action_distribution,
)
def train_seq2reward_compress_model(
training_data, seq2reward_network, learning_rate=0.1, num_epochs=5
):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(training_data)).action.shape
assert SEQ_LEN == 6 and NUM_ACTION == 2
compress_net_builder = FullyConnected(sizes=[8, 8])
state_normalization_data = NormalizationData(
dense_normalization_parameters={
0: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
1: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
}
)
compress_model_network = compress_net_builder.build_value_network(
state_normalization_data,
output_dim=NUM_ACTION,
)
trainer_param = Seq2RewardTrainerParameters(
learning_rate=0.0,
multi_steps=SEQ_LEN,
action_names=["0", "1"],
compress_model_learning_rate=learning_rate,
gamma=1.0,
view_q_value=True,
)
trainer = CompressModelTrainer(
compress_model_network=compress_model_network,
seq2reward_network=seq2reward_network,
params=trainer_param,
)
pl.seed_everything(SEED)
pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True)
pl_trainer.fit(trainer, training_data)
return trainer
def eval_seq2reward_compress_model(eval_data, compress_model_trainer):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.shape
total_mse_loss = 0
total_q_values = torch.zeros(NUM_ACTION)
total_action_distribution = torch.zeros(NUM_ACTION)
for idx, batch in enumerate(eval_data):
(
mse_loss,
q_values,
action_distribution,
_,
) = compress_model_trainer.validation_step(batch, idx)
total_mse_loss += mse_loss
total_q_values += torch.tensor(q_values)
total_action_distribution += torch.tensor(action_distribution)
N_eval = len(eval_data)
eval_mse_loss = total_mse_loss / N_eval
eval_q_values = total_q_values / N_eval
eval_action_distribution = total_action_distribution / N_eval
return eval_mse_loss, eval_q_values, eval_action_distribution
class TestSeq2Reward(unittest.TestCase):
def test_seq2reward_with_preprocessor_plan_short_sequence(self):
self._test_seq2reward_with_preprocessor(plan_short_sequence=True)
def test_seq2reward_with_preprocessor_plan_full_sequence(self):
self._test_seq2reward_with_preprocessor(plan_short_sequence=False)
def _test_seq2reward_with_preprocessor(self, plan_short_sequence):
state_dim = 4
action_dim = 2
seq_len = 3
model = FakeSeq2RewardNetwork()
state_normalization_parameters = {
i: NormalizationParameters(
feature_type=DO_NOT_PREPROCESS, mean=0.0, stddev=1.0
)
for i in range(1, state_dim)
}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
if plan_short_sequence:
step_prediction_model = FakeStepPredictionNetwork(seq_len)
model_with_preprocessor = Seq2RewardPlanShortSeqWithPreprocessor(
model,
step_prediction_model,
state_preprocessor,
seq_len,
action_dim,
)
else:
model_with_preprocessor = Seq2RewardWithPreprocessor(
model,
state_preprocessor,
seq_len,
action_dim,
)
input_prototype = rlt.ServingFeatureData(
float_features_with_presence=state_preprocessor.input_prototype(),
id_list_features=FAKE_STATE_ID_LIST_FEATURES,
id_score_list_features=FAKE_STATE_ID_SCORE_LIST_FEATURES,
)
q_values = model_with_preprocessor(input_prototype)
if plan_short_sequence:
# When planning for 1, 2, and 3 steps ahead,
# the expected q values are respectively:
# [0, 1], [1, 11], [11, 111]
# Weighting the expected q values by predicted step
# probabilities [0.33, 0.33, 0.33], we have [4, 41]
expected_q_values = torch.tensor([[4.0, 41.0]])
else:
expected_q_values = torch.tensor([[11.0, 111.0]])
assert torch.all(expected_q_values == q_values)
def test_get_Q(self):
NUM_ACTION = 2
MULTI_STEPS = 3
BATCH_SIZE = 2
STATE_DIM = 4
all_permut = gen_permutations(MULTI_STEPS, NUM_ACTION)
seq2reward_network = FakeSeq2RewardNetwork()
state = torch.zeros(BATCH_SIZE, STATE_DIM)
q_values = get_Q(seq2reward_network, state, all_permut)
expected_q_values = torch.tensor([[11.0, 111.0], [11.0, 111.0]])
logger.info(f"q_values: {q_values}")
assert torch.all(expected_q_values == q_values)
def test_gen_permutations_seq_len_1_action_6(self):
SEQ_LEN = 1
NUM_ACTION = 6
expected_outcome = torch.tensor([[0], [1], [2], [3], [4], [5]])
self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome)
def test_gen_permutations_seq_len_3_num_action_2(self):
SEQ_LEN = 3
NUM_ACTION = 2
expected_outcome = torch.tensor(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
]
)
self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome)
def _test_gen_permutations(self, SEQ_LEN, NUM_ACTION, expected_outcome):
# expected shape: SEQ_LEN, PERM_NUM, ACTION_DIM
result = gen_permutations(SEQ_LEN, NUM_ACTION)
assert result.shape == (SEQ_LEN, NUM_ACTION ** SEQ_LEN, NUM_ACTION)
outcome = torch.argmax(result.transpose(0, 1), dim=-1)
assert torch.all(outcome == expected_outcome)
@parameterized.expand(STRING_GAME_TESTS)
@unittest.skipIf("SANDCASTLE" in os.environ, "Skipping long test on sandcastle.")
def test_seq2reward_on_string_game_v0(self, filter_short_sequence):
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
training_data, eval_data = create_string_game_data(
filter_short_sequence=filter_short_sequence
)
seq2reward_trainer = train_seq2reward_model(training_data)
(
initial_state_q_values,
eval_mse_loss,
eval_q_values,
eval_action_distribution,
) = eval_seq2reward_model(eval_data, seq2reward_trainer)
assert abs(initial_state_q_values[0].item() - 10) < 1.0
assert abs(initial_state_q_values[1].item() - 5) < 1.0
if filter_short_sequence:
assert eval_mse_loss < 0.1
else:
# The same short sequence may have different total rewards because of the missing
# states and actions from earlier steps, so the trained network is not able
# to reduce the MSE loss to values close to zero.
assert eval_mse_loss < 10
compress_model_trainer = train_seq2reward_compress_model(
training_data, seq2reward_trainer.seq2reward_network
)
(
compress_eval_mse_loss,
compress_eval_q_values,
compress_eval_action_distribution,
) = eval_seq2reward_compress_model(eval_data, compress_model_trainer)
assert compress_eval_mse_loss < 1e-5
assert torch.all(eval_q_values - compress_eval_q_values < 1e-5)
assert torch.all(
eval_action_distribution - compress_eval_action_distribution < 1e-5
)
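# Illustrative sketch (not part of the original test suite): with FakeSeq2RewardNetwork,
# the accumulated reward of an action sequence is just its action indices read as digits.
# The expectation in test_get_Q ([[11., 111.]]) is therefore consistent with taking the
# best accumulated reward over all 3-step permutations that share the same first action.
# This only restates that observation; it is not ReAgent's get_Q implementation.
if __name__ == "__main__":
    from itertools import product
    rewards_by_first_action = {0: [], 1: []}
    for seq in product([0, 1], repeat=3):
        rewards_by_first_action[seq[0]].append(float("".join(map(str, seq))))
    print([max(v) for v in rewards_by_first_action.values()])  # [11.0, 111.0]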
| 16,680 | 34.719486 | 88 | py |
ReAgent | ReAgent-master/reagent/test/world_model/test_mdnrnn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy as np
import torch
from reagent.core.parameters import MDNRNNTrainerParameters
from reagent.models.mdn_rnn import MDNRNNMemoryPool, gmm_loss
from reagent.models.world_model import MemoryNetwork
from reagent.reporting.world_model_reporter import WorldModelReporter
from reagent.test.world_model.simulated_world_model import SimulatedWorldModel
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
class TestMDNRNN(unittest.TestCase):
def test_gmm_loss(self):
# seq_len x batch_size x gaussian_size x feature_size
# 1 x 1 x 2 x 2
mus = torch.Tensor([[[[0.0, 0.0], [6.0, 6.0]]]])
sigmas = torch.Tensor([[[[2.0, 2.0], [2.0, 2.0]]]])
# seq_len x batch_size x gaussian_size
pi = torch.Tensor([[[0.5, 0.5]]])
logpi = torch.log(pi)
# seq_len x batch_size x feature_size
batch = torch.Tensor([[[3.0, 3.0]]])
gl = gmm_loss(batch, mus, sigmas, logpi)
# first component, first dimension
n11 = Normal(mus[0, 0, 0, 0], sigmas[0, 0, 0, 0])
# first component, second dimension
n12 = Normal(mus[0, 0, 0, 1], sigmas[0, 0, 0, 1])
p1 = (
pi[0, 0, 0]
* torch.exp(n11.log_prob(batch[0, 0, 0]))
* torch.exp(n12.log_prob(batch[0, 0, 1]))
)
# second component, first dimension
n21 = Normal(mus[0, 0, 1, 0], sigmas[0, 0, 1, 0])
# second component, second dimension
n22 = Normal(mus[0, 0, 1, 1], sigmas[0, 0, 1, 1])
p2 = (
pi[0, 0, 1]
* torch.exp(n21.log_prob(batch[0, 0, 0]))
* torch.exp(n22.log_prob(batch[0, 0, 1]))
)
logger.info(
"gmm loss={}, p1={}, p2={}, p1+p2={}, -log(p1+p2)={}".format(
gl, p1, p2, p1 + p2, -(torch.log(p1 + p2))
)
)
assert -(torch.log(p1 + p2)) == gl
def test_mdnrnn_simulate_world_cpu(self):
self._test_mdnrnn_simulate_world()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_mdnrnn_simulate_world_gpu(self):
self._test_mdnrnn_simulate_world(use_gpu=True)
def _test_mdnrnn_simulate_world(self, use_gpu=False):
num_epochs = 300
num_episodes = 400
batch_size = 200
action_dim = 2
seq_len = 5
state_dim = 2
simulated_num_gaussians = 2
mdrnn_num_gaussians = 2
simulated_num_hidden_layers = 1
simulated_num_hiddens = 3
mdnrnn_num_hidden_layers = 1
mdnrnn_num_hiddens = 10
adam_lr = 0.01
replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_episodes)
swm = SimulatedWorldModel(
action_dim=action_dim,
state_dim=state_dim,
num_gaussians=simulated_num_gaussians,
lstm_num_hidden_layers=simulated_num_hidden_layers,
lstm_num_hiddens=simulated_num_hiddens,
)
possible_actions = torch.eye(action_dim)
for _ in range(num_episodes):
cur_state_mem = torch.zeros((seq_len, state_dim))
next_state_mem = torch.zeros((seq_len, state_dim))
action_mem = torch.zeros((seq_len, action_dim))
reward_mem = torch.zeros(seq_len)
not_terminal_mem = torch.zeros(seq_len)
next_mus_mem = torch.zeros((seq_len, simulated_num_gaussians, state_dim))
swm.init_hidden(batch_size=1)
next_state = torch.randn((1, 1, state_dim))
for s in range(seq_len):
cur_state = next_state
action = possible_actions[np.random.randint(action_dim)].view(
1, 1, action_dim
)
next_mus, reward = swm(action, cur_state)
not_terminal = 1
if s == seq_len - 1:
not_terminal = 0
# randomly draw for next state
next_pi = torch.ones(simulated_num_gaussians) / simulated_num_gaussians
index = Categorical(next_pi).sample((1,)).long().item()
next_state = next_mus[0, 0, index].view(1, 1, state_dim)
cur_state_mem[s] = cur_state.detach()
action_mem[s] = action
reward_mem[s] = reward.detach()
not_terminal_mem[s] = not_terminal
next_state_mem[s] = next_state.detach()
next_mus_mem[s] = next_mus.detach()
replay_buffer.insert_into_memory(
cur_state_mem, action_mem, next_state_mem, reward_mem, not_terminal_mem
)
num_batch = num_episodes // batch_size
mdnrnn_params = MDNRNNTrainerParameters(
hidden_size=mdnrnn_num_hiddens,
num_hidden_layers=mdnrnn_num_hidden_layers,
learning_rate=adam_lr,
num_gaussians=mdrnn_num_gaussians,
)
mdnrnn_net = MemoryNetwork(
state_dim=state_dim,
action_dim=action_dim,
num_hiddens=mdnrnn_params.hidden_size,
num_hidden_layers=mdnrnn_params.num_hidden_layers,
num_gaussians=mdnrnn_params.num_gaussians,
)
if use_gpu:
mdnrnn_net = mdnrnn_net.cuda()
trainer = MDNRNNTrainer(
memory_network=mdnrnn_net, params=mdnrnn_params, cum_loss_hist=num_batch
)
reporter = WorldModelReporter(report_interval=1)
trainer.set_reporter(reporter)
optimizer = trainer.configure_optimizers()[0]
for e in range(num_epochs):
for i in range(num_batch):
training_batch = replay_buffer.sample_memories(
batch_size, use_gpu=use_gpu
)
optimizer.zero_grad()
loss = next(trainer.train_step_gen(training_batch, i))
loss.backward()
optimizer.step()
logger.info(
"{}-th epoch, {}-th minibatch: \n"
"loss={}, bce={}, gmm={}, mse={} \n"
"cum loss={}, cum bce={}, cum gmm={}, cum mse={}\n".format(
e,
i,
reporter.loss.values[-1],
reporter.bce.values[-1],
reporter.gmm.values[-1],
reporter.mse.values[-1],
np.mean(reporter.loss.values[-100:]),
np.mean(reporter.bce.values[-100:]),
np.mean(reporter.gmm.values[-100:]),
np.mean(reporter.mse.values[-100:]),
)
)
if (
np.mean(reporter.loss.values[-100:]) < 0
and np.mean(reporter.gmm.values[-100:]) < -3.0
and np.mean(reporter.bce.values[-100:]) < 0.6
and np.mean(reporter.mse.values[-100:]) < 0.2
):
return
raise RuntimeError("losses not reduced significantly during training")
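# Illustrative sketch (not part of the original test suite): the quantity checked in
# TestMDNRNN.test_gmm_loss can also be written with torch.logsumexp, the numerically
# stable form of -log(sum_k pi_k * prod_d N(x_d; mu_kd, sigma_kd)). The values mirror
# the toy example above; this restates the math being tested, not the gmm_loss internals.
if __name__ == "__main__":
    mus = torch.Tensor([[[[0.0, 0.0], [6.0, 6.0]]]])
    sigmas = torch.Tensor([[[[2.0, 2.0], [2.0, 2.0]]]])
    logpi = torch.log(torch.Tensor([[[0.5, 0.5]]]))
    batch = torch.Tensor([[[3.0, 3.0]]])
    # per-component log-likelihood, summed over feature dimensions
    log_probs = Normal(mus, sigmas).log_prob(batch.unsqueeze(-2)).sum(dim=-1)
    nll = -torch.logsumexp(logpi + log_probs, dim=-1)
    print(nll)  # matches gmm_loss(batch, mus, sigmas, logpi) for this example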
| 7,353 | 37.705263 | 87 | py |
ReAgent | ReAgent-master/reagent/test/world_model/simulated_world_model.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn as nn
class SimulatedWorldModel(nn.Module):
"""
A world model used for simulation. Underlying is an RNN with fixed
parameters. Given a sequence of actions and states, it outputs the next
state's mixture means and reward.
"""
def __init__(
self,
action_dim,
state_dim,
num_gaussians,
lstm_num_hidden_layers,
lstm_num_hiddens,
):
super().__init__()
self.action_dim = action_dim
self.state_dim = state_dim
self.num_gaussians = num_gaussians
self.lstm_num_hidden_layers = lstm_num_hidden_layers
self.lstm_num_hiddens = lstm_num_hiddens
self.init_lstm()
self.init_weight()
self.init_hidden()
self.eval()
def init_lstm(self):
self.lstm = nn.LSTM(
input_size=self.action_dim + self.state_dim,
hidden_size=self.lstm_num_hiddens,
num_layers=self.lstm_num_hidden_layers,
)
# output mu for each gaussian, and reward
self.gmm_linear = nn.Linear(
self.lstm_num_hiddens, self.state_dim * self.num_gaussians + 1
)
def init_hidden(self, batch_size=1):
# (num_layers * num_directions, batch, hidden_size)
self.hidden = (
torch.zeros(self.lstm_num_hidden_layers, batch_size, self.lstm_num_hiddens),
torch.zeros(self.lstm_num_hidden_layers, batch_size, self.lstm_num_hiddens),
)
def init_weight(self):
torch.manual_seed(3212)
for _, p in self.lstm.named_parameters():
nn.init.normal_(p, 0, 1)
for _, p in self.gmm_linear.named_parameters():
nn.init.normal_(p, 0, 1)
def forward(self, actions, cur_states):
# actions: (SEQ_LEN, BATCH_SIZE, ACTION_SIZE)
# cur_states: (SEQ_LEN, BATCH_SIZE, FEATURE_SIZE)
seq_len, batch_size = actions.size(0), actions.size(1)
X = torch.cat([actions, cur_states], dim=-1)
# X shape: (SEQ_LEN, BATCH_SIZE, ACTION_SIZE + FEATURE_SIZE), the LSTM input
Y, self.hidden = self.lstm(X, self.hidden)
gmm_outs = self.gmm_linear(Y)
mus = gmm_outs[:, :, :-1]
mus = mus.view(seq_len, batch_size, self.num_gaussians, self.state_dim)
rewards = gmm_outs[:, :, -1]
return mus, rewards
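# Minimal usage sketch (illustrative, not part of the original module): rolling the
# simulated world model forward for one step with batch size 1. The dimensions below
# are arbitrary and chosen only for demonstration.
if __name__ == "__main__":
    action_dim, state_dim = 2, 2
    model = SimulatedWorldModel(
        action_dim=action_dim,
        state_dim=state_dim,
        num_gaussians=2,
        lstm_num_hidden_layers=1,
        lstm_num_hiddens=3,
    )
    model.init_hidden(batch_size=1)
    action = torch.eye(action_dim)[0].view(1, 1, action_dim)
    state = torch.randn(1, 1, state_dim)
    mus, reward = model(action, state)
    print(mus.shape, reward.shape)  # (1, 1, 2, 2) and (1, 1)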
| 2,425 | 32.232877 | 88 | py |
ReAgent | ReAgent-master/reagent/test/models/test_bcq.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy.testing as npt
import torch
import torch.nn.init as init
from reagent.core import types as rlt
from reagent.models.bcq import BatchConstrainedDQN
from reagent.models.dqn import FullyConnectedDQN
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
class TestBCQ(unittest.TestCase):
def test_basic(self):
state_dim = 8
action_dim = 4
q_network = FullyConnectedDQN(
state_dim, action_dim, sizes=[8, 4], activations=["relu", "relu"]
)
imitator_network = FullyConnectedNetwork(
layers=[state_dim, 8, 4, action_dim], activations=["relu", "relu", "linear"]
)
model = BatchConstrainedDQN(
state_dim=state_dim,
q_network=q_network,
imitator_network=imitator_network,
bcq_drop_threshold=0.05,
)
input = model.input_prototype()
self.assertEqual((1, state_dim), input.float_features.shape)
q_values = model(input)
self.assertEqual((1, action_dim), q_values.shape)
def test_save_load(self):
state_dim = 8
action_dim = 4
q_network = FullyConnectedDQN(
state_dim, action_dim, sizes=[8, 4], activations=["relu", "relu"]
)
imitator_network = FullyConnectedNetwork(
layers=[state_dim, 8, 4, action_dim], activations=["relu", "relu", "linear"]
)
model = BatchConstrainedDQN(
state_dim=state_dim,
q_network=q_network,
imitator_network=imitator_network,
bcq_drop_threshold=0.05,
)
# 6 for DQN + 6 for Imitator Network + 2 for BCQ constants
expected_num_params, expected_num_inputs, expected_num_outputs = 14, 1, 1
check_save_load(
self, model, expected_num_params, expected_num_inputs, expected_num_outputs
)
def test_forward_pass(self):
torch.manual_seed(123)
state_dim = 1
action_dim = 2
state = rlt.FeatureData(torch.tensor([[2.0]]))
bcq_drop_threshold = 0.20
q_network = FullyConnectedDQN(
state_dim, action_dim, sizes=[2], activations=["relu"]
)
init.constant_(q_network.fc.dnn[-2].bias, 3.0)
imitator_network = FullyConnectedNetwork(
layers=[state_dim, 2, action_dim], activations=["relu", "linear"]
)
imitator_probs = torch.nn.functional.softmax(
imitator_network(state.float_features), dim=1
)
bcq_mask = imitator_probs < bcq_drop_threshold
npt.assert_array_equal(bcq_mask.detach(), [[True, False]])
model = BatchConstrainedDQN(
state_dim=state_dim,
q_network=q_network,
imitator_network=imitator_network,
bcq_drop_threshold=bcq_drop_threshold,
)
final_q_values = model(state)
npt.assert_array_equal(final_q_values.detach(), [[-1e10, 3.0]])
| 3,184 | 33.619565 | 88 | py |
ReAgent | ReAgent-master/reagent/test/models/test_synthetic_reward_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import torch
from reagent.core import parameters as rlp
from reagent.models.synthetic_reward import (
SingleStepSyntheticRewardNet,
SequenceSyntheticRewardNet,
TransformerSyntheticRewardNet,
NGramFullyConnectedNetwork,
NGramConvolutionalNetwork,
SyntheticRewardNet,
_gen_mask,
)
logger = logging.getLogger(__name__)
class TestSyntheticReward(unittest.TestCase):
def test_single_step_synthetic_reward(self):
state_dim = 10
action_dim = 2
sizes = [256, 128]
activations = ["sigmoid", "relu"]
last_layer_activation = "leaky_relu"
reward_net = SyntheticRewardNet(
SingleStepSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
)
)
dnn = reward_net.export_mlp().dnn
# dnn[0] is a concat layer
assert dnn[1].in_features == state_dim + action_dim
assert dnn[1].out_features == 256
assert dnn[2]._get_name() == "Sigmoid"
assert dnn[3].in_features == 256
assert dnn[3].out_features == 128
assert dnn[4]._get_name() == "ReLU"
assert dnn[5].in_features == 128
assert dnn[5].out_features == 1
assert dnn[6]._get_name() == "LeakyReLU"
valid_step = torch.tensor([[1], [2], [3]])
batch_size = 3
seq_len = 4
mask = _gen_mask(valid_step, batch_size, seq_len)
assert torch.all(
mask
== torch.tensor(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]]
)
)
def test_ngram_fc_synthetic_reward(self):
state_dim = 10
action_dim = 2
sizes = [256, 128]
activations = ["sigmoid", "relu"]
last_layer_activation = "leaky_relu"
context_size = 3
net = NGramFullyConnectedNetwork(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
context_size=context_size,
)
reward_net = SyntheticRewardNet(net)
dnn = reward_net.export_mlp().fc.dnn
assert dnn[0].in_features == (state_dim + action_dim) * context_size
assert dnn[0].out_features == 256
assert dnn[1]._get_name() == "Sigmoid"
assert dnn[2].in_features == 256
assert dnn[2].out_features == 128
assert dnn[3]._get_name() == "ReLU"
assert dnn[4].in_features == 128
assert dnn[4].out_features == 1
assert dnn[5]._get_name() == "LeakyReLU"
valid_step = torch.tensor([[1], [2], [3]])
batch_size = 3
seq_len = 4
mask = _gen_mask(valid_step, batch_size, seq_len)
assert torch.all(
mask
== torch.tensor(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]]
)
)
def test_ngram_conv_net_synthetic_reward(self):
state_dim = 10
action_dim = 2
sizes = [256, 128]
activations = ["sigmoid", "relu"]
last_layer_activation = "leaky_relu"
context_size = 3
conv_net_params = rlp.ConvNetParameters(
conv_dims=[256, 128],
conv_height_kernels=[1, 1],
pool_types=["max", "max"],
pool_kernel_sizes=[1, 1],
)
net = NGramConvolutionalNetwork(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
context_size=context_size,
conv_net_params=conv_net_params,
)
reward_net = SyntheticRewardNet(net)
conv_net = reward_net.export_mlp().conv_net
assert conv_net.conv_dims == [1, 256, 128]
assert conv_net.conv_height_kernels == [1, 1]
assert conv_net.conv_width_kernels == [12, 1]
assert conv_net.conv_layers[0].in_channels == 1
assert conv_net.conv_layers[0].out_channels == 256
assert conv_net.conv_layers[0].kernel_size == (1, 12)
assert conv_net.conv_layers[0].stride == (1, 1)
assert conv_net.conv_layers[1].in_channels == 256
assert conv_net.conv_layers[1].out_channels == 128
assert conv_net.conv_layers[1].kernel_size == (1, 1)
assert conv_net.conv_layers[1].stride == (1, 1)
dnn = reward_net.export_mlp().conv_net.feed_forward.dnn
assert dnn[0].in_features == 384
assert dnn[0].out_features == 256
assert dnn[1]._get_name() == "Sigmoid"
assert dnn[2].in_features == 256
assert dnn[2].out_features == 128
assert dnn[3]._get_name() == "ReLU"
assert dnn[4].in_features == 128
assert dnn[4].out_features == 1
assert dnn[5]._get_name() == "LeakyReLU"
def test_lstm_synthetic_reward(self):
state_dim = 10
action_dim = 2
last_layer_activation = "leaky_relu"
net = SequenceSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
lstm_hidden_size=128,
lstm_num_layers=2,
lstm_bidirectional=True,
last_layer_activation=last_layer_activation,
)
reward_net = SyntheticRewardNet(net)
lstm = reward_net.export_mlp().lstm
assert lstm.bidirectional
assert lstm.input_size == 12
assert lstm.hidden_size == 128
assert lstm.num_layers == 2
dnn = reward_net.export_mlp().fc_out
assert dnn.in_features == 128 * 2
assert dnn.out_features == 1
output_activation = reward_net.export_mlp().output_activation
assert output_activation._get_name() == "LeakyReLU"
def test_transformer_synthetic_reward(self):
state_dim = 10
action_dim = 2
d_model = 64
nhead = 8
num_encoder_layers = 2
dim_feedforward = 64
dropout = 0.0
activation = "relu"
last_layer_activation = "leaky_relu"
layer_norm_eps = 1e-5
max_len = 10
net = TransformerSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
d_model=d_model,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
last_layer_activation=last_layer_activation,
layer_norm_eps=layer_norm_eps,
max_len=max_len,
)
reward_net = SyntheticRewardNet(net)
export_net = reward_net.export_mlp()
transformer = export_net.transformer
assert export_net.state_dim == state_dim
assert export_net.action_dim == action_dim
assert export_net.d_model == d_model
assert export_net.nhead == nhead
assert export_net.dim_feedforward == dim_feedforward
assert export_net.dropout == dropout
assert export_net.activation == activation
assert export_net.layer_norm_eps == layer_norm_eps
assert transformer.num_layers == num_encoder_layers
dnn_out = export_net.fc_out
assert dnn_out.in_features == d_model
assert dnn_out.out_features == 1
output_activation = export_net.output_activation
assert output_activation._get_name() == "LeakyReLU"
| 7,718 | 33.306667 | 82 | py |
ReAgent | ReAgent-master/reagent/test/models/test_actor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy.testing as npt
import torch
from reagent.models.actor import (
DirichletFullyConnectedActor,
FullyConnectedActor,
GaussianFullyConnectedActor,
)
from reagent.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
class TestFullyConnectedActor(unittest.TestCase):
def test_basic(self):
state_dim = 8
action_dim = 4
model = FullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=True,
)
input = model.input_prototype()
self.assertEqual((1, state_dim), input.float_features.shape)
# Using batch norm requires more than 1 example in training, avoid that
model.eval()
action = model(input)
self.assertEqual((1, action_dim), action.action.shape)
def test_save_load(self):
state_dim = 8
action_dim = 4
model = FullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=False,
)
expected_num_params, expected_num_inputs, expected_num_outputs = 6, 1, 1
check_save_load(
self, model, expected_num_params, expected_num_inputs, expected_num_outputs
)
def test_save_load_batch_norm(self):
state_dim = 8
action_dim = 4
model = FullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=True,
)
# Freezing batch_norm
model.eval()
expected_num_params, expected_num_inputs, expected_num_outputs = 21, 1, 1
check_save_load(
self, model, expected_num_params, expected_num_inputs, expected_num_outputs
)
class TestGaussianFullyConnectedActor(unittest.TestCase):
def test_basic(self):
state_dim = 8
action_dim = 4
model = GaussianFullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=True,
)
input = model.input_prototype()
self.assertEqual((1, state_dim), input.float_features.shape)
# Using batch norm requires more than 1 example in training, avoid that
model.eval()
action = model(input)
self.assertEqual((1, action_dim), action.action.shape)
def test_save_load(self):
state_dim = 8
action_dim = 4
model = GaussianFullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=False,
)
expected_num_params, expected_num_inputs, expected_num_outputs = 6, 1, 1
# Actor output is stochastic and won't match between PyTorch & Caffe2
check_save_load(
self,
model,
expected_num_params,
expected_num_inputs,
expected_num_outputs,
check_equality=False,
)
def test_get_log_prob(self):
torch.manual_seed(0)
state_dim = 8
action_dim = 4
model = GaussianFullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=False,
)
input = model.input_prototype()
self.assertEqual((1, state_dim), input.float_features.shape)
action = model(input)
squashed_action = action.action.detach()
action_log_prob = model.get_log_prob(input, squashed_action).detach()
npt.assert_allclose(action.log_prob.detach(), action_log_prob, rtol=1e-4)
class TestDirichletFullyConnectedActor(unittest.TestCase):
def test_basic(self):
state_dim = 8
action_dim = 4
model = DirichletFullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=True,
)
input = model.input_prototype()
self.assertEqual((1, state_dim), input.float_features.shape)
# Using batch norm requires more than 1 example in training, avoid that
model.eval()
action = model(input)
self.assertEqual((1, action_dim), action.action.shape)
def test_save_load(self):
state_dim = 8
action_dim = 4
model = DirichletFullyConnectedActor(
state_dim,
action_dim,
sizes=[7, 6],
activations=["relu", "relu"],
use_batch_norm=False,
)
expected_num_params, expected_num_inputs, expected_num_outputs = 7, 1, 1
check_save_load(
self,
model,
expected_num_params,
expected_num_inputs,
expected_num_outputs,
check_equality=False,
)
| 5,180 | 30.210843 | 87 | py |
ReAgent | ReAgent-master/reagent/test/models/test_no_soft_update_embedding.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import unittest
import numpy.testing as npt
import torch
import torch.nn as nn
from reagent.models.no_soft_update_embedding import NoSoftUpdateEmbedding
class Model(nn.Module):
def __init__(self):
super().__init__()
self.embedding = NoSoftUpdateEmbedding(10, 3)
def forward(self, input):
return self.embedding(input)
class TestNoSoftUpdateEmbedding(unittest.TestCase):
def test_no_soft_update(self):
model = Model()
target_model = copy.deepcopy(model)
for target_param, param in zip(model.parameters(), target_model.parameters()):
self.assertIs(target_param, param)
optimizer = torch.optim.Adam(model.parameters())
x = torch.tensor([1, 2], dtype=torch.int64)
emb = model(x)
loss = emb.sum()
loss.backward()
optimizer.step()
params = list(model.parameters())
self.assertEqual(1, len(params))
param = params[0].detach().numpy()
self._soft_update(model, target_model, 0.1)
target_params = list(target_model.parameters())
self.assertEqual(1, len(target_params))
target_param = target_params[0].detach().numpy()
npt.assert_array_equal(target_param, param)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def _soft_update(self, network, target_network, tau) -> None:
"""Target network update logic as defined in DDPG paper
updated_params = tau * network_params + (1 - tau) * target_network_params
:param network: network with parameters to include in soft update
:param target_network: target network with params to soft update
:param tau: hyperparameter to control target tracking speed
"""
for t_param, param in zip(target_network.parameters(), network.parameters()):
if t_param is param:
# Skip soft-updating when the target network shares the parameter with
# the network being train.
continue
new_param = tau * param.data + (1.0 - tau) * t_param.data
t_param.data.copy_(new_param)
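# Illustrative sketch (not part of the original tests): the soft-update rule from the
# docstring above applied to a single scalar parameter, using the same tau convention
# (updated = tau * online + (1 - tau) * target).
if __name__ == "__main__":
    online, target, tau = torch.tensor(1.0), torch.tensor(0.0), 0.1
    target = tau * online + (1.0 - tau) * target
    print(target)  # tensor(0.1000)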
| 2,326 | 32.724638 | 86 | py |
ReAgent | ReAgent-master/reagent/test/models/test_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
import logging
import unittest
from typing import Any
import torch
import torch.nn as nn
from reagent.core import types as rlt
from reagent.models.base import ModelBase
from reagent.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class ModelOutput:
# These should be torch.Tensor but the type checking failed when I used it
sum: Any
mul: Any
plus_one: Any
linear: Any
class Model(ModelBase):
def __init__(self):
super().__init__()
self.linear = nn.Linear(4, 1)
def input_prototype(self):
return (
rlt.FeatureData(torch.randn([1, 4])),
rlt.FeatureData(torch.randn([1, 4])),
)
def forward(self, state, action):
state = state.float_features
action = action.float_features
return ModelOutput(
state + action, state * action, state + 1, self.linear(state)
)
class TestBase(unittest.TestCase):
def test_get_predictor_export_meta_and_workspace(self):
model = Model()
# 2 params + 1 const
expected_num_params, expected_num_inputs, expected_num_outputs = 3, 2, 4
check_save_load(
self, model, expected_num_params, expected_num_inputs, expected_num_outputs
)
| 1,423 | 23.982456 | 87 | py |
ReAgent | ReAgent-master/reagent/test/models/test_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy.testing as npt
import torch
logger = logging.getLogger(__name__)
def check_save_load(
self,
model,
expected_num_params,
expected_num_inputs,
expected_num_outputs,
check_equality=True,
):
# TODO: remove the expected_num* from call sites
# TODO: revive this test or kill it
# input_prototype = model.input_prototype()
# traced_model = torch.jit.trace(model, input_prototype)
# if check_equality:
# x = model(*input_prototype)
# y = traced_model(*input_prototype)
# self.assertEqual(x, y)
pass
| 695 | 20.090909 | 71 | py |
ReAgent | ReAgent-master/reagent/test/samplers/test_frechet_sort.py | #!/usr/bin/env python3
import torch
from reagent.samplers.frechet import FrechetSort
from reagent.test.base.horizon_test_base import HorizonTestBase
class FrechetSortTest(HorizonTestBase):
def test_log_prob(self):
scores = torch.tensor(
[
[1.0, 2.0, 3.0, 4.0, 5.0],
[5.0, 1.0, 2.0, 3.0, 4.0],
]
)
shape = 2.0
frechet_sort = FrechetSort(topk=3, shape=shape, log_scores=True)
# The log-prob should be the same; the last 2 positions don't matter
action = torch.tensor(
[
[0, 1, 2, 3, 4],
[1, 2, 3, 0, 4],
],
dtype=torch.long,
)
log_probs = frechet_sort.log_prob(scores, action)
self.assertEqual(log_probs[0], log_probs[1])
action = torch.tensor(
[
[0, 1, 2, 3, 4],
[3, 2, 1, 0, 4],
],
dtype=torch.long,
)
log_probs = frechet_sort.log_prob(scores, action)
self.assertLess(log_probs[0], log_probs[1])
# manually calculating the log prob for the second case
s = scores[1][action[1]]
log_prob = 0.0
for p in range(3):
log_prob -= torch.exp((s[p:] - s[p]) * shape).sum().log()
self.assertAlmostEqual(log_prob, log_probs[1])
def test_log_prob_padding(self):
scores = torch.tensor(
[
[1.0, 2.0, 3.0, 4.0, 5.0],
[1.0, 2.0, 3.0, 4.0, 5.0],
],
requires_grad=True,
)
shape = 2.0
frechet_sort = FrechetSort(topk=3, shape=shape, log_scores=True)
# A shorter sequence should have a higher prob
action = torch.tensor(
[
[0, 1, 2, 3, 4],
[0, 1, 5, 5, 5],
],
dtype=torch.long,
)
log_probs = frechet_sort.log_prob(scores, action)
self.assertLess(log_probs[0], log_probs[1])
log_probs.sum().backward()
self.assertGreater(scores.grad.sum(), 0)
# manually calculating the log prob for the second case
# 5 is padding, so we remove it here
s = scores[1][action[1][:2]]
log_prob = 0.0
for p in range(2):
log_prob -= torch.exp((s[p:] - s[p]) * shape).sum().log()
self.assertAlmostEqual(log_prob, log_probs[1])
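# Illustrative sketch (not part of the original tests): the manual check above computes
# the Plackett-Luce log-probability of the chosen top-k prefix under scores scaled by
# `shape`, i.e. log P = sum_{p < topk} [shape * s_p - logsumexp_{q >= p}(shape * s_q)],
# where s is already arranged in the sampled order. A hypothetical standalone helper:
def _plackett_luce_log_prob(
    ordered_scores: torch.Tensor, shape: float, topk: int
) -> torch.Tensor:
    log_prob = torch.tensor(0.0)
    for p in range(topk):
        log_prob = log_prob + shape * ordered_scores[p]
        log_prob = log_prob - torch.logsumexp(shape * ordered_scores[p:], dim=0)
    return log_prob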
| 2,434 | 29.061728 | 76 | py |
ReAgent | ReAgent-master/reagent/test/base/test_tensorboardX.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from tempfile import TemporaryDirectory
from unittest.mock import MagicMock, call
import torch
from reagent.core.tensorboardX import SummaryWriterContext, summary_writer_context
from reagent.test.base.horizon_test_base import HorizonTestBase
from torch.utils.tensorboard import SummaryWriter
class TestSummaryWriterContext(HorizonTestBase):
def test_noop(self):
self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1)))
def test_with_none(self):
with summary_writer_context(None):
self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1)))
def test_writing(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
writer.add_scalar = MagicMock()
with summary_writer_context(writer):
SummaryWriterContext.add_scalar("test", torch.ones(1))
writer.add_scalar.assert_called_once_with(
"test", torch.ones(1), global_step=0
)
def test_writing_stack(self):
with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
writer1 = SummaryWriter(tmp_dir1)
writer1.add_scalar = MagicMock()
writer2 = SummaryWriter(tmp_dir2)
writer2.add_scalar = MagicMock()
with summary_writer_context(writer1):
with summary_writer_context(writer2):
SummaryWriterContext.add_scalar("test2", torch.ones(1))
SummaryWriterContext.add_scalar("test1", torch.zeros(1))
writer1.add_scalar.assert_called_once_with(
"test1", torch.zeros(1), global_step=0
)
writer2.add_scalar.assert_called_once_with(
"test2", torch.ones(1), global_step=0
)
def test_swallowing_exception(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
writer.exceptions_to_ignore = (NotImplementedError, KeyError)
with summary_writer_context(writer):
SummaryWriterContext.add_scalar("test", torch.ones(1))
def test_not_swallowing_exception(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
with self.assertRaisesRegex(
NotImplementedError, "test"
), summary_writer_context(writer):
SummaryWriterContext.add_scalar("test", torch.ones(1))
def test_swallowing_histogram_value_error(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
with summary_writer_context(writer):
SummaryWriterContext.add_histogram("bad_histogram", torch.ones(100, 1))
def test_global_step(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
writer.add_scalar = MagicMock()
with summary_writer_context(writer):
SummaryWriterContext.add_scalar("test", torch.ones(1))
SummaryWriterContext.increase_global_step()
SummaryWriterContext.add_scalar("test", torch.zeros(1))
writer.add_scalar.assert_has_calls(
[
call("test", torch.ones(1), global_step=0),
call("test", torch.zeros(1), global_step=1),
]
)
self.assertEqual(2, len(writer.add_scalar.mock_calls))
def test_add_custom_scalars(self):
with TemporaryDirectory() as tmp_dir:
writer = SummaryWriter(tmp_dir)
writer.add_custom_scalars = MagicMock()
with summary_writer_context(writer):
SummaryWriterContext.add_custom_scalars_multilinechart(
["a", "b"], category="cat", title="title"
)
with self.assertRaisesRegex(
AssertionError, "Title \\(title\\) is already in category \\(cat\\)"
):
SummaryWriterContext.add_custom_scalars_multilinechart(
["c", "d"], category="cat", title="title"
)
SummaryWriterContext.add_custom_scalars_multilinechart(
["e", "f"], category="cat", title="title2"
)
SummaryWriterContext.add_custom_scalars_multilinechart(
["g", "h"], category="cat2", title="title"
)
SummaryWriterContext.add_custom_scalars(writer)
writer.add_custom_scalars.assert_called_once_with(
{
"cat": {
"title": ["Multiline", ["a", "b"]],
"title2": ["Multiline", ["e", "f"]],
},
"cat2": {"title": ["Multiline", ["g", "h"]]},
}
)
| 5,172 | 42.470588 | 88 | py |
ReAgent | ReAgent-master/reagent/test/base/horizon_test_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import random
import unittest
from typing import Callable
import numpy as np
import torch
from reagent.core.configuration import make_config_class
from reagent.core.tensorboardX import SummaryWriterContext
from ruamel.yaml import YAML
SEED = 0
class HorizonTestBase(unittest.TestCase):
def setUp(self):
SummaryWriterContext._reset_globals()
logging.basicConfig(level=logging.INFO)
np.random.seed(SEED)
torch.manual_seed(SEED)
random.seed(SEED)
def tearDown(self):
SummaryWriterContext._reset_globals()
@classmethod
def run_from_config(cls, run_test: Callable, config_path: str, use_gpu: bool):
yaml = YAML(typ="safe")
with open(config_path, "r") as f:
config_dict = yaml.load(f.read())
config_dict["use_gpu"] = use_gpu
@make_config_class(run_test)
class ConfigClass:
pass
config = ConfigClass(**config_dict)
# pyre-fixme[16]: `ConfigClass` has no attribute `asdict`.
return run_test(**config.asdict())
| 1,173 | 25.681818 | 82 | py |
ReAgent | ReAgent-master/reagent/test/base/test_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from reagent.core.torch_utils import masked_softmax, rescale_torch_tensor
class TestUtils(unittest.TestCase):
def test_rescale_torch_tensor(self):
rows, cols = 3, 5
original_tensor = torch.randint(low=10, high=40, size=(rows, cols)).float()
prev_max_tensor = torch.ones(1, 5) * 40.0
prev_min_tensor = torch.ones(1, 5) * 10.0
new_min_tensor = torch.ones(1, 5) * -1.0
new_max_tensor = torch.ones(1, 5).float()
print("Original tensor: ", original_tensor)
rescaled_tensor = rescale_torch_tensor(
original_tensor,
new_min_tensor,
new_max_tensor,
prev_min_tensor,
prev_max_tensor,
)
print("Rescaled tensor: ", rescaled_tensor)
reconstructed_original_tensor = rescale_torch_tensor(
rescaled_tensor,
prev_min_tensor,
prev_max_tensor,
new_min_tensor,
new_max_tensor,
)
print("Reconstructed Original tensor: ", reconstructed_original_tensor)
comparison_tensor = torch.eq(original_tensor, reconstructed_original_tensor)
self.assertTrue(torch.sum(comparison_tensor), rows * cols)
def test_masked_softmax(self):
# Positive value case
x = torch.tensor([[15.0, 6.0, 9.0], [3.0, 2.0, 1.0]])
temperature = 1
mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
out = masked_softmax(x, mask, temperature)
expected_out = torch.tensor([[0.9975, 0.0000, 0.0025], [0, 0.7311, 0.2689]])
npt.assert_array_almost_equal(out, expected_out, 4)
# Positive value case (masked value goes to inf)
x = torch.tensor([[150.0, 2.0]])
temperature = 0.01
mask = torch.tensor([[0.0, 1.0]])
out = masked_softmax(x, mask, temperature)
expected_out = torch.tensor([[0.0, 1.0]])
npt.assert_array_almost_equal(out, expected_out, 4)
# Negative value case
x = torch.tensor([[-10.0, -1.0, -5.0]])
temperature = 0.01
mask = torch.tensor([[1.0, 1.0, 0.0]])
out = masked_softmax(x, mask, temperature)
expected_out = torch.tensor([[0.0, 1.0, 0.0]])
npt.assert_array_almost_equal(out, expected_out, 4)
# Case where all values in a row are masked
x = torch.tensor([[-5.0, 4.0, 3.0], [2.0, 1.0, 2.0]])
temperature = 1
mask = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
out = masked_softmax(x, mask, temperature)
expected_out = torch.tensor([[0.0, 0.0, 0.0], [0.4223, 0.1554, 0.4223]])
npt.assert_array_almost_equal(out, expected_out, 4)
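# Illustrative sketch (not part of the original tests): a masked softmax consistent with
# the expectations above -- masked entries act as -inf logits and rows that are fully
# masked come out as all zeros. This restates the behavior being tested; it is not
# ReAgent's internal masked_softmax implementation.
def _masked_softmax_sketch(
    x: torch.Tensor, mask: torch.Tensor, temperature: float
) -> torch.Tensor:
    logits = (x / temperature).masked_fill(mask == 0, float("-inf"))
    # softmax over a fully -inf row yields NaN; map those rows back to zeros
    return torch.softmax(logits, dim=-1).nan_to_num(0.0)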
| 2,836 | 37.337838 | 84 | py |
ReAgent | ReAgent-master/reagent/test/lite/test_combo_optimizer.py | #!/usr/bin/env python3
import random
import unittest
from collections import defaultdict
from typing import Dict
import nevergrad as ng
import numpy as np
import torch
import torch.nn as nn
from reagent.lite.optimizer import (
PolicyGradientOptimizer,
GumbelSoftmaxOptimizer,
QLearningOptimizer,
NeverGradOptimizer,
RandomSearchOptimizer,
BayesianOptimizer,
BayesianMLPEnsemblerOptimizer,
GREEDY_TEMP,
sol_to_tensors,
)
# nevergrad performs a little worse in the test environment
NEVERGRAD_TEST_THRES = 6.0
POLICY_GRADIENT_TEST_THRES = 3.0
GUMBEL_SOFTMAX_TEST_THRES = 3.0
Q_LEARNING_TEST_THRES = 3.0
BAYESSIAN_MLP_TEST_THRES = 3.0
BAYESSIAN_MLP_CONV_THRES = 6.0
class GroundTruthNet(nn.Module):
def __init__(self, dim_input, dim_model):
super().__init__()
self.net = nn.Sequential(
torch.nn.Linear(dim_input, dim_model),
torch.nn.ReLU(),
torch.nn.Linear(dim_model, 1),
)
for p in self.parameters():
if p.dim() > 1:
nn.init.uniform_(p, -3, 3)
def forward(self, x):
return self.net(x)
def random_sample(input_param, obj_func, n_generations=100):
"""Return the best result from random sampling"""
rs_optimizer = RandomSearchOptimizer(
input_param,
obj_func,
batch_size=512,
)
min_reward_rs_optimizer = torch.tensor(9999.0)
print("Random Sampling")
for i in range(n_generations):
(
sampled_solutions,
reward,
) = rs_optimizer.optimize_step()
min_reward_rs_optimizer = torch.min(
min_reward_rs_optimizer, torch.min(reward.data)
)
print(f"Generation={i}, min_reward={min_reward_rs_optimizer}")
print()
return min_reward_rs_optimizer
def discrete_input_param():
# Some random discrete choice space
ng_param = ng.p.Dict(
choice1=ng.p.Choice(["128", "256", "512", "768"]),
choice2=ng.p.Choice(["128", "256", "512", "768"]),
choice3=ng.p.Choice(["True", "False"]),
choice4=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]),
choice5=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]),
)
return ng_param
def create_ground_truth_net(ng_param):
dim_input = sum([len(ng_param[k].choices) for k in ng_param])
dim_model = 256
gt_net = GroundTruthNet(dim_input, dim_model)
print(f"Ground-Truth Net DIM_INPUT={dim_input}, DIM_MODEL={dim_model}")
return gt_net
def create_discrete_choice_obj_func(ng_param, gt_net):
def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor:
# sampled_sol format:
# key = choice_name
# val = choice_idx (a tensor of length `batch_size`)
assert list(sampled_sol.values())[0].dim() == 1
batch_size = list(sampled_sol.values())[0].shape[0]
batch_tensors = []
for i in range(batch_size):
tensors = []
for k in sorted(sampled_sol.keys()):
num_choices = len(ng_param[k].choices)
one_hot = torch.zeros(num_choices)
one_hot[sampled_sol[k][i]] = 1
tensors.append(one_hot)
batch_tensors.append(torch.cat(tensors, dim=-1))
batch_tensors = torch.stack(batch_tensors)
return gt_net(batch_tensors)
return obj_func
def create_discrete_choice_gumbel_softmax_obj_func(ng_param, gt_net):
def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor:
# sampled_sol format:
# key = choice_name
# val = sampled softmax distribution, a tensor of shape (batch_size, num_choices)
assert list(sampled_sol.values())[0].dim() == 2
batch_size = list(sampled_sol.values())[0].shape[0]
batch_tensors = []
for i in range(batch_size):
tensors = []
for k in sorted(sampled_sol.keys()):
tensors.append(sampled_sol[k][i])
batch_tensors.append(torch.cat(tensors, dim=-1))
batch_tensors = torch.stack(batch_tensors)
return gt_net(batch_tensors)
return obj_func
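# Illustrative sketch (not part of the original tests): the per-sample one-hot loop in
# create_discrete_choice_obj_func can be stated more compactly with F.one_hot. The
# two-choice space below is hypothetical and used only to show the encoding layout.
def _one_hot_encoding_example() -> torch.Tensor:
    import torch.nn.functional as F
    ng_param = ng.p.Dict(
        choice1=ng.p.Choice(["a", "b", "c"]),
        choice2=ng.p.Choice(["x", "y"]),
    )
    sampled_sol = {
        "choice1": torch.tensor([0, 2]),
        "choice2": torch.tensor([1, 0]),
    }
    return torch.cat(
        [
            F.one_hot(sampled_sol[k], num_classes=len(ng_param[k].choices)).float()
            for k in sorted(sampled_sol.keys())
        ],
        dim=-1,
    )  # shape (2, 5): one-hot block for choice1 followed by choice2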
class TestComboOptimizer(unittest.TestCase):
def setUp(self):
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def test_random_sample_with_raw_choices_using_uncommon_key(self):
batch_size = 200
input_param = ng.p.Dict(
**{
"#1": ng.p.Choice([32, 64, 128]),
"choice2[3]": ng.p.Choice([True, False]),
"choice3.attr": ng.p.Choice(
["Red", "Blue", "Green", "Yellow", "Purple"]
),
}
)
obj_func = None
sampling_weights = {
"#1": [0.5, 0.5, 0.0],
"choice2[3]": [0.25, 0.75],
"choice3.attr": [0.1, 0.9, 0.0, 0.0, 0.0],
}
optimizer = RandomSearchOptimizer(
input_param,
obj_func,
batch_size=batch_size,
sampling_weights=sampling_weights,
)
sampled_sol = optimizer.sample(batch_size)
sampled_sol = optimizer.indices_to_raw_choices(sampled_sol)
self.assertEqual(len(sampled_sol), batch_size)
self.assertIsInstance(sampled_sol, list)
counts = {key: defaultdict(int) for key in sampling_weights}
for sample in sampled_sol:
self.assertSetEqual(set(sample.keys()), set(input_param.keys()))
self.assertIn(sample["#1"], [32, 64])
self.assertIn(sample["choice2[3]"], [True, False])
self.assertIn(sample["choice3.attr"], ["Red", "Blue"])
for key in sample:
counts[key][sample[key]] += 1
self.assertAlmostEqual(counts["#1"][32] / float(batch_size), 0.5, places=1)
self.assertAlmostEqual(counts["#1"][64] / float(batch_size), 0.5, places=1)
self.assertEqual(counts["#1"][128], 0)
self.assertAlmostEqual(
counts["choice2[3]"][True] / float(batch_size), 0.25, places=1
)
self.assertAlmostEqual(
counts["choice2[3]"][False] / float(batch_size), 0.75, places=1
)
self.assertAlmostEqual(
counts["choice3.attr"]["Red"] / float(batch_size), 0.1, places=1
)
self.assertAlmostEqual(
counts["choice3.attr"]["Blue"] / float(batch_size), 0.9, places=1
)
self.assertEqual(counts["choice3.attr"]["Green"], 0)
self.assertEqual(counts["choice3.attr"]["Yellow"], 0)
self.assertEqual(counts["choice3.attr"]["Purple"], 0)
def test_random_sample_with_raw_choices_1(self):
batch_size = 1
input_param = ng.p.Dict(
choice1=ng.p.Choice([32, 64, 128]),
choice2=ng.p.Choice([True, False]),
choice3=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]),
)
obj_func = None
optimizer = RandomSearchOptimizer(
input_param, obj_func, batch_size=batch_size, sampling_weights=None
)
sampled_sol = optimizer.sample(batch_size)
sampled_sol = optimizer.indices_to_raw_choices(sampled_sol)
self.assertEqual(len(sampled_sol), batch_size)
self.assertIsInstance(sampled_sol, list)
for sample in sampled_sol:
self.assertSetEqual(set(sample.keys()), set(input_param.keys()))
for key in sample:
self.assertIn(sample[key], input_param[key].choices.value)
def test_random_sample_with_raw_choices_2(self):
batch_size = 200
input_param = ng.p.Dict(
choice1=ng.p.Choice([32, 64, 128]),
choice2=ng.p.Choice([True, False]),
choice3=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]),
)
obj_func = None
sampling_weights = {
"choice1": [0.5, 0.5, 0.0],
"choice2": [0.25, 0.75],
"choice3": [0.1, 0.9, 0.0, 0.0, 0.0],
}
optimizer = RandomSearchOptimizer(
input_param,
obj_func,
batch_size=batch_size,
sampling_weights=sampling_weights,
)
sampled_sol = optimizer.sample(batch_size)
sampled_sol = optimizer.indices_to_raw_choices(sampled_sol)
self.assertEqual(len(sampled_sol), batch_size)
self.assertIsInstance(sampled_sol, list)
counts = {key: defaultdict(int) for key in sampling_weights}
for sample in sampled_sol:
self.assertSetEqual(set(sample.keys()), set(input_param.keys()))
self.assertIn(sample["choice1"], [32, 64])
self.assertIn(sample["choice2"], [True, False])
self.assertIn(sample["choice3"], ["Red", "Blue"])
for key in sample:
counts[key][sample[key]] += 1
self.assertAlmostEqual(counts["choice1"][32] / float(batch_size), 0.5, places=1)
self.assertAlmostEqual(counts["choice1"][64] / float(batch_size), 0.5, places=1)
self.assertEqual(counts["choice1"][128], 0)
self.assertAlmostEqual(
counts["choice2"][True] / float(batch_size), 0.25, places=1
)
self.assertAlmostEqual(
counts["choice2"][False] / float(batch_size), 0.75, places=1
)
self.assertAlmostEqual(
counts["choice3"]["Red"] / float(batch_size), 0.1, places=1
)
self.assertAlmostEqual(
counts["choice3"]["Blue"] / float(batch_size), 0.9, places=1
)
self.assertEqual(counts["choice3"]["Green"], 0)
self.assertEqual(counts["choice3"]["Yellow"], 0)
self.assertEqual(counts["choice3"]["Purple"], 0)
def test_nevergrad_optimizer_discrete(self):
batch_size = 32
n_generations = 40
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_obj_func(input_param, gt_net)
optimizer = NeverGradOptimizer(
input_param,
batch_size * n_generations, # estimated_budgets
obj_func=obj_func,
batch_size=batch_size,
optimizer_name="DoubleFastGADiscreteOnePlusOne",
)
best_rs_result = random_sample(input_param, obj_func, n_generations=20)
history_min_reward = torch.tensor(9999.0)
for i in range(n_generations):
(
sampled_solutions,
reward,
) = optimizer.optimize_step()
history_min_reward = torch.min(history_min_reward, torch.min(reward.data))
print(
f"Generation={i}, min_reward={torch.min(reward.data)}, "
f"history_min_reward={history_min_reward}"
)
assert (
abs(best_rs_result - history_min_reward) < NEVERGRAD_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, optimizer best result={history_min_reward}"
assert (
optimizer.best_solutions(1)[0][0] == history_min_reward
), "Best solutions (n=1) inconsistent with the best reward"
# just test sampling() can run
optimizer.sample(10)
def test_policy_gradient_optimizer_discrete(self):
batch_size = 32
learning_rate = 0.1
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_obj_func(input_param, gt_net)
optimizer = PolicyGradientOptimizer(
input_param, obj_func, batch_size=batch_size, learning_rate=learning_rate
)
best_rs_result = random_sample(input_param, obj_func, n_generations=20)
n_generations = 100
for i in range(n_generations):
(
sampled_solutions,
reward,
sampled_log_probs,
) = optimizer.optimize_step()
mean_reward = torch.mean(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward}, "
f"min_reward={torch.min(reward.data)}, "
f"mean_sample_prob={torch.mean(torch.exp(sampled_log_probs))}, "
f"temperature={optimizer.temp}"
)
assert (
abs(best_rs_result - mean_reward) < POLICY_GRADIENT_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, optimizer mean result={mean_reward}"
# just test sampling() can run
optimizer.sample(10)
def test_q_learning_optimizer_discrete(self):
batch_size = 256
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_obj_func(input_param, gt_net)
optimizer = QLearningOptimizer(input_param, obj_func, batch_size=batch_size)
best_rs_result = random_sample(input_param, obj_func, n_generations=20)
n_generations = 100
for i in range(n_generations):
(
sampled_solutions,
reward,
) = optimizer.optimize_step()
mean_reward = torch.mean(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward}, "
f"min_reward={torch.min(reward.data)}, "
f"temperature={optimizer.temp}"
)
eval_result = obj_func(optimizer.sample(1))
assert (
abs(best_rs_result - eval_result) < Q_LEARNING_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}"
def test_gumbel_softmax_optimizer_discrete(self):
batch_size = 32
anneal_rate = 0.97
learning_rate = 0.1
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_gumbel_softmax_obj_func(input_param, gt_net)
optimizer = GumbelSoftmaxOptimizer(
input_param,
obj_func,
anneal_rate=anneal_rate,
batch_size=batch_size,
learning_rate=learning_rate,
)
obj_func_rs = create_discrete_choice_obj_func(input_param, gt_net)
best_rs_result = random_sample(input_param, obj_func_rs, n_generations=20)
n_generations = 100
for i in range(n_generations):
(sampled_softmax_vals, reward, logits) = optimizer.optimize_step()
mean_reward = torch.mean(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward}, "
f"min_reward={torch.min(reward.data)}, "
f"temperature={optimizer.temp}"
)
assert (
optimizer.temp == optimizer.min_temp
), "Towards the end of learning, GumbelSoftmax Optimizer should have a low temperature"
assert (
abs(best_rs_result - mean_reward) < GUMBEL_SOFTMAX_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, optimizer mean result={mean_reward}"
eval_obj_func = create_discrete_choice_obj_func(input_param, gt_net)
eval_result = eval_obj_func(optimizer.sample(1))
assert (
abs(best_rs_result - eval_result) < GUMBEL_SOFTMAX_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}"
def run_policy_gradient_optimizer(
self,
input_param,
obj_func,
batch_size,
n_generations,
repeats,
):
results = []
for r in range(repeats):
print(f"\n\n**** Policy Gradient Optimizer, Repeat={r} ****")
pg_optimizer = PolicyGradientOptimizer(
input_param,
obj_func,
batch_size=batch_size,
)
for i in range(n_generations):
# non-exploration at the last generation
if i == n_generations - 1:
pg_optimizer.temp = GREEDY_TEMP
temp = pg_optimizer.temp
(
sampled_solutions,
reward,
sampled_log_probs,
) = pg_optimizer.optimize_step()
mean_reward_pg_optimizer = torch.mean(reward.data)
min_reward_pg_optimizer = torch.min(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward_pg_optimizer}, "
f"min_reward={min_reward_pg_optimizer}, "
f"mean_sample_prob={torch.mean(torch.exp(sampled_log_probs))}, "
f"temperature={temp}"
)
results.append(mean_reward_pg_optimizer)
return results
def run_q_learning_optimizer(
self,
input_param,
obj_func,
batch_size,
n_generations,
repeats,
):
results = []
for r in range(repeats):
print(f"\n\n**** QLearning Optimizer, Repeat={r} ****")
ql_optimizer = QLearningOptimizer(
input_param,
obj_func,
batch_size=batch_size,
anneal_rate=0.997,
)
for i in range(n_generations):
# non-exploration at the last generation
if i == n_generations - 1:
ql_optimizer.temp = GREEDY_TEMP
temp = ql_optimizer.temp
(
sampled_solutions,
reward,
) = ql_optimizer.optimize_step()
mean_reward_ql_optimizer = torch.mean(reward.data)
min_reward_ql_optimizer = torch.min(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward_ql_optimizer}, "
f"min_reward={min_reward_ql_optimizer}, "
f"temp={temp}"
)
results.append(mean_reward_ql_optimizer)
return results
def test_policy_gradient_vs_q_learning_discrete(self):
"""
Comparison between policy gradient and Q-learning-based optimizer
The input param has two axes, choice1 and choice2.
The value achieved by different combinations of the two choices:
a b c
1 0.43 0.9 0.45
2 0.9 0.4 0.9
3 0.45 0.9 0.45
        In summary, the global minimum is at (choice1=2, choice2=b), but there are local minima
        and maxima which can easily prevent an optimizer from finding the global minimum.
In this setting, Q-learning performs better than policy gradient
"""
input_param = ng.p.Dict(
choice1=ng.p.Choice(["1", "2", "3"]),
choice2=ng.p.Choice(["a", "b", "c"]),
)
def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor:
# sampled_sol format:
# key = choice_name
# val = choice_idx (a tensor of length `batch_size`)
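            # The choice indices are assumed to follow the order of the
            # ng.p.Choice lists above, so choice1 index 1 means "2" and
            # choice2 index 1 means "b"; the branch returning 0.4 below is the
            # global minimum from the docstring's value table (lower is better).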
assert list(sampled_sol.values())[0].dim() == 1
batch_size = list(sampled_sol.values())[0].shape[0]
result = torch.zeros(batch_size, 1)
choice1 = sampled_sol["choice1"]
choice2 = sampled_sol["choice2"]
for i in range(batch_size):
if choice1[i] == 1 and choice2[i] == 1:
result[i] = 0.4
elif choice1[i] == 0 and choice2[i] == 0:
result[i] = 0.43
elif choice1[i] == 1 or choice2[i] == 1:
result[i] = 0.9
else:
result[i] = 0.45
return result
batch_size = 32
n_generations = 100
repeat = 10
qlearning_res = self.run_q_learning_optimizer(
input_param, obj_func, batch_size, n_generations, repeat
)
pg_res = self.run_policy_gradient_optimizer(
input_param, obj_func, batch_size, n_generations, repeat
)
print(f"QLearning results over {repeat} repeats: {qlearning_res}")
print(f"PG results over {repeat} repeats: {pg_res}")
assert (
np.mean(qlearning_res) < 0.42
), "QLearning should end up better than local minimum (0.43)"
assert np.mean(qlearning_res) < np.mean(
pg_res
), f"In this setting. qlearning should be better than policy gradient over {repeat} repeats"
def test_sol_to_tensors(self):
input_param = discrete_input_param()
sampled_sol = {
"choice1": torch.tensor([0, 1, 2]),
"choice2": torch.tensor([1, 2, 0]),
"choice3": torch.tensor([0, 1, 0]),
"choice4": torch.tensor([4, 3, 2]),
"choice5": torch.tensor([1, 2, 3]),
}
tensor = torch.FloatTensor(
[
[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
]
)
sampled_tensor = sol_to_tensors(sampled_sol, input_param)
self.assertTrue(torch.all(tensor == sampled_tensor))
def test_bayesian_optimizer_its_random_mutation_discrete(self):
acq_type = "its"
mutation_type = "random"
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_obj_func(input_param, gt_net)
optimizer = BayesianOptimizer(
param=input_param,
obj_func=obj_func,
start_temp=1.0,
min_temp=0.0,
acq_type=acq_type,
mutation_type=mutation_type,
)
sampled_solution = {
"choice1": torch.tensor([0]),
"choice2": torch.tensor([1]),
"choice3": torch.tensor([0]),
"choice4": torch.tensor([1]),
"choice5": torch.tensor([0]),
}
optimizer._maintain_best_solutions(sampled_solution, torch.tensor([0.0]))
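        # Seed the optimizer's best-solution pool with a known solution so that
        # sample() mutates from it; the second argument to sample() appears to
        # act as a per-choice mutation rate, so with 0.0 the solution should
        # come back unchanged and with k/len(input_param) at most k choices
        # should differ (checked below).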
# no mutation
mutated_solution = optimizer.sample(1, 0.0)
self.assertEqual(sampled_solution, mutated_solution)
# mutation in one idx (at most)
mutated_solution = optimizer.sample(1, 1 / len(input_param))
difference = 0
for k in sorted(input_param.keys()):
if sampled_solution[k] != mutated_solution[k]:
difference += 1
self.assertTrue(difference <= 1)
# mutation in two idxs (at most)
mutated_solution = optimizer.sample(1, 2 / len(input_param))
difference = 0
for k in sorted(input_param.keys()):
if sampled_solution[k] != mutated_solution[k]:
difference += 1
self.assertTrue(difference <= 2)
def test_bayessian_optimizer_its_random_mutation_ensembler_discrete(self):
batch_size = 8
num_mutations = 10
input_param = discrete_input_param()
gt_net = create_ground_truth_net(input_param)
obj_func = create_discrete_choice_obj_func(input_param, gt_net)
optimizer = BayesianMLPEnsemblerOptimizer(
param=input_param,
obj_func=obj_func,
batch_size=batch_size,
num_mutations=num_mutations,
anneal_rate=0.95,
)
best_rs_result = random_sample(input_param, obj_func, n_generations=20)
n_generations = 200
all_sampled_solutions = []
for i in range(n_generations):
(sampled_solutions, reward, loss) = optimizer.optimize_step()
all_sampled_solutions.append(sampled_solutions)
mean_reward = torch.mean(reward.data)
print(
f"Generation={i}, mean_reward={mean_reward}, "
f"min_reward={torch.min(reward.data)}, "
f"Avg. loss={loss},"
)
best_sol = optimizer.sample(1, 0.0)
eval_result = obj_func(best_sol)
assert (
abs(best_rs_result - eval_result) < BAYESSIAN_MLP_TEST_THRES
), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}"
sampled_solutions = {}
for k in sorted(input_param.keys()):
sampled_solutions[k] = torch.cat([sol[k] for sol in all_sampled_solutions])
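        # Sanity-check convergence in acquisition space: the returned best
        # solution's ITS acquisition value should be close to the minimum
        # acquisition value among all solutions sampled during training.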
acq_reward = optimizer.acquisition(
acq_type="its", sampled_sol=sampled_solutions, predictor=optimizer.predictor
)
min_acq_reward = torch.min(acq_reward).item()
best_sol_acq_reward = optimizer.acquisition(
acq_type="its", sampled_sol=best_sol, predictor=optimizer.predictor
).item()
assert (
abs(best_sol_acq_reward - min_acq_reward) < BAYESSIAN_MLP_CONV_THRES
), f"Learning not converged. min acquisition reward={min_acq_reward}, best solution's acquisition reward={best_sol_acq_reward}"
| 25,061 | 37.795666 | 135 | py |
ReAgent | ReAgent-master/reagent/test/optimizer/test_make_optimizer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.optimizer.uninferrable_optimizers import Adam
from reagent.optimizer.uninferrable_schedulers import (
CosineAnnealingLR,
CosineAnnealingWarmRestarts,
ExponentialLR,
MultiStepLR,
OneCycleLR,
StepLR,
)
from reagent.optimizer.utils import is_torch_lr_scheduler, is_torch_optimizer
class TestMakeOptimizer(unittest.TestCase):
def setUp(self):
self.model = torch.nn.Linear(3, 4)
def _verify_optimizer(self, optimizer_scheduler_pair):
self.assertTrue(is_torch_optimizer(type(optimizer_scheduler_pair["optimizer"])))
self.assertTrue(
is_torch_lr_scheduler(type(optimizer_scheduler_pair["lr_scheduler"]))
)
def test_make_optimizer_with_step_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001, lr_schedulers=[StepLR(gamma=0.1, step_size=0.01)]
).make_optimizer_scheduler(self.model.parameters())
)
def test_make_optimizer_with_multistep_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001,
lr_schedulers=[MultiStepLR(gamma=0.2, milestones=[1000, 2000])],
).make_optimizer_scheduler(self.model.parameters())
)
def test_make_optimizer_with_exponential_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001, lr_schedulers=[ExponentialLR(gamma=0.9)]
).make_optimizer_scheduler(self.model.parameters())
)
def test_make_optimizer_with_cosine_annealing_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001, lr_schedulers=[CosineAnnealingLR(T_max=1)]
).make_optimizer_scheduler(self.model.parameters())
)
def test_make_optimizer_with_one_cycle_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001,
lr_schedulers=[
OneCycleLR(max_lr=0.1, base_momentum=0.8, total_steps=1000)
],
).make_optimizer_scheduler(self.model.parameters())
)
def test_make_optimizer_with_cosine_annealing_warm_restarts_lr_scheduler(self):
self._verify_optimizer(
Adam(
lr=0.001, lr_schedulers=[CosineAnnealingWarmRestarts(T_0=1)]
).make_optimizer_scheduler(self.model.parameters())
)
| 2,523 | 33.108108 | 88 | py |
ReAgent | ReAgent-master/reagent/test/workflow/reagent_sql_test_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import random
import shutil
import numpy as np
import torch
from pyspark import SparkConf
from reagent.data.spark_utils import DEFAULT_SPARK_CONFIG
# pyre-fixme[21]: Could not find `sparktestingbase`.
from sparktestingbase.sqltestcase import SQLTestCase
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# path to local hive metastore
HIVE_METASTORE = "metastore_db"
# for setting seeds
GLOBAL_TEST_CLASS_COUNTER = 0
# pyre-fixme[11]: Annotation `SQLTestCase` is not defined as a type.
class ReagentSQLTestBase(SQLTestCase):
def getConf(self):
conf = SparkConf()
for k, v in DEFAULT_SPARK_CONFIG.items():
conf.set(k, v)
return conf
@classmethod
def setUpClass(cls):
super().setUpClass()
# set up the seed for the class to prevent
# clashing random table names for example
global GLOBAL_TEST_CLASS_COUNTER
cls.test_class_seed = GLOBAL_TEST_CLASS_COUNTER
logger.info(f"Allocating seed {cls.test_class_seed} to {cls.__name__}.")
GLOBAL_TEST_CLASS_COUNTER += 1
def setUp(self):
super().setUp()
assert not os.path.isdir(
HIVE_METASTORE
), f"{HIVE_METASTORE} already exists! Try deleting it."
random.seed(self.test_class_seed)
torch.manual_seed(self.test_class_seed)
np.random.seed(self.test_class_seed)
logging.basicConfig()
def assertEq(self, series_a, arr_b):
"""Assert panda series is equal to np array"""
arr_a = np.array(series_a.tolist())
np.testing.assert_equal(arr_a, arr_b)
def assertAllClose(self, series_a, arr_b):
"""Assert panda series is allclose to np array"""
arr_a = np.array(series_a.tolist())
np.testing.assert_allclose(arr_a, arr_b)
def assertEqWithPresence(self, series_a, presence, arr_b):
"""Assert panda series given presence array is equal to np array"""
arr_a = np.array(series_a.tolist())
present_a = arr_a[presence]
present_b = arr_b[presence]
np.testing.assert_equal(present_a, present_b)
def tearDown(self):
super().tearDown()
# removes Derby from last runs
if os.path.isdir(HIVE_METASTORE):
shutil.rmtree(HIVE_METASTORE)
| 2,464 | 29.060976 | 80 | py |
ReAgent | ReAgent-master/reagent/test/workflow/test_oss_workflows.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import json
import os
import unittest
import zipfile
from typing import Dict
from unittest.mock import patch
import reagent
# pyre-fixme[21]: Could not find module `reagent.workflow.cli`.
import reagent.workflow.cli as cli
import torch
from click.testing import CliRunner
from reagent.core.parameters import NormalizationParameters
from reagent.test.base.horizon_test_base import HorizonTestBase
from reagent.workflow.types import Dataset
from ruamel.yaml import YAML
base_dir = os.path.abspath(os.path.dirname(reagent.__file__))
curr_dir = os.path.abspath(os.path.dirname(__file__))
CARTPOLE_NORMALIZATION_JSON = os.path.join(
curr_dir, "test_data/discrete_action/cartpole_norm.json"
)
DQN_WORKFLOW_PARQUET_ZIP = os.path.join(
curr_dir, "test_data/discrete_action/dqn_workflow.zip"
)
DQN_WORKFLOW_PARQUET_REL_PATH = "dqn_workflow"
DQN_WORKFLOW_YAML = os.path.join(
base_dir, "workflow/sample_configs/discrete_dqn_cartpole_offline.yaml"
)
# where to store config for testing cli
NEW_CONFIG_NAME = "config.yaml"
# module to patch
DISCRETE_DQN_BASE = "reagent.model_managers.discrete_dqn_base"
def get_test_workflow_config(path_to_config: str, use_gpu: bool):
"""Loads and modifies config to fun fast."""
yaml = YAML(typ="safe")
with open(path_to_config, "r") as f:
config = yaml.load(f)
config["use_gpu"] = use_gpu
config["num_train_epochs"] = 1
config["num_eval_episodes"] = 1
# minimum score is 0
config["passing_score_bar"] = -0.0001
# both table and eval_table will be referenced to our mocked parquet
config["input_table_spec"]["table_sample"] = 50.0
config["input_table_spec"]["eval_table_sample"] = 50.0
return config
def mock_cartpole_normalization() -> Dict[int, NormalizationParameters]:
"""Get mock normalization from our local file."""
with open(CARTPOLE_NORMALIZATION_JSON, "r") as f:
norm = json.load(f)
norm_params_dict = {}
for k, v in norm.items():
norm_params_dict[k] = NormalizationParameters(**json.loads(v))
return norm_params_dict
class TestOSSWorkflows(HorizonTestBase):
"""Run workflow to ensure no crashes, correctness/performance not tested."""
def _test_dqn_workflow(self, use_gpu=False, use_all_avail_gpus=False):
runner = CliRunner()
config = get_test_workflow_config(
path_to_config=DQN_WORKFLOW_YAML, use_gpu=use_gpu
)
# create new altered config (for faster testing)
with runner.isolated_filesystem():
yaml = YAML(typ="safe")
with open(NEW_CONFIG_NAME, "w") as f:
yaml.dump(config, f)
# unzip zipped parquet folder into cwd
with zipfile.ZipFile(DQN_WORKFLOW_PARQUET_ZIP, "r") as zip_ref:
zip_ref.extractall()
# patch the two calls to spark
# dataset points to the unzipped parquet folder
# normalization points to mocked norm extracted from json
mock_dataset = Dataset(
parquet_url=f"file://{os.path.abspath(DQN_WORKFLOW_PARQUET_REL_PATH)}"
)
mock_normalization = mock_cartpole_normalization()
with patch(
"reagent.data.oss_data_fetcher.OssDataFetcher.query_data",
return_value=mock_dataset,
), patch(
f"{DISCRETE_DQN_BASE}.identify_normalization_parameters",
return_value=mock_normalization,
):
# call the cli test
result = runner.invoke(
cli.run,
[
"reagent.workflow.training.identify_and_train_network",
NEW_CONFIG_NAME,
],
catch_exceptions=False,
)
print(result.output)
assert result.exit_code == 0, f"result = {result}"
def test_dqn_workflow(self):
self._test_dqn_workflow()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_dqn_workflow_gpu(self):
self._test_dqn_workflow(use_gpu=True)
if __name__ == "__main__":
unittest.main()
| 4,310 | 32.944882 | 86 | py |
ReAgent | ReAgent-master/reagent/test/replay_memory/extra_replay_buffer_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy as np
import numpy.testing as npt
import torch
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
logger = logging.getLogger(__name__)
torch.set_printoptions(profile="full")
OBS_SHAPE = (3, 3)
OBS_TYPE = np.float32
"""
Everything about the MDP, except terminal states, is defined here.
Terminal states are derived from the trajectory lengths array.
"""
def get_add_transition(i):
"""For adding into RB"""
return {
"state": np.ones(OBS_SHAPE) * i,
"action": int(i),
"reward": float(2 * i),
"extra1": float(3 * i),
}
ZERO_FEATURES = {
"state": np.zeros(OBS_SHAPE),
"action": int(0),
"reward": float(0),
"extra1": float(0),
}
def get_stacked_transition(i, stack_size, traj_start_idx):
"""For getting expected stacked state of i"""
res = {k: [] for k in ["state", "action", "reward", "extra1"]}
# must pad with some zero states
for idx in range(i - stack_size + 1, i + 1):
trans = ZERO_FEATURES if idx < traj_start_idx else get_add_transition(idx)
for k in res:
res[k].append(trans[k])
return {k: np.stack(v, axis=-1) for k, v in res.items()}
def setup_buffer(buffer_size, trajectory_lengths, stack_size=None, multi_steps=None):
"""We will insert one trajectory into the RB."""
stack_size = stack_size if stack_size is not None else 1
update_horizon = multi_steps if multi_steps is not None else 1
memory = ReplayBuffer(
stack_size=stack_size,
replay_capacity=buffer_size,
batch_size=1,
update_horizon=update_horizon,
return_everything_as_stack=stack_size is not None,
return_as_timeline_format=multi_steps is not None,
)
i = 0
for traj_len in trajectory_lengths:
for j in range(traj_len):
trans = get_add_transition(i)
terminal = bool(j == traj_len - 1)
memory.add(
observation=trans["state"],
action=trans["action"],
reward=trans["reward"],
terminal=terminal,
extra1=trans["extra1"],
)
i += 1
return memory.sample_all_valid_transitions()
def generic_stack_test_helper(buffer_size, trajectory_lengths, stack_size):
batch = setup_buffer(buffer_size, trajectory_lengths, stack_size=stack_size)
expected = {k: [] for k in ["state", "action", "reward", "extra1"]}
terminal_array = []
actual_rb_index = stack_size - 1
i = 0
for traj_len in trajectory_lengths:
traj_start = i
for j in range(traj_len):
cur = get_stacked_transition(i, stack_size, traj_start)
for k in expected:
expected[k].append(cur[k])
terminal_array.append(bool(j == traj_len - 1))
actual_rb_index += 1
i += 1
actual_rb_index += stack_size - 1
expected["terminal"] = np.expand_dims(terminal_array, axis=1)
for k in expected:
expected[k] = torch.tensor(expected[k])
for k in expected:
batch_val = getattr(batch, k)
npt.assert_array_equal(
batch_val,
expected[k],
err_msg=f"key {k}; expected {expected[k]}, got {batch_val}",
verbose=True,
)
def generic_stack_multi_steps_test_helper(
buffer_size, trajectory_lengths, stack_size, multi_steps
):
batch = setup_buffer(
buffer_size, trajectory_lengths, stack_size=stack_size, multi_steps=multi_steps
)
# start with state, action, extra1 (these are single)
expected = {k: [] for k in ["state", "action", "extra1"]}
terminal_array = []
actual_rb_index = stack_size - 1
i = 0
for traj_len in trajectory_lengths:
traj_start = i
for j in range(traj_len):
cur = get_stacked_transition(i, stack_size, traj_start)
for k in expected:
expected[k].append(cur[k])
terminal_array.append(bool(j >= traj_len - multi_steps))
actual_rb_index += 1
i += 1
actual_rb_index += stack_size - 1
assert (
actual_rb_index <= buffer_size
), f"{actual_rb_index} is larger than {buffer_size}"
expected["terminal"] = np.expand_dims(terminal_array, axis=1)
for k in expected:
expected[k] = torch.tensor(expected[k])
batch_size = expected["state"].shape[0]
for k in expected:
batch_val = getattr(batch, k)
npt.assert_array_equal(
batch_val,
expected[k],
err_msg=f"key {k}; expected {expected[k]}, got {batch_val}",
verbose=True,
)
# now examine reward, next_state, next_action, next_extra1, which would be lists of size step
expected = {k: [] for k in ["reward", "next_state", "next_action", "next_extra1"]}
i = 0
for traj_len in trajectory_lengths:
traj_start = i
for _ in range(traj_len):
multistep_trans = {k: [] for k in expected}
# rewards start at current
traj_end = traj_start + traj_len
for j in range(i, i + multi_steps):
if j < traj_end:
stacked_trans = get_stacked_transition(j, stack_size, traj_start)
multistep_trans["reward"].append(stacked_trans["reward"])
# next features start at current + 1
for j in range(i + 1, i + multi_steps + 1):
if j <= traj_end:
stacked_trans = get_stacked_transition(j, stack_size, traj_start)
for k in ["next_state", "next_action", "next_extra1"]:
stripped_k = k[len("next_") :]
multistep_trans[k].append(stacked_trans[stripped_k])
multistep_trans = {k: torch.tensor(v) for k, v in multistep_trans.items()}
for k in expected:
expected[k].append(multistep_trans[k])
i += 1
# validate the lengths
for k in expected:
batch_val = getattr(batch, k)
assert isinstance(batch_val, list), f"batch[{k}] has type {type(batch_val)}"
assert len(batch_val) == batch_size
for i in range(batch_size):
assert isinstance(
batch_val[i], torch.Tensor
), f"batch[{k}][{i}] has type {type(batch_val[i])};\n{batch_val}"
assert batch_val[i].shape[0] == batch.step[i], (
f"batch[{k}][{i}] {batch_val[i].shape} doesn't start "
f"with {batch.step[i]};\n{batch_val}"
)
# sanity check
assert len(expected[k][i]) == batch.step[i], (
f"expected[{k}][{i}] {expected[k][i]} with len {len(expected[k][i])} should have len "
f"{batch.step[i]};\n{expected[k]}, {batch_val}"
)
for k in expected:
batch_val = getattr(batch, k)
for i in range(batch_size):
# NOTE: the last transition for terminals is undefined
A = batch_val[i]
B = expected[k][i]
if batch.terminal[i]:
A = A[:-1]
B = B[:-1]
npt.assert_array_equal(A, B)
MAX_TRAJ_LEN = 100
NUM_TRAJ_LIMIT = 10
class ExtraReplayBufferTest(HorizonTestBase):
"""Stress tests for the replay buffer, especially for new flags."""
def test_stack_slaughter(self):
stack_size = 7
for i in range(1, NUM_TRAJ_LIMIT):
traj_lengths = torch.randint(1, MAX_TRAJ_LEN, (i,))
buffer_size = (traj_lengths.sum() + (i + 1) * (stack_size - 1)).item()
logger.info(
f"Inserting {i} trajectories...\nArguments are: "
f"buffer_size:{buffer_size}, "
f"traj_lengths:{traj_lengths}, "
f"stack_size:{stack_size}"
)
generic_stack_test_helper(buffer_size, traj_lengths.tolist(), stack_size)
logger.info(f"Inserting {i} trajectories passed...")
def test_stack_multistep_flags_slaughter(self):
stack_size = 5
multi_steps = 6
for i in range(1, NUM_TRAJ_LIMIT):
traj_lengths = torch.randint(1, MAX_TRAJ_LEN, (i,))
buffer_size = (traj_lengths.sum() + (i + 1) * (stack_size - 1)).item()
# handle edge case which would raise ValueError
if buffer_size < stack_size + multi_steps:
buffer_size = stack_size + multi_steps
logger.info(
f"Inserting {i} trajectories...\nArguments are: "
f"buffer_size:{buffer_size}, "
f"traj_lengths:{traj_lengths}, "
f"stack_size:{stack_size}, "
f"multi_steps:{multi_steps}"
)
generic_stack_multi_steps_test_helper(
buffer_size, traj_lengths.tolist(), stack_size, multi_steps
)
logger.info(f"Inserting {i} trajectories passed...")
def test_replay_overflow(self):
"""
        It is hard to make a stress test for this, since tracking which indices
        get replaced would effectively mean building a second RB,
        so instead we opt for a simple test...
stack_size = 2 so there's 1 padding.
"""
multi_steps = 2
stack_size = 2
memory = ReplayBuffer(
stack_size=stack_size,
replay_capacity=6,
batch_size=1,
update_horizon=multi_steps,
return_everything_as_stack=None,
return_as_timeline_format=True,
)
def trans(i):
return {
"observation": np.ones(OBS_SHAPE, dtype=OBS_TYPE),
"action": int(2 * i),
"reward": float(3 * i),
}
# Contents of RB
# start: [X, X, X, X, X, X]
npt.assert_array_equal(
memory._is_index_valid, [False, False, False, False, False, False]
)
# t0: [X, s0, X, X, X, X]
memory.add(**trans(0), terminal=False)
npt.assert_array_equal(
memory._is_index_valid, [False, False, False, False, False, False]
)
# t1: [X, s0, s1, X, X, X]
memory.add(**trans(1), terminal=False)
npt.assert_array_equal(
memory._is_index_valid, [False, False, False, False, False, False]
)
# t2: [X, s0, s1, s2, X, X]
# s0 finally becomes valid as its next state was added
memory.add(**trans(2), terminal=False)
npt.assert_array_equal(
memory._is_index_valid, [False, True, False, False, False, False]
)
batch = memory.sample_all_valid_transitions()
npt.assert_array_equal(batch.action, [[0, 0]])
npt.assert_array_equal(batch.next_action[0], [[0, 2], [2, 4]])
# t3: [X, s0, s1, s2, s3, X]
# episode termination validates whole episode
memory.add(**trans(3), terminal=True)
npt.assert_array_equal(
memory._is_index_valid, [False, True, True, True, True, False]
)
batch = memory.sample_all_valid_transitions()
npt.assert_array_equal(batch.action, [[0, 0], [0, 2], [2, 4], [4, 6]])
npt.assert_array_equal(batch.next_action[0], [[0, 2], [2, 4]])
npt.assert_array_equal(batch.next_action[1], [[2, 4], [4, 6]])
# batch.next_action[2][1] is garbage
npt.assert_array_equal(batch.next_action[2][0], [4, 6])
# batch.next_action[3] is [garbage]
# t4: [s4, s0, s1, s2, s3, X]
# s0 invalidated as its previous frame is corrupted
memory.add(**trans(4), terminal=False)
npt.assert_array_equal(
memory._is_index_valid, [False, False, True, True, True, False]
)
batch = memory.sample_all_valid_transitions()
npt.assert_array_equal(batch.action, [[0, 2], [2, 4], [4, 6]])
npt.assert_array_equal(batch.next_action[0], [[2, 4], [4, 6]])
npt.assert_array_equal(batch.next_action[1][0], [4, 6])
# t5: [s4, s5, s1, s2, s3, X]
memory.add(**trans(5), terminal=False)
npt.assert_array_equal(
memory._is_index_valid, [False, False, False, True, True, False]
)
batch = memory.sample_all_valid_transitions()
npt.assert_array_equal(batch.action, [[2, 4], [4, 6]])
npt.assert_array_equal(batch.next_action[0][0], [4, 6])
# t6: [s4, s5, s6, s2, s3, X]
memory.add(**trans(6), terminal=True)
npt.assert_array_equal(
memory._is_index_valid, [True, True, True, False, True, False]
)
batch = memory.sample_all_valid_transitions()
npt.assert_array_equal(batch.action, [[0, 8], [8, 10], [10, 12], [4, 6]])
npt.assert_array_equal(batch.next_action[0], [[8, 10], [10, 12]])
npt.assert_array_equal(batch.next_action[1][0], [10, 12])
# batch.next_action[2] is [garbage]
# batch.next_action[3] is [garbage]
logger.info("Overflow test passes!")
def test_sparse_input(self):
replay_capacity = 100
num_transitions = replay_capacity // 2
memory = ReplayBuffer(
stack_size=1, replay_capacity=replay_capacity, update_horizon=1
)
def trans(i):
sparse_feat1 = list(range(0, i % 4))
sparse_feat2 = list(range(i % 4, 4))
id_list = {"sparse_feat1": sparse_feat1, "sparse_feat2": sparse_feat2}
sparse_feat3 = (list(range(0, i % 7)), [k + 0.5 for k in range(0, i % 7)])
sparse_feat4 = (list(range(i % 7, 7)), [k + 0.5 for k in range(i % 7, 7)])
id_score_list = {"sparse_feat3": sparse_feat3, "sparse_feat4": sparse_feat4}
return {
"observation": np.ones(OBS_SHAPE, dtype=OBS_TYPE),
"action": int(2 * i),
"reward": float(3 * i),
"terminal": i % 4,
"id_list": id_list,
"id_score_list": id_score_list,
}
for i in range(num_transitions):
memory.add(**trans(i))
indices = list(range(num_transitions - 1))
batch = memory.sample_transition_batch(len(indices), torch.tensor(indices))
# calculate expected
res = {
"id_list": {"sparse_feat1": ([], []), "sparse_feat2": ([], [])},
"id_score_list": {
"sparse_feat3": ([], [], []),
"sparse_feat4": ([], [], []),
},
"next_id_list": {"sparse_feat1": ([], []), "sparse_feat2": ([], [])},
"next_id_score_list": {
"sparse_feat3": ([], [], []),
"sparse_feat4": ([], [], []),
},
}
for i in range(num_transitions - 1):
feats_i = trans(i)
feats_next = trans(i + 1)
for k in ["id_list", "id_score_list"]:
for feat_id in res[k]:
res[k][feat_id][0].append(len(res[k][feat_id][1]))
if k == "id_list":
res[k][feat_id][1].extend(feats_i[k][feat_id])
else:
res[k][feat_id][1].extend(feats_i[k][feat_id][0])
res[k][feat_id][2].extend(feats_i[k][feat_id][1])
for k in ["next_id_list", "next_id_score_list"]:
for feat_id in res[k]:
res[k][feat_id][0].append(len(res[k][feat_id][1]))
orig_k = k[len("next_") :]
if k == "next_id_list":
res[k][feat_id][1].extend(feats_next[orig_k][feat_id])
else:
res[k][feat_id][1].extend(feats_next[orig_k][feat_id][0])
res[k][feat_id][2].extend(feats_next[orig_k][feat_id][1])
for k in ["id_list", "id_score_list", "next_id_list", "next_id_score_list"]:
for feat_id in res[k]:
if k in ["id_list", "next_id_list"]:
npt.assert_array_equal(
res[k][feat_id][0], getattr(batch, k)[feat_id][0]
)
npt.assert_array_equal(
res[k][feat_id][1], getattr(batch, k)[feat_id][1]
)
else:
npt.assert_array_equal(
res[k][feat_id][0], getattr(batch, k)[feat_id][0]
)
npt.assert_array_equal(
res[k][feat_id][1], getattr(batch, k)[feat_id][1]
)
npt.assert_array_equal(
res[k][feat_id][2], getattr(batch, k)[feat_id][2]
)
# sample random
_ = memory.sample_transition_batch(10)
| 16,943 | 36.653333 | 102 | py |
ReAgent | ReAgent-master/reagent/test/replay_memory/circular_replay_buffer_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for circular_replay_buffer.py."""
import gzip
import os
import tempfile
import unittest
import numpy as np
import numpy.testing as npt
import torch
from reagent.replay_memory import circular_replay_buffer
# Default parameters used when creating the replay memory.
OBSERVATION_SHAPE = (84, 84)
OBS_DTYPE = np.uint8
STACK_SIZE = 4
BATCH_SIZE = 32
class CheckpointableClass(object):
def __init__(self):
self.attribute = 0
class ReplayBufferTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self._test_subdir = self.tmp_dir.name
num_dims = 10
self._test_observation = np.ones(num_dims) * 1
self._test_action = np.ones(num_dims) * 2
self._test_reward = np.ones(num_dims) * 3
self._test_terminal = np.ones(num_dims) * 4
self._test_add_count = np.array(7)
def tearDown(self):
self.tmp_dir.cleanup()
def testConstructor(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
self.assertEqual(memory.add_count, 0)
def testAdd(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
self.assertEqual(memory.cursor(), 0)
zeros = np.zeros(OBSERVATION_SHAPE)
memory.add(observation=zeros, action=0, reward=0, terminal=0)
        # Check that the cursor moved by STACK_SIZE - 1 padding adds + 1 (the add above).
self.assertEqual(memory.cursor(), STACK_SIZE)
def testExtraAdd(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
self.assertEqual(memory.cursor(), 0)
zeros = np.zeros(OBSERVATION_SHAPE)
memory.add(
observation=zeros, action=0, reward=0, terminal=0, extra1=0, extra2=[0, 0]
)
with self.assertRaisesRegex(ValueError, "Add expects"):
memory.add(observation=zeros, action=0, reward=0, terminal=0)
        # Check that the cursor moved by STACK_SIZE - 1 zero-padding adds + 1 (the add above).
self.assertEqual(memory.cursor(), STACK_SIZE)
def testLowCapacity(self):
with self.assertRaisesRegex(ValueError, "There is not enough capacity"):
circular_replay_buffer.ReplayBuffer(
stack_size=10,
replay_capacity=10,
batch_size=BATCH_SIZE,
update_horizon=1,
gamma=1.0,
)
with self.assertRaisesRegex(ValueError, "There is not enough capacity"):
circular_replay_buffer.ReplayBuffer(
stack_size=5,
replay_capacity=10,
batch_size=BATCH_SIZE,
update_horizon=10,
gamma=1.0,
)
# We should be able to create a buffer that contains just enough for a
# transition.
circular_replay_buffer.ReplayBuffer(
stack_size=5,
replay_capacity=10,
batch_size=BATCH_SIZE,
update_horizon=5,
gamma=1.0,
)
def testNSteprewardum(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE,
replay_capacity=10,
batch_size=BATCH_SIZE,
update_horizon=5,
gamma=1.0,
)
for i in range(50):
memory.add(
observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE),
action=0,
reward=2.0,
terminal=0,
)
for _i in range(100):
batch = memory.sample_transition_batch()
# Make sure the total reward is reward per step x update_horizon.
self.assertEqual(batch[2][0], 10.0)
def testSampleTransitionBatch(self):
replay_capacity = 10
memory = circular_replay_buffer.ReplayBuffer(
stack_size=1, replay_capacity=replay_capacity, batch_size=2
)
num_adds = 50 # The number of transitions to add to the memory.
for i in range(num_adds):
memory.add(
observation=np.full(OBSERVATION_SHAPE, i, OBS_DTYPE),
action=0,
reward=0,
terminal=i % 4,
) # Every 4 transitions is terminal.
# Test sampling with default batch size.
for _i in range(1000):
batch = memory.sample_transition_batch()
self.assertEqual(batch[0].shape[0], 2)
# Test changing batch sizes.
for _i in range(1000):
batch = memory.sample_transition_batch(BATCH_SIZE)
self.assertEqual(batch[0].shape[0], BATCH_SIZE)
# Verify we revert to default batch size.
for _i in range(1000):
batch = memory.sample_transition_batch()
self.assertEqual(batch[0].shape[0], 2)
# Verify we can specify what indices to sample.
indices = [1, 2, 3, 5, 8]
expected_states = np.array(
[np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE) for i in indices]
)
expected_next_states = (expected_states + 1) % replay_capacity
# Because the replay buffer is circular, we can exactly compute what the
# states will be at the specified indices by doing a little mod math:
expected_states += num_adds - replay_capacity
expected_next_states += num_adds - replay_capacity
# This is replicating the formula that was used above to determine what
# transitions are terminal when adding observation (i % 4).
expected_terminal = np.expand_dims(
np.array([min((x + num_adds - replay_capacity) % 4, 1) for x in indices]), 1
).astype(bool)
batch = memory.sample_transition_batch(
batch_size=len(indices), indices=torch.tensor(indices)
)
npt.assert_array_equal(batch.state, expected_states)
npt.assert_array_equal(batch.action, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.reward, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_action, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_reward, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_state, expected_next_states)
npt.assert_array_equal(batch.terminal, expected_terminal)
npt.assert_array_equal(batch.indices, np.expand_dims(np.array(indices), 1))
def testSampleTransitionBatchExtra(self):
replay_capacity = 10
memory = circular_replay_buffer.ReplayBuffer(
stack_size=1, replay_capacity=replay_capacity, batch_size=2
)
num_adds = 50 # The number of transitions to add to the memory.
for i in range(num_adds):
memory.add(
observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE),
action=0,
reward=0,
terminal=i % 4,
extra1=i % 2,
extra2=[i % 2, 0],
) # Every 4 transitions is terminal.
# Test sampling with default batch size.
for _i in range(1000):
batch = memory.sample_transition_batch()
self.assertEqual(batch[0].shape[0], 2)
# Test changing batch sizes.
for _i in range(1000):
batch = memory.sample_transition_batch(BATCH_SIZE)
self.assertEqual(batch[0].shape[0], BATCH_SIZE)
# Verify we revert to default batch size.
for _i in range(1000):
batch = memory.sample_transition_batch()
self.assertEqual(batch[0].shape[0], 2)
# Verify we can specify what indices to sample.
indices = [1, 2, 3, 5, 8]
expected_states = np.array(
[np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE) for i in indices]
)
expected_next_states = (expected_states + 1) % replay_capacity
# Because the replay buffer is circular, we can exactly compute what the
# states will be at the specified indices by doing a little mod math:
expected_states += num_adds - replay_capacity
expected_next_states += num_adds - replay_capacity
# This is replicating the formula that was used above to determine what
# transitions are terminal when adding observation (i % 4).
expected_terminal = np.expand_dims(
np.array([min((x + num_adds - replay_capacity) % 4, 1) for x in indices]), 1
).astype(bool)
expected_extra1 = np.expand_dims(
np.array([(x + num_adds - replay_capacity) % 2 for x in indices]), 1
)
expected_next_extra1 = np.expand_dims(
np.array([(x + 1 + num_adds - replay_capacity) % 2 for x in indices]), 1
)
expected_extra2 = np.stack(
[
[(x + num_adds - replay_capacity) % 2 for x in indices],
np.zeros((len(indices),)),
],
axis=1,
)
expected_next_extra2 = np.stack(
[
[(x + 1 + num_adds - replay_capacity) % 2 for x in indices],
np.zeros((len(indices),)),
],
axis=1,
)
batch = memory.sample_transition_batch(
batch_size=len(indices), indices=torch.tensor(indices)
)
npt.assert_array_equal(batch.state, expected_states)
npt.assert_array_equal(batch.action, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.reward, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_action, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_reward, np.zeros((len(indices), 1)))
npt.assert_array_equal(batch.next_state, expected_next_states)
npt.assert_array_equal(batch.terminal, expected_terminal)
npt.assert_array_equal(batch.indices, np.expand_dims(np.array(indices), 1))
npt.assert_array_equal(batch.extra1, expected_extra1)
npt.assert_array_equal(batch.next_extra1, expected_next_extra1)
npt.assert_array_equal(batch.extra2, expected_extra2)
npt.assert_array_equal(batch.next_extra2, expected_next_extra2)
def testSamplingWithterminalInTrajectory(self):
replay_capacity = 10
update_horizon = 3
memory = circular_replay_buffer.ReplayBuffer(
stack_size=1,
replay_capacity=replay_capacity,
batch_size=2,
update_horizon=update_horizon,
gamma=1.0,
)
for i in range(replay_capacity):
memory.add(
observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE),
action=i * 2,
reward=i,
terminal=1 if i == 3 else 0,
)
indices = [2, 3, 4]
batch = memory.sample_transition_batch(
batch_size=len(indices), indices=torch.tensor(indices)
)
        # In the common shape, state is 2-D unless stack_size > 1.
expected_states = np.array(
[np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE) for i in indices]
)
# The reward in the replay buffer will be (an asterisk marks the terminal
# state):
# [0 1 2 3* 4 5 6 7 8 9]
# Since we're setting the update_horizon to 3, the accumulated trajectory
# reward starting at each of the replay buffer positions will be:
# [3 6 5 3 15 18 21 24]
        # Since indices = [2, 3, 4], our expected rewards are [5, 3, 15].
expected_reward = np.array([[5], [3], [15]])
# Because update_horizon = 3, both indices 2 and 3 include terminal.
expected_terminal = np.array([[1], [1], [0]]).astype(bool)
npt.assert_array_equal(batch.state, expected_states)
npt.assert_array_equal(
batch.action, np.expand_dims(np.array(indices) * 2, axis=1)
)
npt.assert_array_equal(batch.reward, expected_reward)
npt.assert_array_equal(batch.terminal, expected_terminal)
npt.assert_array_equal(batch.indices, np.expand_dims(np.array(indices), 1))
def testIsTransitionValid(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=10, batch_size=2
)
memory.add(
observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE),
action=0,
reward=0,
terminal=0,
)
memory.add(
observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE),
action=0,
reward=0,
terminal=0,
)
memory.add(
observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE),
action=0,
reward=0,
terminal=1,
)
# These valids account for the automatically applied padding (3 blanks each
        # episode).
# correct_valids = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
# The above comment is for the original Dopamine buffer, which doesn't
# account for terminal frames within the update_horizon frames before
# the cursor. In this case, the frame right before the cursor
# is terminal, so even though it is within [c-update_horizon, c],
# it should still be valid for sampling, as next state doesn't matter.
correct_valids = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]
        # The cursor is now at index 6.
for i in range(10):
self.assertEqual(
correct_valids[i],
memory.is_valid_transition(i),
"Index %i should be %s" % (i, bool(correct_valids[i])),
)
"""
    Since we don't use saving, these tests are not maintained for now.
def testSave(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
memory.observation = self._test_observation
memory.action = self._test_action
memory.reward = self._test_reward
memory.terminal = self._test_terminal
current_iteration = 5
stale_iteration = current_iteration - circular_replay_buffer.CHECKPOINT_DURATION
memory.save(self._test_subdir, stale_iteration)
for attr in memory.__dict__:
if attr.startswith("_"):
continue
stale_filename = os.path.join(
self._test_subdir, "{}_ckpt.{}.gz".format(attr, stale_iteration)
)
self.assertTrue(os.path.exists(stale_filename))
memory.save(self._test_subdir, current_iteration)
for attr in memory.__dict__:
if attr.startswith("_"):
continue
filename = os.path.join(
self._test_subdir, "{}_ckpt.{}.gz".format(attr, current_iteration)
)
self.assertTrue(os.path.exists(filename))
# The stale version file should have been deleted.
self.assertFalse(os.path.exists(stale_filename))
def testSaveNonNDArrayAttributes(self):
# Tests checkpointing an attribute which is not a numpy array.
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
# Add some non-numpy data: an int, a string, an object.
memory.dummy_attribute_1 = 4753849
memory.dummy_attribute_2 = "String data"
memory.dummy_attribute_3 = CheckpointableClass()
current_iteration = 5
stale_iteration = current_iteration - circular_replay_buffer.CHECKPOINT_DURATION
memory.save(self._test_subdir, stale_iteration)
for attr in memory.__dict__:
if attr.startswith("_"):
continue
stale_filename = os.path.join(
self._test_subdir, "{}_ckpt.{}.gz".format(attr, stale_iteration)
)
self.assertTrue(os.path.exists(stale_filename))
memory.save(self._test_subdir, current_iteration)
for attr in memory.__dict__:
if attr.startswith("_"):
continue
filename = os.path.join(
self._test_subdir, "{}_ckpt.{}.gz".format(attr, current_iteration)
)
self.assertTrue(os.path.exists(filename))
# The stale version file should have been deleted.
self.assertFalse(os.path.exists(stale_filename))
def testLoadFromNonexistentDirectory(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
# We are trying to load from a non-existent directory, so a NotFoundError
# will be raised.
with self.assertRaises(FileNotFoundError):
memory.load("/does/not/exist", "3")
self.assertNotEqual(memory._store["observation"], self._test_observation)
self.assertNotEqual(memory._store["action"], self._test_action)
self.assertNotEqual(memory._store["reward"], self._test_reward)
self.assertNotEqual(memory._store["terminal"], self._test_terminal)
self.assertNotEqual(memory.add_count, self._test_add_count)
def testPartialLoadFails(self):
memory = circular_replay_buffer.ReplayBuffer(
stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE
)
self.assertNotEqual(memory._store["observation"], self._test_observation)
self.assertNotEqual(memory._store["action"], self._test_action)
self.assertNotEqual(memory._store["reward"], self._test_reward)
self.assertNotEqual(memory._store["terminal"], self._test_terminal)
self.assertNotEqual(memory.add_count, self._test_add_count)
numpy_arrays = {
"observation": self._test_observation,
"action": self._test_action,
"terminal": self._test_terminal,
"add_count": self._test_add_count,
}
for attr in numpy_arrays:
filename = os.path.join(self._test_subdir, "{}_ckpt.3.gz".format(attr))
with open(filename, "wb") as f:
with gzip.GzipFile(fileobj=f) as outfile:
np.save(outfile, numpy_arrays[attr], allow_pickle=False)
        # We are missing the reward file, so a NotFoundError will be raised.
with self.assertRaises(FileNotFoundError):
memory.load(self._test_subdir, "3")
# Since we are missing the reward file, it should not have loaded any of
# the other files.
self.assertNotEqual(memory._store["observation"], self._test_observation)
self.assertNotEqual(memory._store["action"], self._test_action)
self.assertNotEqual(memory._store["reward"], self._test_reward)
self.assertNotEqual(memory._store["terminal"], self._test_terminal)
self.assertNotEqual(memory.add_count, self._test_add_count)
def testLoad(self):
memory = circular_replay_buffer.ReplayBuffer(
observation_shape=OBSERVATION_SHAPE,
stack_size=STACK_SIZE,
replay_capacity=5,
batch_size=BATCH_SIZE,
)
self.assertNotEqual(memory._store["observation"], self._test_observation)
self.assertNotEqual(memory._store["action"], self._test_action)
self.assertNotEqual(memory._store["reward"], self._test_reward)
self.assertNotEqual(memory._store["terminal"], self._test_terminal)
self.assertNotEqual(memory.add_count, self._test_add_count)
store_prefix = "$store$_"
numpy_arrays = {
store_prefix + "observation": self._test_observation,
store_prefix + "action": self._test_action,
store_prefix + "reward": self._test_reward,
store_prefix + "terminal": self._test_terminal,
"add_count": self._test_add_count,
}
for attr in numpy_arrays:
filename = os.path.join(self._test_subdir, "{}_ckpt.3.gz".format(attr))
with open(filename, "wb") as f:
with gzip.GzipFile(fileobj=f) as outfile:
np.save(outfile, numpy_arrays[attr], allow_pickle=False)
memory.load(self._test_subdir, "3")
npt.assert_allclose(memory._store["observation"], self._test_observation)
npt.assert_allclose(memory._store["action"], self._test_action)
npt.assert_allclose(memory._store["reward"], self._test_reward)
npt.assert_allclose(memory._store["terminal"], self._test_terminal)
self.assertEqual(memory.add_count, self._test_add_count)
"""
| 21,389 | 42.038229 | 88 | py |
ReAgent | ReAgent-master/reagent/test/ranking/test_seq2slate_inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import random
import unittest
import numpy as np
import torch
from reagent.core.parameters import (
NormalizationData,
NormalizationParameters,
)
from reagent.model_utils.seq2slate_utils import (
Seq2SlateOutputArch,
)
from reagent.models.seq2slate import Seq2SlateTransformerModel, Seq2SlateTransformerNet
from reagent.prediction.predictor_wrapper import Seq2SlateWithPreprocessor
from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS
from reagent.preprocessing.preprocessor import Preprocessor
logger = logging.getLogger(__name__)
class TestSeq2SlateInference(unittest.TestCase):
def setUp(self):
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
def test_seq2slate_scriptable(self):
state_dim = 2
candidate_dim = 3
num_stacked_layers = 2
num_heads = 2
dim_model = 128
dim_feedforward = 128
candidate_size = 8
slate_size = 8
output_arch = Seq2SlateOutputArch.AUTOREGRESSIVE
temperature = 1.0
greedy_serving = True
# test the raw Seq2Slate model is script-able
seq2slate = Seq2SlateTransformerModel(
state_dim=state_dim,
candidate_dim=candidate_dim,
num_stacked_layers=num_stacked_layers,
num_heads=num_heads,
dim_model=dim_model,
dim_feedforward=dim_feedforward,
max_src_seq_len=candidate_size,
max_tgt_seq_len=slate_size,
output_arch=output_arch,
temperature=temperature,
)
seq2slate_scripted = torch.jit.script(seq2slate)
seq2slate_net = Seq2SlateTransformerNet(
state_dim=state_dim,
candidate_dim=candidate_dim,
num_stacked_layers=num_stacked_layers,
num_heads=num_heads,
dim_model=dim_model,
dim_feedforward=dim_feedforward,
max_src_seq_len=candidate_size,
max_tgt_seq_len=slate_size,
output_arch=output_arch,
temperature=temperature,
)
state_normalization_data = NormalizationData(
dense_normalization_parameters={
0: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
1: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
}
)
candidate_normalization_data = NormalizationData(
dense_normalization_parameters={
5: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
6: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
7: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
}
)
state_preprocessor = Preprocessor(
state_normalization_data.dense_normalization_parameters, False
)
candidate_preprocessor = Preprocessor(
candidate_normalization_data.dense_normalization_parameters, False
)
# test seq2slate with preprocessor is scriptable
seq2slate_with_preprocessor = Seq2SlateWithPreprocessor(
seq2slate_net.eval(),
state_preprocessor,
candidate_preprocessor,
greedy_serving,
)
torch.jit.script(seq2slate_with_preprocessor)
| 3,424 | 32.578431 | 87 | py |
ReAgent | ReAgent-master/reagent/test/ranking/seq2slate_utils.py | import logging
import math
import tempfile
from itertools import permutations
import pytorch_lightning as pl
import reagent.core.types as rlt
import torch
import torch.nn as nn
from reagent.core.parameters import Seq2SlateParameters
from reagent.core.parameters_seq2slate import LearningMethod, SimulationParameters
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.ranking.seq2slate_sim_trainer import Seq2SlateSimulationTrainer
from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
MODEL_TRANSFORMER = "transformer"
ON_POLICY = "on_policy"
OFF_POLICY = "off_policy"
SIMULATION = "simulation"
class TSPRewardModel(nn.Module):
def forward(self, state, candidates, ranked_cities, src_src_mask, tgt_out_idx):
reward = compute_reward(ranked_cities)
# negate because we want to minimize
return -reward
def post_preprocess_batch(seq2slate_net, candidate_num, batch, device, epoch):
model_propensity, model_action, reward = rank_on_policy_and_eval(
seq2slate_net, batch, candidate_num, greedy=False
)
batch = rlt.PreprocessedRankingInput.from_input(
state=batch.state.float_features,
candidates=batch.src_seq.float_features,
device=device,
action=model_action,
logged_propensities=model_propensity,
# negate because we want to minimize
slate_reward=-reward,
)
logger.info(f"Epoch {epoch} mean on_policy reward: {torch.mean(reward)}")
logger.info(f"Epoch {epoch} mean model_propensity: {torch.mean(model_propensity)}")
return batch
class Seq2SlateOnPolicyTrainer(Seq2SlateTrainer):
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
new_batch = post_preprocess_batch(
self.seq2slate_net,
self.seq2slate_net.max_src_seq_len,
batch,
batch.state.float_features.device,
self.current_epoch,
)
for attr in dir(new_batch):
if not callable(getattr(new_batch, attr)) and not attr.startswith("__"):
setattr(batch, attr, getattr(new_batch, attr))
super().on_train_batch_start(batch, batch_idx, dataloader_idx)
def create_trainer(
seq2slate_net,
learning_method,
batch_size,
learning_rate,
policy_gradient_interval,
device,
):
if learning_method == ON_POLICY:
seq2slate_params = Seq2SlateParameters(
on_policy=True, learning_method=LearningMethod.REINFORCEMENT_LEARNING
)
trainer_cls = Seq2SlateOnPolicyTrainer
elif learning_method == OFF_POLICY:
seq2slate_params = Seq2SlateParameters(
on_policy=False,
learning_method=LearningMethod.REINFORCEMENT_LEARNING,
)
trainer_cls = Seq2SlateTrainer
elif learning_method == SIMULATION:
temp_reward_model_path = tempfile.mkstemp(suffix=".pt")[1]
reward_model = torch.jit.script(TSPRewardModel())
torch.jit.save(reward_model, temp_reward_model_path)
seq2slate_params = Seq2SlateParameters(
on_policy=True,
learning_method=LearningMethod.SIMULATION,
simulation=SimulationParameters(
reward_name_weight={"tour_length": 1.0},
reward_name_power={"tour_length": 1.0},
reward_name_path={"tour_length": temp_reward_model_path},
),
)
trainer_cls = Seq2SlateSimulationTrainer
param_dict = {
"seq2slate_net": seq2slate_net,
"params": seq2slate_params,
"policy_optimizer": Optimizer__Union.default(lr=learning_rate),
"print_interval": 1,
"policy_gradient_interval": policy_gradient_interval,
}
return trainer_cls(**param_dict)
def create_seq2slate_net(
model_str,
candidate_num,
candidate_dim,
hidden_size,
output_arch,
temperature,
device,
):
if model_str == MODEL_TRANSFORMER:
return Seq2SlateTransformerNet(
state_dim=1,
candidate_dim=candidate_dim,
num_stacked_layers=2,
num_heads=2,
dim_model=hidden_size,
dim_feedforward=hidden_size,
max_src_seq_len=candidate_num,
max_tgt_seq_len=candidate_num,
output_arch=output_arch,
temperature=temperature,
state_embed_dim=1,
).to(device)
else:
raise NotImplementedError(f"unknown model type {model_str}")
FIX_CANDIDATES = None
@torch.no_grad()
def create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=False,
):
# fake state, we only use candidates
state = torch.zeros(batch_size, 1)
if diverse_input:
# city coordinates are spread in [0, 4]
candidates = torch.randint(
5, (batch_size, candidate_num, candidate_dim)
).float()
else:
        # every training example uses the same set of cities as input
global FIX_CANDIDATES
if FIX_CANDIDATES is None or FIX_CANDIDATES.shape != (
batch_size,
candidate_num,
candidate_dim,
):
candidates = torch.randint(
5, (batch_size, candidate_num, candidate_dim)
).float()
candidates[1:] = candidates[0]
FIX_CANDIDATES = candidates
else:
candidates = FIX_CANDIDATES
batch_dict = {
"state": state,
"candidates": candidates,
"device": device,
}
if learning_method == OFF_POLICY:
# using data from a uniform sampling policy
action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)])
propensity = torch.full((batch_size, 1), 1.0 / math.factorial(candidate_num))
ranked_cities = gather(candidates, action)
reward = compute_reward(ranked_cities)
batch_dict["action"] = action
batch_dict["logged_propensities"] = propensity
batch_dict["slate_reward"] = -reward
batch = rlt.PreprocessedRankingInput.from_input(**batch_dict)
logger.info("Generate one batch")
return batch
def create_train_and_test_batches(
batch_size,
candidate_num,
candidate_dim,
device,
num_train_batches,
learning_method,
diverse_input,
):
train_batches = [
create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=diverse_input,
)
for _ in range(num_train_batches)
]
if diverse_input:
test_batch = create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=diverse_input,
)
else:
test_batch = train_batches[0]
return train_batches, test_batch
def compute_reward(ranked_cities):
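    # Tour length of each ranked city sequence: torch.roll pairs every city
    # with its predecessor (wrapping around to close the tour), so this sums
    # the Euclidean edge lengths; callers negate it since shorter is better.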
assert len(ranked_cities.shape) == 3
ranked_cities_offset = torch.roll(ranked_cities, shifts=1, dims=1)
return (
torch.sqrt(((ranked_cities_offset - ranked_cities) ** 2).sum(-1))
.sum(-1)
.unsqueeze(1)
)
def compute_best_reward(input_cities):
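    # Brute-force baseline: enumerate every permutation of the cities and take
    # the shortest tour, so this is only feasible for small candidate_num.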
batch_size, candidate_num, _ = input_cities.shape
all_perm = torch.tensor(
list(permutations(torch.arange(candidate_num), candidate_num))
)
res = [
compute_reward(gather(input_cities, perm.repeat(batch_size, 1)))
for perm in all_perm
]
# res shape: batch_size, num_perm
res = torch.cat(res, dim=1)
best_possible_reward = torch.min(res, dim=1).values
best_possible_reward_mean = torch.mean(best_possible_reward)
return best_possible_reward_mean
# pyre-ignore
@torch.no_grad()
def rank_on_policy(
model, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool
):
model.eval()
rank_output = model(
batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=tgt_seq_len, greedy=greedy
)
ranked_slate_prob = rank_output.ranked_per_seq_probs
ranked_order = rank_output.ranked_tgt_out_idx - 2
model.train()
return ranked_slate_prob, ranked_order
# pyre-ignore
@torch.no_grad()
def rank_on_policy_and_eval(
seq2slate_net, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool
):
model_propensity, model_action = rank_on_policy(
seq2slate_net, batch, tgt_seq_len, greedy=greedy
)
ranked_cities = gather(batch.src_seq.float_features, model_action)
reward = compute_reward(ranked_cities)
return model_propensity, model_action, reward
def run_seq2slate_tsp(
model_str,
batch_size,
epochs,
candidate_num,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
):
pl.seed_everything(0)
candidate_dim = 2
eval_sample_size = 1
train_batches, test_batch = create_train_and_test_batches(
batch_size,
candidate_num,
candidate_dim,
device,
num_batches,
learning_method,
diverse_input,
)
best_test_possible_reward = compute_best_reward(test_batch.src_seq.float_features)
seq2slate_net = create_seq2slate_net(
model_str,
candidate_num,
candidate_dim,
hidden_size,
Seq2SlateOutputArch.AUTOREGRESSIVE,
1.0,
device,
)
trainer = create_trainer(
seq2slate_net,
learning_method,
batch_size,
learning_rate,
policy_gradient_interval,
device,
)
def evaluate():
best_test_reward = torch.full((batch_size,), 1e9).to(device)
for _ in range(eval_sample_size):
model_propensities, _, reward = rank_on_policy_and_eval(
seq2slate_net.to(device), test_batch, candidate_num, greedy=True
)
best_test_reward = torch.where(
reward < best_test_reward, reward, best_test_reward
)
logger.info(
f"Test mean model_propensities {torch.mean(model_propensities)}, "
f"Test mean reward: {torch.mean(best_test_reward)}, "
f"best possible reward {best_test_possible_reward}"
)
if torch.any(torch.isnan(model_propensities)):
raise Exception("Model propensities contain NaNs")
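        # Both quantities are tour lengths (lower is better), so the ratio is
        # at least 1; the test passes when the greedy tour is within
        # expect_reward_threshold of the brute-force optimum.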
ratio = torch.mean(best_test_reward) / best_test_possible_reward
return ratio < expect_reward_threshold, ratio
evaluate()
training_data = DataLoader(train_batches, collate_fn=lambda x: x[0])
pl_trainer = pl.Trainer(
max_epochs=epochs,
gpus=None if device == torch.device("cpu") else 1,
logger=False,
)
pl_trainer.fit(trainer, training_data)
result, ratio = evaluate()
    assert result, (
        f"Test failed because it did not reach the expected test reward: "
        f"{ratio} >= {expect_reward_threshold}."
    )
| 11,291 | 29.518919 | 88 | py |
ReAgent | ReAgent-master/reagent/test/ranking/test_seq2slate_on_policy.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import random
import unittest
from collections import defaultdict
from itertools import permutations
import numpy as np
import pytest
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from parameterized import parameterized
from reagent.model_utils.seq2slate_utils import (
DECODER_START_SYMBOL,
Seq2SlateMode,
Seq2SlateOutputArch,
mask_logits_by_idx,
per_symbol_to_per_seq_log_probs,
per_symbol_to_per_seq_probs,
subsequent_mask,
pytorch_decoder_mask,
)
from reagent.test.ranking.seq2slate_utils import (
MODEL_TRANSFORMER,
ON_POLICY,
create_batch,
create_seq2slate_net,
rank_on_policy,
run_seq2slate_tsp,
)
logger = logging.getLogger(__name__)
output_arch_list = [
Seq2SlateOutputArch.FRECHET_SORT,
Seq2SlateOutputArch.AUTOREGRESSIVE,
]
temperature_list = [1.0, 2.0]
class TestSeq2SlateOnPolicy(unittest.TestCase):
def setUp(self):
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
def test_pytorch_decoder_mask(self):
batch_size = 3
src_seq_len = 4
num_heads = 2
memory = torch.randn(batch_size, src_seq_len, num_heads)
tgt_in_idx = torch.tensor([[1, 2, 3], [1, 4, 2], [1, 5, 4]]).long()
tgt_tgt_mask, tgt_src_mask = pytorch_decoder_mask(memory, tgt_in_idx, num_heads)
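        # In both masks a True entry means "blocked": tgt_tgt_mask is the usual
        # causal mask over decoding steps, while tgt_src_mask hides the source
        # items that were already selected at earlier steps.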
expected_tgt_tgt_mask = (
torch.tensor(
[
[False, True, True],
[False, False, True],
[False, False, False],
],
)
.unsqueeze(0)
.repeat(batch_size * num_heads, 1, 1)
)
expected_tgt_src_mask = torch.tensor(
[
[
[False, False, False, False],
[True, False, False, False],
[True, True, False, False],
],
[
[False, False, False, False],
[False, False, True, False],
[True, False, True, False],
],
[
[False, False, False, False],
[False, False, False, True],
[False, False, True, True],
],
]
).repeat_interleave(num_heads, dim=0)
assert torch.all(tgt_tgt_mask == expected_tgt_tgt_mask)
assert torch.all(tgt_src_mask == expected_tgt_src_mask)
def test_per_symbol_to_per_seq_log_probs(self):
"""
Test per_symbol_to_per_seq_log_probs method
"""
batch_size = 1
seq_len = 3
candidate_size = seq_len + 2
tgt_out_idx = torch.tensor([[0, 2, 1]]) + 2
per_symbol_log_probs = torch.randn(batch_size, seq_len, candidate_size)
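        # Mimic the decoder's masking: the two reserved symbols are never legal,
        # and items chosen at earlier steps (tgt_out_idx is shifted by +2 into
        # symbol space) cannot be chosen again, hence the -inf entries below.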
per_symbol_log_probs[0, :, :2] = float("-inf")
per_symbol_log_probs[0, 1, 2] = float("-inf")
per_symbol_log_probs[0, 2, 2] = float("-inf")
per_symbol_log_probs[0, 2, 4] = float("-inf")
per_symbol_log_probs = F.log_softmax(per_symbol_log_probs, dim=2)
expect_per_seq_log_probs = (
per_symbol_log_probs[0, 0, 2]
+ per_symbol_log_probs[0, 1, 4]
+ per_symbol_log_probs[0, 2, 3]
)
computed_per_seq_log_probs = per_symbol_to_per_seq_log_probs(
per_symbol_log_probs, tgt_out_idx
)
np.testing.assert_allclose(
expect_per_seq_log_probs, computed_per_seq_log_probs, atol=0.001, rtol=0.0
)
def test_per_symbol_to_per_seq_probs(self):
batch_size = 1
seq_len = 3
candidate_size = seq_len + 2
tgt_out_idx = torch.tensor([[0, 2, 1]]) + 2
per_symbol_log_probs = torch.randn(batch_size, seq_len, candidate_size)
per_symbol_log_probs[0, :, :2] = float("-inf")
per_symbol_log_probs[0, 1, 2] = float("-inf")
per_symbol_log_probs[0, 2, 2] = float("-inf")
per_symbol_log_probs[0, 2, 4] = float("-inf")
per_symbol_log_probs = F.log_softmax(per_symbol_log_probs, dim=2)
per_symbol_probs = torch.exp(per_symbol_log_probs)
expect_per_seq_probs = (
per_symbol_probs[0, 0, 2]
* per_symbol_probs[0, 1, 4]
* per_symbol_probs[0, 2, 3]
)
computed_per_seq_probs = per_symbol_to_per_seq_probs(
per_symbol_probs, tgt_out_idx
)
np.testing.assert_allclose(
expect_per_seq_probs, computed_per_seq_probs, atol=0.001, rtol=0.0
)
def test_subsequent_mask(self):
expect_mask = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]])
mask = subsequent_mask(3, torch.device("cpu"))
assert torch.all(torch.eq(mask, expect_mask))
def test_mask_logits_by_idx(self):
logits = torch.tensor(
[
[
[1.0, 2.0, 3.0, 4.0, 5.0],
[2.0, 3.0, 4.0, 5.0, 6.0],
[3.0, 4.0, 5.0, 6.0, 7.0],
],
[
[5.0, 4.0, 3.0, 2.0, 1.0],
[6.0, 5.0, 4.0, 3.0, 2.0],
[7.0, 6.0, 5.0, 4.0, 3.0],
],
]
)
tgt_in_idx = torch.tensor(
[[DECODER_START_SYMBOL, 2, 3], [DECODER_START_SYMBOL, 4, 3]]
)
masked_logits = mask_logits_by_idx(logits, tgt_in_idx)
expected_logits = torch.tensor(
[
[
[float("-inf"), float("-inf"), 3.0, 4.0, 5.0],
[float("-inf"), float("-inf"), float("-inf"), 5.0, 6.0],
[float("-inf"), float("-inf"), float("-inf"), float("-inf"), 7.0],
],
[
[float("-inf"), float("-inf"), 3.0, 2.0, 1.0],
[float("-inf"), float("-inf"), 4.0, 3.0, float("-inf")],
[float("-inf"), float("-inf"), 5.0, float("-inf"), float("-inf")],
],
]
)
assert torch.all(torch.eq(masked_logits, expected_logits))
@parameterized.expand(itertools.product(output_arch_list, temperature_list))
@torch.no_grad()
def test_seq2slate_transformer_propensity_computation(
self, output_arch, temperature
):
"""
Test propensity computation of seq2slate net
"""
candidate_num = 4
candidate_dim = 2
hidden_size = 32
all_perm = torch.tensor(
list(permutations(torch.arange(candidate_num), candidate_num))
)
batch_size = len(all_perm)
device = torch.device("cpu")
seq2slate_net = create_seq2slate_net(
MODEL_TRANSFORMER,
candidate_num,
candidate_dim,
hidden_size,
output_arch,
temperature,
device,
)
batch = create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
ON_POLICY,
diverse_input=False,
)
batch = rlt.PreprocessedRankingInput.from_input(
state=batch.state.float_features,
candidates=batch.src_seq.float_features,
device=device,
action=all_perm,
)
per_symbol_log_prob = seq2slate_net(
batch, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE
).log_probs
per_seq_log_prob = seq2slate_net(
batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
).log_probs
per_seq_log_prob_computed = per_symbol_to_per_seq_log_probs(
per_symbol_log_prob, all_perm + 2
)
# probabilities of two modes should match
np.testing.assert_allclose(
per_seq_log_prob, per_seq_log_prob_computed, atol=0.00001
)
# probabilities of all possible permutations should sum up to 1
np.testing.assert_allclose(
torch.sum(torch.exp(per_seq_log_prob)), 1.0, atol=0.00001
)
@parameterized.expand(itertools.product(output_arch_list, temperature_list))
def test_seq2slate_transformer_onpolicy_basic_logic(self, output_arch, temperature):
"""
Test basic logic of seq2slate on policy sampling
"""
device = torch.device("cpu")
candidate_num = 4
candidate_dim = 2
batch_size = 4096
hidden_size = 32
seq2slate_net = create_seq2slate_net(
MODEL_TRANSFORMER,
candidate_num,
candidate_dim,
hidden_size,
output_arch,
temperature,
device,
)
batch = create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
ON_POLICY,
diverse_input=False,
)
action_to_propensity_map = {}
action_count = defaultdict(int)
total_count = 0
for i in range(50):
model_propensity, model_action = rank_on_policy(
seq2slate_net, batch, candidate_num, greedy=False
)
for propensity, action in zip(model_propensity, model_action):
action_str = ",".join(map(str, action.numpy().tolist()))
# Same action always leads to same propensity
if action_to_propensity_map.get(action_str) is None:
action_to_propensity_map[action_str] = float(propensity)
else:
np.testing.assert_allclose(
action_to_propensity_map[action_str],
float(propensity),
atol=0.001,
rtol=0.0,
)
action_count[action_str] += 1
total_count += 1
logger.info(f"Finish {i} round, {total_count} data counts")
# Check action distribution
for action_str, count in action_count.items():
empirical_propensity = count / total_count
computed_propensity = action_to_propensity_map[action_str]
logger.info(
f"action={action_str}, empirical propensity={empirical_propensity}, "
f"computed propensity={computed_propensity}"
)
np.testing.assert_allclose(
computed_propensity, empirical_propensity, atol=0.01, rtol=0.0
)
def test_seq2slate_transformer_on_policy_simple_tsp(self):
"""
        Solve the Traveling Salesman Problem. Cities come from one fixed set of nodes.
        The model easily hits the reward threshold after training on one batch.
"""
device = torch.device("cpu")
batch_size = 4096
epochs = 1
num_batches = 50
expect_reward_threshold = 1.12
hidden_size = 32
num_candidates = 6
diverse_input = False
learning_rate = 0.001
learning_method = ON_POLICY
policy_gradient_interval = 1
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
@pytest.mark.seq2slate_long
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_seq2slate_transformer_on_policy_hard_tsp(self):
"""
Solve Traveling Salesman Problem. Data comes from different sets of cities.
"""
device = torch.device("cuda")
batch_size = 4096
epochs = 3
num_batches = 300
expect_reward_threshold = 1.05
hidden_size = 32
num_candidates = 6
diverse_input = True
learning_rate = 0.001
learning_method = ON_POLICY
policy_gradient_interval = 1
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
| 12,496 | 32.414439 | 90 | py |
ReAgent | ReAgent-master/reagent/test/ranking/test_seq2slate_trainer.py | import copy
import itertools
import logging
import random
import unittest
from itertools import permutations
import numpy as np
import numpy.testing as npt
import pytorch_lightning as pl
import reagent.core.types as rlt
import torch
from parameterized import parameterized
from reagent.core.parameters import Seq2SlateParameters
from reagent.core.parameters_seq2slate import IPSClamp, IPSClampMethod
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union, classes
from reagent.samplers.frechet import FrechetSort
from reagent.training.ranking.helper import ips_clamp
from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer
from torch.utils.data import DataLoader
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
output_arch_list = [
Seq2SlateOutputArch.FRECHET_SORT,
Seq2SlateOutputArch.AUTOREGRESSIVE,
]
policy_gradient_interval_list = [1, 5]
clamp_method_list = [IPSClampMethod.UNIVERSAL, IPSClampMethod.AGGRESSIVE]
clamp_max_list = [1.0, 10.0]
frechet_sort_shape_list = [0.1, 0.5, 1.0]
def create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
):
return Seq2SlateTrainer(
seq2slate_net=seq2slate_net,
params=seq2slate_params,
policy_optimizer=Optimizer__Union(SGD=classes["SGD"](lr=learning_rate)),
policy_gradient_interval=policy_gradient_interval,
print_interval=1,
)
def create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
):
return Seq2SlateTransformerNet(
state_dim=state_dim,
candidate_dim=candidate_dim,
num_stacked_layers=2,
num_heads=2,
dim_model=hidden_size,
dim_feedforward=hidden_size,
max_src_seq_len=candidate_num,
max_tgt_seq_len=candidate_num,
output_arch=output_arch,
temperature=0.5,
)
def create_on_policy_batch(
seq2slate, batch_size, state_dim, candidate_num, candidate_dim, rank_seed, device
):
state = torch.randn(batch_size, state_dim).to(device)
candidates = torch.randn(batch_size, candidate_num, candidate_dim).to(device)
reward = torch.rand(batch_size, 1).to(device)
batch = rlt.PreprocessedRankingInput.from_input(
state=state, candidates=candidates, device=device
)
# Reset seed here so that gradients can be replicated.
torch.manual_seed(rank_seed)
rank_output = seq2slate(
batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
)
ranked_order = rank_output.ranked_tgt_out_idx - 2
ranked_slate_prob = rank_output.ranked_per_seq_probs
on_policy_batch = rlt.PreprocessedRankingInput.from_input(
state=state,
candidates=candidates,
device=device,
action=ranked_order,
logged_propensities=ranked_slate_prob.detach(),
slate_reward=reward,
)
return on_policy_batch
def create_off_policy_batch(
seq2slate, batch_size, state_dim, candidate_num, candidate_dim, device
):
state = torch.randn(batch_size, state_dim)
candidates = torch.randn(batch_size, candidate_num, candidate_dim)
reward = torch.rand(batch_size, 1)
action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)])
logged_slate_prob = torch.rand(batch_size, 1) / 1e12
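    # Dividing by 1e12 makes the logged propensities tiny, presumably so the raw
    # importance ratios become huge and IPS clamping is actually exercised.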
off_policy_batch = rlt.PreprocessedRankingInput.from_input(
state=state,
candidates=candidates,
device=device,
action=action,
logged_propensities=logged_slate_prob,
slate_reward=reward,
)
return off_policy_batch
class TestSeq2SlateTrainer(unittest.TestCase):
def setUp(self):
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
def assert_correct_gradient(
self,
net_with_gradient,
net_after_gradient,
policy_gradient_interval,
learning_rate,
):
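        # Checks a single vanilla SGD update, w_after = w_before - interval * lr * grad,
        # assuming the trainer accumulates the (identical) per-batch gradients over
        # policy_gradient_interval steps before applying one optimizer step.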
for (n_c, w_c), (n, w) in zip(
net_with_gradient.named_parameters(), net_after_gradient.named_parameters()
):
assert n_c == n
if w_c.grad is not None:
assert torch.allclose(
w_c - policy_gradient_interval * learning_rate * w_c.grad,
w,
rtol=1e-4,
atol=2e-6,
)
def test_ips_clamp(self):
importance_sampling = torch.tensor([0.5, 0.3, 3.0, 10.0, 40.0])
assert torch.all(ips_clamp(importance_sampling, None) == importance_sampling)
assert torch.all(
ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.AGGRESSIVE, 3.0))
== torch.tensor([0.5, 0.3, 3.0, 0.0, 0.0])
)
assert torch.all(
ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.UNIVERSAL, 3.0))
== torch.tensor([0.5, 0.3, 3.0, 3.0, 3.0])
)
@parameterized.expand(
itertools.product(policy_gradient_interval_list, output_arch_list)
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_seq2slate_trainer_on_policy_gpu(
self, policy_gradient_interval, output_arch
):
self._test_seq2slate_trainer_on_policy(
policy_gradient_interval, output_arch, device=torch.device("cuda")
)
@parameterized.expand(
itertools.product(policy_gradient_interval_list, output_arch_list)
)
def test_seq2slate_trainer_on_policy_cpu(
self, policy_gradient_interval, output_arch
):
self._test_seq2slate_trainer_on_policy(
policy_gradient_interval, output_arch, device=torch.device("cpu")
)
def _test_seq2slate_trainer_on_policy(
self, policy_gradient_interval, output_arch, device
):
batch_size = 32
state_dim = 2
candidate_num = 15
candidate_dim = 4
hidden_size = 16
learning_rate = 1.0
on_policy = True
rank_seed = 111
seq2slate_params = Seq2SlateParameters(on_policy=on_policy)
seq2slate_net = create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
).to(device)
seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device)
seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device)
trainer = create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
)
batch = create_on_policy_batch(
seq2slate_net,
batch_size,
state_dim,
candidate_num,
candidate_dim,
rank_seed,
device,
)
training_data = DataLoader([batch], collate_fn=lambda x: x[0])
pl_trainer = pl.Trainer(
max_epochs=policy_gradient_interval,
gpus=None if device == torch.device("cpu") else 1,
logger=False,
)
pl_trainer.fit(trainer, training_data)
seq2slate_net = trainer.seq2slate_net.to(device)
# manual compute gradient
torch.manual_seed(rank_seed)
rank_output = seq2slate_net_copy(
batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
)
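        # REINFORCE surrogate: maximize E[log p(slate) * reward] by minimizing its negative.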
loss = -(
torch.mean(torch.log(rank_output.ranked_per_seq_probs) * batch.slate_reward)
)
loss.backward()
self.assert_correct_gradient(
seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
)
# another way to compute gradient manually
torch.manual_seed(rank_seed)
ranked_per_seq_probs = seq2slate_net_copy_copy(
batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
).ranked_per_seq_probs
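        # p / p.detach() evaluates to 1, but its gradient is grad(p) / p = grad(log p),
        # so this formulation must produce the same gradients as the log-prob loss above.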
loss = -(
torch.mean(
ranked_per_seq_probs
/ ranked_per_seq_probs.detach()
* batch.slate_reward
)
)
loss.backward()
self.assert_correct_gradient(
seq2slate_net_copy_copy,
seq2slate_net,
policy_gradient_interval,
learning_rate,
)
@parameterized.expand(
itertools.product(policy_gradient_interval_list, output_arch_list)
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_seq2slate_trainer_off_policy_gpu(
self, policy_gradient_interval, output_arch
):
self._test_seq2slate_trainer_off_policy(
policy_gradient_interval, output_arch, device=torch.device("cuda")
)
@parameterized.expand(
itertools.product(policy_gradient_interval_list, output_arch_list)
)
def test_seq2slate_trainer_off_policy_cpu(
self, policy_gradient_interval, output_arch
):
self._test_seq2slate_trainer_off_policy(
policy_gradient_interval, output_arch, device=torch.device("cpu")
)
def _test_seq2slate_trainer_off_policy(
self, policy_gradient_interval, output_arch, device
):
batch_size = 32
state_dim = 2
candidate_num = 15
candidate_dim = 4
hidden_size = 16
learning_rate = 1.0
on_policy = False
seq2slate_params = Seq2SlateParameters(on_policy=on_policy)
seq2slate_net = create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
).to(device)
seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device)
seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device)
trainer = create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
)
batch = create_off_policy_batch(
seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device
)
training_data = DataLoader([batch], collate_fn=lambda x: x[0])
pl_trainer = pl.Trainer(
max_epochs=policy_gradient_interval,
gpus=None if device == torch.device("cpu") else 1,
logger=False,
)
pl_trainer.fit(trainer, training_data)
seq2slate_net = trainer.seq2slate_net.to(device)
# manual compute gradient
ranked_per_seq_log_probs = seq2slate_net_copy(
batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
).log_probs
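        # Importance-weighted REINFORCE: the detached exp(log p) / logged propensity
        # acts as a constant IPS weight, leaving grad(log p) as the only gradient path.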
loss = -(
torch.mean(
ranked_per_seq_log_probs
* torch.exp(ranked_per_seq_log_probs).detach()
/ batch.tgt_out_probs
* batch.slate_reward
)
)
loss.backward()
self.assert_correct_gradient(
seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
)
# another way to compute gradient manually
ranked_per_seq_probs = torch.exp(
seq2slate_net_copy_copy(
batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
).log_probs
)
loss = -(
torch.mean(ranked_per_seq_probs / batch.tgt_out_probs * batch.slate_reward)
)
loss.backward()
self.assert_correct_gradient(
seq2slate_net_copy_copy,
seq2slate_net,
policy_gradient_interval,
learning_rate,
)
@parameterized.expand(itertools.product(clamp_method_list, output_arch_list))
def test_seq2slate_trainer_off_policy_with_clamp(self, clamp_method, output_arch):
batch_size = 32
state_dim = 2
candidate_num = 15
candidate_dim = 4
hidden_size = 16
learning_rate = 1.0
device = torch.device("cpu")
policy_gradient_interval = 1
seq2slate_params = Seq2SlateParameters(
on_policy=False,
ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=0.3),
)
seq2slate_net = create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
)
seq2slate_net_copy = copy.deepcopy(seq2slate_net)
trainer = create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
)
batch = create_off_policy_batch(
seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device
)
training_data = DataLoader([batch], collate_fn=lambda x: x[0])
pl_trainer = pl.Trainer(max_epochs=policy_gradient_interval, logger=False)
pl_trainer.fit(trainer, training_data)
# manual compute gradient
ranked_per_seq_probs = torch.exp(
seq2slate_net_copy(
batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
).log_probs
)
logger.info(f"ips ratio={ranked_per_seq_probs / batch.tgt_out_probs}")
loss = -(
torch.mean(
ips_clamp(
ranked_per_seq_probs / batch.tgt_out_probs,
seq2slate_params.ips_clamp,
)
* batch.slate_reward
)
)
loss.backward()
self.assert_correct_gradient(
seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
)
@parameterized.expand(
itertools.product(
output_arch_list, clamp_method_list, clamp_max_list, frechet_sort_shape_list
)
)
def test_compute_impt_smpl(self, output_arch, clamp_method, clamp_max, shape):
logger.info(f"output arch: {output_arch}")
logger.info(f"clamp method: {clamp_method}")
logger.info(f"clamp max: {clamp_max}")
logger.info(f"frechet shape: {shape}")
candidate_num = 5
candidate_dim = 2
state_dim = 1
hidden_size = 32
device = torch.device("cpu")
learning_rate = 0.001
policy_gradient_interval = 1
candidates = torch.randint(5, (candidate_num, candidate_dim)).float()
candidate_scores = torch.sum(candidates, dim=1)
seq2slate_params = Seq2SlateParameters(
on_policy=False,
ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=clamp_max),
)
seq2slate_net = create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
)
trainer = create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
)
all_permt = torch.tensor(
list(permutations(range(candidate_num), candidate_num))
)
sampler = FrechetSort(shape=shape, topk=candidate_num)
sum_of_logged_propensity = 0
sum_of_model_propensity = 0
sum_of_ips_ratio = 0
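        # Enumerating every permutation lets the test verify that both the logged
        # (FrechetSort) and model propensity distributions sum to 1 (asserted below).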
for i in range(len(all_permt)):
sample_action = all_permt[i]
logged_propensity = torch.exp(
sampler.log_prob(candidate_scores, sample_action)
)
batch = rlt.PreprocessedRankingInput.from_input(
state=torch.zeros(1, state_dim),
candidates=candidates.unsqueeze(0),
device=device,
action=sample_action.unsqueeze(0),
logged_propensities=logged_propensity.reshape(1, 1),
)
model_propensities = torch.exp(
seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs
)
impt_smpl, clamped_impt_smpl = trainer._compute_impt_smpl(
model_propensities, logged_propensity
)
if impt_smpl > clamp_max:
if clamp_method == IPSClampMethod.AGGRESSIVE:
                    npt.assert_allclose(clamped_impt_smpl.detach().numpy(), 0, rtol=1e-5)
else:
npt.assert_allclose(
clamped_impt_smpl.detach().numpy(), clamp_max, rtol=1e-5
)
sum_of_model_propensity += model_propensities
sum_of_logged_propensity += logged_propensity
sum_of_ips_ratio += model_propensities / logged_propensity
logger.info(
f"shape={shape}, sample_action={sample_action}, logged_propensity={logged_propensity},"
f" model_propensity={model_propensities}"
)
logger.info(
f"shape {shape}, sum_of_logged_propensity={sum_of_logged_propensity}, "
f"sum_of_model_propensity={sum_of_model_propensity}, "
f"mean sum_of_ips_ratio={sum_of_ips_ratio / len(all_permt)}"
)
npt.assert_allclose(sum_of_logged_propensity.detach().numpy(), 1, rtol=1e-5)
npt.assert_allclose(sum_of_model_propensity.detach().numpy(), 1, rtol=1e-5)
@parameterized.expand(itertools.product(output_arch_list, frechet_sort_shape_list))
def test_ips_ratio_mean(self, output_arch, shape):
logger.info(f"output arch: {output_arch}")
logger.info(f"frechet shape: {shape}")
candidate_num = 5
candidate_dim = 2
state_dim = 1
hidden_size = 8
device = torch.device("cpu")
batch_size = 1024
num_batches = 400
learning_rate = 0.001
policy_gradient_interval = 1
state = torch.zeros(batch_size, state_dim)
# all data have same candidates
candidates = torch.randint(
5, (batch_size, candidate_num, candidate_dim)
).float()
candidates[1:] = candidates[0]
candidate_scores = torch.sum(candidates, dim=-1)
seq2slate_params = Seq2SlateParameters(
on_policy=False,
)
seq2slate_net = create_seq2slate_transformer(
state_dim, candidate_num, candidate_dim, hidden_size, output_arch
)
trainer = create_trainer(
seq2slate_net,
learning_rate,
seq2slate_params,
policy_gradient_interval,
)
sampler = FrechetSort(shape=shape, topk=candidate_num)
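        # Under the logging policy, E[p_model / p_logged] = sum_a p_model(a) = 1, so
        # the running mean of the importance ratios should converge to 1.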
sum_of_ips_ratio = 0
for i in range(num_batches):
sample_outputs = [
sampler.sample_action(candidate_scores[j : j + 1])
for j in range(batch_size)
]
action = torch.stack(
list(map(lambda x: x.action.squeeze(0), sample_outputs))
)
logged_propensity = torch.stack(
list(map(lambda x: torch.exp(x.log_prob), sample_outputs))
)
batch = rlt.PreprocessedRankingInput.from_input(
state=state,
candidates=candidates,
device=device,
action=action,
logged_propensities=logged_propensity,
)
model_propensities = torch.exp(
seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs
)
impt_smpl, _ = trainer._compute_impt_smpl(
model_propensities, logged_propensity
)
sum_of_ips_ratio += torch.mean(impt_smpl).detach().numpy()
mean_of_ips_ratio = sum_of_ips_ratio / (i + 1)
logger.info(f"{i}-th batch, mean ips ratio={mean_of_ips_ratio}")
if i > 100 and np.allclose(mean_of_ips_ratio, 1, atol=0.03):
return
raise Exception(f"Mean ips ratio {mean_of_ips_ratio} is not close to 1")
| 19,844 | 34.628366 | 103 | py |
ReAgent | ReAgent-master/reagent/test/ranking/test_seq2slate_simulation.py | import random
import unittest
import numpy as np
import pytest
import torch
from reagent.test.ranking.seq2slate_utils import (
MODEL_TRANSFORMER,
SIMULATION,
run_seq2slate_tsp,
)
class TestSeq2SlateSimulation(unittest.TestCase):
def setUp(self):
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
def test_seq2slate_transformer_simulation_simple_tsp(self):
"""
Solve Traveling Salesman Problem. Data comes from one set of nodes (cities).
"""
device = torch.device("cpu")
batch_size = 4096
epochs = 1
num_batches = 50
expect_reward_threshold = 1.12
hidden_size = 32
num_candidates = 6
diverse_input = False
learning_rate = 0.001
learning_method = SIMULATION
policy_gradient_interval = 1
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
@pytest.mark.seq2slate_long
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_seq2slate_transformer_simulation_hard_tsp(self):
"""
Solve Traveling Salesman Problem. Data comes from multiple sets of cities.
"""
device = torch.device("cuda")
batch_size = 4096
epochs = 8
num_batches = 300
expect_reward_threshold = 1.02
hidden_size = 32
num_candidates = 6
diverse_input = True
learning_rate = 0.001
learning_method = SIMULATION
policy_gradient_interval = 1
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
| 2,194 | 26.098765 | 84 | py |
ReAgent | ReAgent-master/reagent/test/ranking/test_seq2slate_off_policy.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import random
import unittest
import numpy as np
import pytest
import torch
from reagent.test.ranking.seq2slate_utils import (
MODEL_TRANSFORMER,
OFF_POLICY,
run_seq2slate_tsp,
)
logger = logging.getLogger(__name__)
class TestSeq2SlateOffPolicy(unittest.TestCase):
def setUp(self):
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
def test_seq2slate_transformer_off_policy_simple_tsp(self):
"""
Solve Traveling Salesman Problem. Data comes from one set of nodes (cities).
"""
device = torch.device("cpu")
batch_size = 4096
epochs = 1
num_batches = 100
expect_reward_threshold = 1.02
hidden_size = 32
num_candidates = 6
diverse_input = False
learning_rate = 0.001
learning_method = OFF_POLICY
policy_gradient_interval = 1
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
@pytest.mark.seq2slate_long
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_seq2slate_transformer_off_policy_hard_tsp(self):
"""
Solve Traveling Salesman Problem. Data comes from multiple sets of cities.
"""
device = torch.device("cuda")
batch_size = 4096
epochs = 3
num_batches = 300
expect_reward_threshold = 1.02
hidden_size = 32
num_candidates = 4
diverse_input = True
learning_rate = 0.001
learning_method = OFF_POLICY
policy_gradient_interval = 20
run_seq2slate_tsp(
MODEL_TRANSFORMER,
batch_size,
epochs,
num_candidates,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
)
| 2,344 | 25.954023 | 84 | py |
ReAgent | ReAgent-master/reagent/test/net_builder/test_value_net_builder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.parameters import NormalizationData, NormalizationParameters
from reagent.core.types import FeatureData
from reagent.net_builder import value
from reagent.net_builder.unions import ValueNetBuilder__Union
from reagent.preprocessing.identify_types import CONTINUOUS
class TestValueNetBuilder(unittest.TestCase):
def test_fully_connected(self):
chooser = ValueNetBuilder__Union(
FullyConnected=value.fully_connected.FullyConnected()
)
builder = chooser.value
state_dim = 3
normalization_data = NormalizationData(
dense_normalization_parameters={
i: NormalizationParameters(feature_type=CONTINUOUS)
for i in range(state_dim)
}
)
value_network = builder.build_value_network(normalization_data)
batch_size = 5
x = FeatureData(float_features=torch.randn(batch_size, state_dim))
y = value_network(x)
self.assertEqual(y.shape, (batch_size, 1))
| 1,141 | 34.6875 | 78 | py |
ReAgent | ReAgent-master/reagent/test/net_builder/test_synthetic_reward_net_builder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy.testing as npt
import torch
from reagent.core import parameters as rlp
from reagent.core import types as rlt
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData, NormalizationParameters
from reagent.net_builder.synthetic_reward.ngram_synthetic_reward import (
NGramSyntheticReward,
NGramConvNetSyntheticReward,
)
from reagent.net_builder.synthetic_reward.sequence_synthetic_reward import (
SequenceSyntheticReward,
)
from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import (
SingleStepSyntheticReward,
)
from reagent.net_builder.synthetic_reward.transformer_synthetic_reward import (
TransformerSyntheticReward,
)
from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder
from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union
from reagent.preprocessing.identify_types import CONTINUOUS
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import (
FbSyntheticRewardPredictorWrapper as SyntheticRewardPredictorWrapper,
)
else:
from reagent.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import (
SyntheticRewardPredictorWrapper,
)
STATE_DIM = 3
ACTION_DIM = 2
BATCH_SIZE = 2
SEQ_LEN = 4
def _create_norm(dim, offset=0):
normalization_data = NormalizationData(
dense_normalization_parameters={
i: NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0)
for i in range(offset, dim + offset)
}
)
return normalization_data
def _create_input():
state = torch.randn(SEQ_LEN, BATCH_SIZE, STATE_DIM)
# generate valid_step with shape (BATCH_SIZE, 1), values ranging from [1, SEQ_LEN] (inclusive)
valid_step = torch.randint(1, SEQ_LEN + 1, size=(BATCH_SIZE, 1))
# create one-hot action value
action_label = torch.LongTensor(SEQ_LEN * BATCH_SIZE, 1) % ACTION_DIM
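    # torch.LongTensor(...) is allocated uninitialized; the modulo folds whatever
    # values happen to be there into [0, ACTION_DIM), giving arbitrary (not
    # reproducibly random) action labels for the one-hot encoding below.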
action = torch.FloatTensor(SEQ_LEN * BATCH_SIZE, ACTION_DIM)
action.zero_()
action.scatter_(1, action_label, 1)
action = action.reshape(SEQ_LEN, BATCH_SIZE, ACTION_DIM)
input = rlt.MemoryNetworkInput(
state=rlt.FeatureData(state),
action=action,
valid_step=valid_step,
# the rest fields will not be used
next_state=torch.tensor([]),
reward=torch.tensor([]),
step=torch.tensor([]),
not_terminal=torch.tensor([]),
time_diff=torch.tensor([]),
)
return input
def _create_preprocessed_input(
input: rlt.MemoryNetworkInput,
state_preprocessor: Preprocessor,
action_preprocessor: Preprocessor,
):
preprocessed_state = state_preprocessor(
input.state.float_features.reshape(SEQ_LEN * BATCH_SIZE, STATE_DIM),
torch.ones(SEQ_LEN * BATCH_SIZE, STATE_DIM),
).reshape(SEQ_LEN, BATCH_SIZE, STATE_DIM)
preprocessed_action = action_preprocessor(
input.action.reshape(SEQ_LEN * BATCH_SIZE, ACTION_DIM),
torch.ones(SEQ_LEN * BATCH_SIZE, ACTION_DIM),
).reshape(SEQ_LEN, BATCH_SIZE, ACTION_DIM)
return rlt.MemoryNetworkInput(
state=rlt.FeatureData(preprocessed_state),
action=preprocessed_action,
valid_step=input.valid_step,
next_state=input.next_state,
reward=input.reward,
step=input.step,
not_terminal=input.not_terminal,
time_diff=input.time_diff,
)
class TestSyntheticRewardNetBuilder(unittest.TestCase):
def test_single_step_synthetic_reward_net_builder_discrete_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
SingleStepSyntheticReward=SingleStepSyntheticReward()
).value
self._test_synthetic_reward_net_builder_discrete_actions(builder)
def test_ngram_fc_synthetic_reward_net_builder_discrete_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
NGramSyntheticReward=NGramSyntheticReward()
).value
self._test_synthetic_reward_net_builder_discrete_actions(builder)
def test_ngram_conv_net_synthetic_reward_net_builder_discrete_actions(
self,
):
conv_net_params = rlp.ConvNetParameters(
conv_dims=[256, 128],
conv_height_kernels=[1, 1],
pool_types=["max", "max"],
pool_kernel_sizes=[1, 1],
)
builder = SyntheticRewardNetBuilder__Union(
NGramConvNetSyntheticReward=NGramConvNetSyntheticReward(
conv_net_params=conv_net_params
)
).value
self._test_synthetic_reward_net_builder_discrete_actions(builder)
def test_lstm_synthetic_reward_net_builder_discrete_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
SequenceSyntheticReward=SequenceSyntheticReward()
).value
self._test_synthetic_reward_net_builder_discrete_actions(builder)
def test_transformer_synthetic_reward_net_builder_discrete_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
TransformerSyntheticReward=TransformerSyntheticReward()
).value
self._test_synthetic_reward_net_builder_discrete_actions(builder)
def _test_synthetic_reward_net_builder_discrete_actions(
self, builder: SyntheticRewardNetBuilder
):
state_normalization_data = _create_norm(STATE_DIM)
discrete_action_names = ["1", "2"]
reward_net = builder.build_synthetic_reward_network(
state_normalization_data, discrete_action_names=discrete_action_names
)
input = _create_input()
output = reward_net(input).predicted_reward
assert output.shape == (BATCH_SIZE, 1)
# TO IMPLEMENT
# predictor_wrapper = builder.build_serving_module(
# reward_net,
# state_normalization_data,
# discrete_action_names=discrete_action_names,
# )
# self.assertIsInstance(
# predictor_wrapper, DiscreteSingleStepSyntheticRewardPredictorWrapper
# )
def test_single_step_synthetic_reward_net_builder_continuous_actions(self):
builder = SyntheticRewardNetBuilder__Union(
SingleStepSyntheticReward=SingleStepSyntheticReward()
).value
self._test_synthetic_reward_net_builder_continuous_actions(builder)
def test_ngram_fc_synthetic_reward_net_builder_continuous_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
NGramSyntheticReward=NGramSyntheticReward()
).value
self._test_synthetic_reward_net_builder_continuous_actions(builder)
def test_ngram_conv_net_synthetic_reward_net_builder_continuous_actions(
self,
):
conv_net_params = rlp.ConvNetParameters(
conv_dims=[256, 128],
conv_height_kernels=[1, 1],
pool_types=["max", "max"],
pool_kernel_sizes=[1, 1],
)
builder = SyntheticRewardNetBuilder__Union(
NGramConvNetSyntheticReward=NGramConvNetSyntheticReward(
conv_net_params=conv_net_params
)
).value
self._test_synthetic_reward_net_builder_continuous_actions(builder)
def test_lstm_synthetic_reward_net_builder_continuous_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
SequenceSyntheticReward=SequenceSyntheticReward()
).value
self._test_synthetic_reward_net_builder_continuous_actions(builder)
def test_transformer_synthetic_reward_net_builder_continuous_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
TransformerSyntheticReward=TransformerSyntheticReward()
).value
self._test_synthetic_reward_net_builder_continuous_actions(builder)
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def _test_synthetic_reward_net_builder_continuous_actions(
self, builder: SyntheticRewardNetBuilder
):
"""
This test does the following steps:
1. create a net builder
2. use the net builder to create a synthetic reward network
3. export the synthetic reward network
4. use the exported network to create a predictor wrapper
5. create raw input and preprocessed inputs
        6. check that the results of the following match:
a. synthetic reward network on preprocessed input
b. export network on preprocessed input
c. predictor wrapper on raw input
"""
state_normalization_data = _create_norm(STATE_DIM)
action_normalization_data = _create_norm(ACTION_DIM, offset=STATE_DIM)
state_preprocessor = Preprocessor(
state_normalization_data.dense_normalization_parameters
)
action_preprocessor = Preprocessor(
action_normalization_data.dense_normalization_parameters
)
reward_net = builder.build_synthetic_reward_network(
state_normalization_data,
action_normalization_data=action_normalization_data,
).eval()
input = _create_input()
preprocessed_input = _create_preprocessed_input(
input, state_preprocessor, action_preprocessor
)
output = reward_net(preprocessed_input).predicted_reward
assert output.shape == (BATCH_SIZE, 1)
# pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function.
export_net = reward_net.export_mlp().cpu().eval()
export_output = export_net(
preprocessed_input.state.float_features, preprocessed_input.action
)
predictor_wrapper = builder.build_serving_module(
SEQ_LEN,
reward_net,
state_normalization_data,
action_normalization_data=action_normalization_data,
)
self.assertIsInstance(predictor_wrapper, SyntheticRewardPredictorWrapper)
for i in range(BATCH_SIZE):
input_to_predictor = torch.cat(
(input.state.float_features[:, i, :], input.action[:, i, :]), dim=1
)
input_to_predictor_presence = torch.ones(SEQ_LEN, STATE_DIM + ACTION_DIM)
predictor_output = predictor_wrapper(
(input_to_predictor, input_to_predictor_presence)
)
if IS_FB_ENVIRONMENT:
predictor_output = predictor_output[1][2]
npt.assert_array_almost_equal(predictor_output, export_output[i], decimal=4)
npt.assert_almost_equal(
torch.sum(predictor_output[-input.valid_step[i] :]),
output[i],
decimal=4,
)
| 11,061 | 37.409722 | 98 | py |
ReAgent | ReAgent-master/reagent/test/preprocessing/test_sparse_to_dense.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.preprocessing import normalization
from reagent.preprocessing.sparse_to_dense import (
PythonSparseToDenseProcessor,
StringKeySparseToDenseProcessor,
)
class TestSparseToDense(unittest.TestCase):
def setUp(self):
self.sorted_features = [1, 2, 5, 4]
self.str_keyed_sparse_data = [
{},
{"2": 0.3},
{"4": 0.5, "5": 0.4},
{"5": 0.3, "1": 0.5, "2": 0.1, "4": 0.7},
]
self.int_keyed_sparse_data = [
{int(k): v for k, v in d.items()} for d in self.str_keyed_sparse_data
]
self.expected_value_0 = torch.tensor(
[[0, 0, 0, 0], [0, 0.3, 0, 0], [0, 0, 0.4, 0.5], [0.5, 0.1, 0.3, 0.7]]
)
self.expected_presence_0 = torch.ones(4, 4).bool()
MISSING = normalization.MISSING_VALUE
self.expected_value_missing = torch.tensor(
[
[MISSING, MISSING, MISSING, MISSING],
[MISSING, 0.3, MISSING, MISSING],
[MISSING, MISSING, 0.4, 0.5],
[0.5, 0.1, 0.3, 0.7],
]
)
self.expected_presence_missing = torch.tensor(
[
[False, False, False, False],
[False, True, False, False],
[False, False, True, True],
[True, True, True, True],
]
)
def test_int_key_sparse_to_dense(self):
# int keys, set_missing_value_to_zero=False
processor = PythonSparseToDenseProcessor(
self.sorted_features, set_missing_value_to_zero=False
)
value, presence = processor.process(self.int_keyed_sparse_data)
assert torch.allclose(value, self.expected_value_missing)
assert torch.all(presence == self.expected_presence_missing)
def test_str_key_sparse_to_dense(self):
# string keys, set_missing_value_to_zero=True
processor = StringKeySparseToDenseProcessor(
self.sorted_features, set_missing_value_to_zero=True
)
value, presence = processor.process(self.str_keyed_sparse_data)
assert torch.allclose(value, self.expected_value_0)
assert torch.all(presence == self.expected_presence_0)
# string keys, set_missing_value_to_zero=False
processor = StringKeySparseToDenseProcessor(
self.sorted_features, set_missing_value_to_zero=False
)
value, presence = processor.process(self.str_keyed_sparse_data)
assert torch.allclose(value, self.expected_value_missing)
assert torch.all(presence == self.expected_presence_missing)
| 2,754 | 36.739726 | 82 | py |
ReAgent | ReAgent-master/reagent/test/preprocessing/test_preprocessing.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import numpy.testing as npt
import six
import torch
from reagent.preprocessing import identify_types, normalization, transforms
from reagent.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM
from reagent.preprocessing.normalization import (
MISSING_VALUE,
NormalizationParameters,
sort_features_by_normalization,
)
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.test.base.utils import NumpyFeatureProcessor
from reagent.test.preprocessing.preprocessing_util import (
CONTINUOUS_FEATURE_ID,
BOXCOX_FEATURE_ID,
ENUM_FEATURE_ID,
PROBABILITY_FEATURE_ID,
id_to_type,
read_data,
)
from scipy import special
class TestPreprocessing(unittest.TestCase):
def _feature_type_override(self, feature_id):
"""
This should only be used to test CONTINUOUS_ACTION
"""
if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:
return identify_types.CONTINUOUS_ACTION
return None
def test_prepare_normalization_and_normalize(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
name, values, 10, feature_type=self._feature_type_override(name)
)
for k, v in normalization_parameters.items():
if id_to_type(k) == CONTINUOUS:
self.assertEqual(v.feature_type, CONTINUOUS)
self.assertIs(v.boxcox_lambda, None)
self.assertIs(v.boxcox_shift, None)
elif id_to_type(k) == BOXCOX:
self.assertEqual(v.feature_type, BOXCOX)
self.assertIsNot(v.boxcox_lambda, None)
self.assertIsNot(v.boxcox_shift, None)
else:
assert v.feature_type == id_to_type(k)
preprocessor = Preprocessor(normalization_parameters, False)
sorted_features, _ = sort_features_by_normalization(normalization_parameters)
input_matrix = torch.zeros([10000, len(sorted_features)])
for i, feature in enumerate(sorted_features):
input_matrix[:, i] = torch.from_numpy(feature_value_map[feature])
normalized_feature_matrix = preprocessor(
input_matrix, (input_matrix != MISSING_VALUE)
)
normalized_features = {}
on_column = 0
for feature in sorted_features:
norm = normalization_parameters[feature]
if norm.feature_type == ENUM:
column_size = len(norm.possible_values)
else:
column_size = 1
normalized_features[feature] = normalized_feature_matrix[
:, on_column : (on_column + column_size)
]
on_column += column_size
self.assertTrue(
all(
[
np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)
for parameter in normalization_parameters.values()
]
)
)
for k, v in six.iteritems(normalized_features):
v = v.numpy()
self.assertTrue(np.all(np.isfinite(v)))
feature_type = normalization_parameters[k].feature_type
if feature_type == identify_types.PROBABILITY:
sigmoidv = special.expit(v)
self.assertTrue(
np.all(
np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))
)
)
elif feature_type == identify_types.ENUM:
possible_values = normalization_parameters[k].possible_values
self.assertEqual(v.shape[0], len(feature_value_map[k]))
self.assertEqual(v.shape[1], len(possible_values))
possible_value_map = {}
for i, possible_value in enumerate(possible_values):
possible_value_map[possible_value] = i
for i, row in enumerate(v):
original_feature = feature_value_map[k][i]
if abs(original_feature - MISSING_VALUE) < 0.01:
self.assertEqual(0.0, np.sum(row))
else:
self.assertEqual(
possible_value_map[original_feature],
np.where(row == 1)[0][0],
)
elif feature_type == identify_types.QUANTILE:
for i, feature in enumerate(v[0]):
original_feature = feature_value_map[k][i]
expected = NumpyFeatureProcessor.value_to_quantile(
original_feature, normalization_parameters[k].quantiles
)
self.assertAlmostEqual(feature, expected, 2)
elif feature_type == identify_types.BINARY:
pass
elif (
feature_type == identify_types.CONTINUOUS
or feature_type == identify_types.BOXCOX
):
one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)
zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)
zero_mean = np.isclose(np.mean(v), 0, atol=0.01)
self.assertTrue(
np.all(zero_mean),
"mean of feature {} is {}, not 0".format(k, np.mean(v)),
)
self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))
elif feature_type == identify_types.CONTINUOUS_ACTION:
less_than_max = v < 1
more_than_min = v > -1
self.assertTrue(
np.all(less_than_max),
"values are not less than 1: {}".format(v[less_than_max == False]),
)
self.assertTrue(
np.all(more_than_min),
"values are not more than -1: {}".format(v[more_than_min == False]),
)
else:
raise NotImplementedError()
def test_normalize_dense_matrix_enum(self):
normalization_parameters = {
1: NormalizationParameters(
identify_types.ENUM,
None,
None,
None,
None,
[12, 4, 2],
None,
None,
None,
),
2: NormalizationParameters(
identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
),
3: NormalizationParameters(
identify_types.ENUM, None, None, None, None, [15, 3], None, None, None
),
}
preprocessor = Preprocessor(normalization_parameters, False)
inputs = np.zeros([4, 3], dtype=np.float32)
feature_ids = [2, 1, 3] # Sorted according to feature type
inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]
inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]
inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]
inputs = torch.from_numpy(inputs)
normalized_feature_matrix = preprocessor(inputs, (inputs != MISSING_VALUE))
np.testing.assert_allclose(
np.array(
[
[1.0, 1, 0, 0, 1, 0],
[2.0, 0, 1, 0, 0, 1],
[3.0, 0, 0, 1, 1, 0],
[3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0
]
),
normalized_feature_matrix,
)
def test_persistency(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
name, values, feature_type=self._feature_type_override(name)
)
            values[0] = MISSING_VALUE  # Set one entry to MISSING_VALUE to test that missing values are handled
s = normalization.serialize(normalization_parameters)
read_parameters = normalization.deserialize(s)
        # Unfortunately, Thrift serialization seems to lose a bit of precision,
        # so comparing with `==` would be False.
self.assertEqual(read_parameters.keys(), normalization_parameters.keys())
for k in normalization_parameters:
self.assertEqual(
read_parameters[k].feature_type,
normalization_parameters[k].feature_type,
)
self.assertEqual(
read_parameters[k].possible_values,
normalization_parameters[k].possible_values,
)
for field in [
"boxcox_lambda",
"boxcox_shift",
"mean",
"stddev",
"quantiles",
"min_value",
"max_value",
]:
if getattr(normalization_parameters[k], field) is None:
self.assertEqual(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
else:
npt.assert_allclose(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
def test_quantile_boundary_logic(self):
"""Test quantile logic when feaure value == quantile boundary."""
input = torch.tensor([[0.0], [80.0], [100.0]])
norm_params = NormalizationParameters(
feature_type="QUANTILE",
boxcox_lambda=None,
boxcox_shift=None,
mean=0,
stddev=1,
possible_values=None,
quantiles=[0.0, 80.0, 100.0],
min_value=0.0,
max_value=100.0,
)
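        # 80.0 sits exactly on a quantile boundary; the expected mapping is 0.5,
        # i.e. the boundary value lands halfway between the two intervals it separates.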
preprocessor = Preprocessor({1: norm_params}, False)
output = preprocessor._preprocess_QUANTILE(0, input.float(), [norm_params])
expected_output = torch.tensor([[0.0], [0.5], [1.0]])
self.assertTrue(np.all(np.isclose(output, expected_output)))
def test_preprocessing_network(self):
feature_value_map = read_data()
normalization_parameters = {}
name_preprocessed_blob_map = {}
for feature_name, feature_values in feature_value_map.items():
normalization_parameters[feature_name] = normalization.identify_parameter(
feature_name,
feature_values,
feature_type=self._feature_type_override(feature_name),
)
            feature_values[0] = MISSING_VALUE  # Set one entry to MISSING_VALUE to test that missing values are handled
preprocessor = Preprocessor(
{feature_name: normalization_parameters[feature_name]}, False
)
feature_values_matrix = torch.from_numpy(np.expand_dims(feature_values, -1))
normalized_feature_values = preprocessor(
feature_values_matrix, (feature_values_matrix != MISSING_VALUE)
)
name_preprocessed_blob_map[feature_name] = normalized_feature_values.numpy()
test_features = NumpyFeatureProcessor.preprocess(
feature_value_map, normalization_parameters
)
for feature_name in feature_value_map:
normalized_features = name_preprocessed_blob_map[feature_name]
if feature_name != ENUM_FEATURE_ID:
normalized_features = np.squeeze(normalized_features, -1)
tolerance = 0.01
if feature_name == BOXCOX_FEATURE_ID:
# At the limit, boxcox has some numerical instability
tolerance = 0.5
non_matching = np.where(
np.logical_not(
np.isclose(
normalized_features.flatten(),
test_features[feature_name].flatten(),
rtol=tolerance,
atol=tolerance,
)
)
)
self.assertTrue(
np.all(
np.isclose(
normalized_features.flatten(),
test_features[feature_name].flatten(),
rtol=tolerance,
atol=tolerance,
)
),
"{} does not match: {} \n!=\n {}".format(
feature_name,
normalized_features.flatten()[non_matching],
test_features[feature_name].flatten()[non_matching],
),
)
def test_type_override_binary(self):
# Take a feature that should be identified as probability
feature_value_map = read_data()
probability_values = feature_value_map[PROBABILITY_FEATURE_ID]
# And ask for a binary anyways
parameter = normalization.identify_parameter(
"_", probability_values, feature_type=identify_types.BINARY
)
self.assertEqual(parameter.feature_type, "BINARY")
def test_type_override_continuous(self):
# Take a feature that should be identified as BOXCOX
feature_value_map = read_data()
probability_values = feature_value_map[BOXCOX_FEATURE_ID]
# And ask for a CONTINUOUS anyways
parameter = normalization.identify_parameter(
"_", probability_values, feature_type=identify_types.CONTINUOUS
)
self.assertEqual(parameter.feature_type, "CONTINUOUS")
def test_type_override_boxcox(self):
# Take a feature that should be identified as CONTINUOUS
feature_value_map = read_data()
probability_values = feature_value_map[CONTINUOUS_FEATURE_ID]
# And ask for a BOXCOX anyways
parameter = normalization.identify_parameter(
"_", probability_values, feature_type=identify_types.BOXCOX
)
self.assertEqual(parameter.feature_type, "BOXCOX")
def test_type_override_quantile(self):
# Take a feature that should be identified as CONTINUOUS
feature_value_map = read_data()
probability_values = feature_value_map[BOXCOX_FEATURE_ID]
# And ask for a QUANTILE anyways
parameter = normalization.identify_parameter(
"_", probability_values, feature_type=identify_types.QUANTILE
)
self.assertEqual(parameter.feature_type, "QUANTILE")
def test_columnvector(self):
def format_input2output(test_keys, inp_form):
test_data = {}
for ky in test_keys:
test_data[ky] = inp_form
test_instance = transforms.ColumnVector(test_keys)
output_data = test_instance(test_data)
return output_data
test_values = range(0, 5)
test_keys = []
for k in test_values:
test_keys.append(str(k))
# Possible input formats: tuple, list, torch.Tensor
for n_len in [1, 3]:
test_input_forms = [
(np.ones((n_len, 1)), 0),
n_len * [1],
torch.tensor(np.ones((n_len, 1))),
]
for inp_form in test_input_forms:
output_data = format_input2output(test_keys, inp_form)
for ky in test_keys:
self.assertEqual(output_data[ky].shape[0], n_len)
self.assertEqual(output_data[ky].shape[1], 1)
# Input as in row format
test_data = {}
for ky in test_keys:
test_data[ky] = (np.ones((1, 3)), 0)
test_instance = transforms.ColumnVector(test_keys)
with self.assertRaisesRegex(AssertionError, "Invalid shape for key"):
output_data = test_instance(test_data)
# Input as unimplemented type (number)
test_data = {}
for ky in test_keys:
test_data[ky] = 1
test_instance = transforms.ColumnVector(test_keys)
with self.assertRaisesRegex(NotImplementedError, "value of type"):
output_data = test_instance(test_data)
| 16,423 | 39.156479 | 88 | py |
ReAgent | ReAgent-master/reagent/test/preprocessing/test_postprocessing.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy.testing as npt
import torch
from reagent.preprocessing.identify_types import CONTINUOUS_ACTION, DO_NOT_PREPROCESS
from reagent.preprocessing.normalization import NormalizationParameters
from reagent.preprocessing.postprocessor import Postprocessor
from reagent.preprocessing.preprocessor import Preprocessor
class TestPostprocessing(unittest.TestCase):
def test_continuous_action(self):
normalization_params = {
i: NormalizationParameters(
feature_type=CONTINUOUS_ACTION, min_value=-5.0 * i, max_value=10.0 * i
)
for i in range(1, 5)
}
preprocessor = Preprocessor(normalization_params, use_gpu=False)
postprocessor = Postprocessor(normalization_params, use_gpu=False)
x = torch.rand(3, 4) * torch.tensor([15, 30, 45, 60]) + torch.tensor(
[-5, -10, -15, -20]
)
presence = torch.ones_like(x, dtype=torch.uint8)
y = postprocessor(preprocessor(x, presence))
npt.assert_allclose(x, y, rtol=1e-4)
def test_do_not_preprocess(self):
normalization_params = {
i: NormalizationParameters(feature_type=DO_NOT_PREPROCESS)
for i in range(1, 5)
}
preprocessor = Preprocessor(normalization_params, use_gpu=False)
postprocessor = Postprocessor(normalization_params, use_gpu=False)
x = torch.randn(3, 4)
presence = torch.ones_like(x, dtype=torch.uint8)
y = postprocessor(preprocessor(x, presence))
npt.assert_allclose(x, y)
| 1,669 | 36.954545 | 86 | py |
ReAgent | ReAgent-master/reagent/test/preprocessing/test_transforms.py | import unittest
from copy import deepcopy
from typing import List
from unittest.mock import Mock, patch
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.preprocessing import transforms
from reagent.preprocessing.types import InputColumn
class TestTransforms(unittest.TestCase):
def setUp(self):
# add custom compare function for torch.Tensor
self.addTypeEqualityFunc(torch.Tensor, TestTransforms.are_torch_tensor_equal)
@staticmethod
def are_torch_tensor_equal(tensor_0, tensor_1, msg=None):
if torch.all(tensor_0 == tensor_1):
return True
raise TestTransforms.failureException("non-equal pytorch tensors found", msg)
def assertTorchTensorEqual(self, tensor_0, tensor_1, msg=None):
self.assertIsInstance(
tensor_0, torch.Tensor, "first argument is not a torch.Tensor"
)
self.assertIsInstance(
tensor_1, torch.Tensor, "second argument is not a torch.Tensor"
)
self.assertEqual(tensor_0, tensor_1, msg=msg)
def assertDictComparatorEqual(self, a, b, cmp):
"""
        assertDictEqual() compares values with ==. This method lets the caller
        override the comparator via the cmp argument.
"""
self.assertIsInstance(a, dict, "First argument is not a dictionary")
self.assertIsInstance(b, dict, "Second argument is not a dictionary")
self.assertSequenceEqual(a.keys(), b.keys())
for key in a.keys():
self.assertTrue(cmp(a[key], b[key]), msg=f"Different at key {key}")
def assertDictOfTensorEqual(self, a, b):
"""
Helper method to compare dicts with values of type Tensor.
Cannot use assertDictEqual when values are of type Tensor since
tensor1 == tensor2 results in a tensor of bools. Use this instead.
"""
def _tensor_cmp(a, b):
return torch.all(a == b)
self.assertDictComparatorEqual(a, b, _tensor_cmp)
def test_Compose(self):
t1, t2 = Mock(return_value=2), Mock(return_value=3)
compose = transforms.Compose(t1, t2)
data = 1
out = compose(data)
t1.assert_called_with(1)
t2.assert_called_with(2)
self.assertEqual(out, 3)
def test_ValuePresence(self):
vp = transforms.ValuePresence()
d1 = {"a": 1, "a_presence": 0, "b": 2}
d2 = {"a_presence": 0, "b": 2}
o1 = vp(d1)
o2 = vp(d2)
self.assertEqual(o1, {"a": (1, 0), "b": 2})
self.assertEqual(o2, {"a_presence": 0, "b": 2})
def test_MaskByPresence(self):
keys = ["a", "b"]
mbp = transforms.MaskByPresence(keys)
data = {
"a": (torch.tensor(1), torch.tensor(0)),
"b": (torch.tensor(3), torch.tensor(1)),
}
expected = {"a": torch.tensor(0), "b": torch.tensor(3)}
out = mbp(data)
self.assertEqual(out["a"], expected["a"])
self.assertEqual(out["b"], expected["b"])
with self.assertRaisesRegex(Exception, "Not valid value"):
data2 = {
"a": torch.tensor(1),
"b": (torch.tensor(3), torch.tensor(1)),
}
out = mbp(data2)
with self.assertRaisesRegex(Exception, "Unmatching value shape"):
data3 = {
"a": (torch.tensor(1), torch.tensor([0, 2])),
"b": (torch.tensor(3), torch.tensor(1)),
}
out = mbp(data3)
def test_StackDenseFixedSizeArray(self):
# happy path: value is type Tensor; check cast to float
value = torch.eye(4).to(dtype=torch.int) # start as int
data = {"a": value}
out = transforms.StackDenseFixedSizeArray(data.keys(), size=4)(data)
expected = {"a": value.to(dtype=torch.float)}
self.assertDictOfTensorEqual(out, expected)
self.assertTrue(out["a"].dtype == torch.float, msg="dtype != float")
# happy path: value is list w/ elements type Tuple[Tensor, Tensor]
presence = torch.tensor([[1, 1, 1], [1, 1, 1]])
data = {
"a": [
(torch.tensor([[0, 0, 0], [1, 1, 1]]), presence),
(torch.tensor([[2, 2, 2], [3, 3, 3]]), presence),
],
"b": [
(torch.tensor([[3, 3, 3], [2, 2, 2]]), presence),
(torch.tensor([[1, 1, 1], [0, 0, 0]]), presence),
],
}
out = transforms.StackDenseFixedSizeArray(data.keys(), size=3)(data)
expected = {
"a": torch.tile(torch.arange(4).view(-1, 1).to(dtype=torch.float), (1, 3)),
"b": torch.tile(
torch.arange(4).flip(dims=(0,)).view(-1, 1).to(dtype=torch.float),
(1, 3),
),
}
self.assertDictOfTensorEqual(out, expected)
# raise for tensor wrong shape
with self.assertRaisesRegex(ValueError, "Wrong shape"):
sdf = transforms.StackDenseFixedSizeArray(["a"], size=3)
sdf({"a": torch.ones(2)})
# raise for tensor wrong ndim
with self.assertRaisesRegex(ValueError, "Wrong shape"):
sdf = transforms.StackDenseFixedSizeArray(["a"], size=2)
sdf({"a": torch.zeros(2, 2, 2)})
def test_Lambda(self):
lam = transforms.Lambda(keys=["a", "b", "c"], fn=lambda x: x + 1)
data = {"a": 1, "b": 2, "c": 3, "d": 4}
out = lam(data)
self.assertEqual(out, {"a": 2, "b": 3, "c": 4, "d": 4})
def test_SelectValuePresenceColumns(self):
block = np.reshape(np.arange(16), (4, 4))
data = {"a": (block, block + 16), "c": 1}
svp = transforms.SelectValuePresenceColumns(
source="a", dest="b", indices=[1, 2]
)
out = svp(data)
expected = {
"a": (block, block + 16),
"b": (block[:, [1, 2]], block[:, [1, 2]] + 16),
"c": 1,
}
for key in ["a", "b"]:
self.assertTrue(np.all(out[key][0] == expected[key][0]))
self.assertTrue(np.all(out[key][1] == expected[key][1]))
self.assertEqual(out["c"], expected["c"])
@patch("reagent.preprocessing.transforms.Preprocessor")
def test_DenseNormalization(self, Preprocessor):
a_out = torch.tensor(1)
b_out = torch.tensor(2)
c_out = torch.tensor(3.0)
preprocessor = Mock(side_effect=[a_out, b_out])
Preprocessor.return_value = preprocessor
# of form (value, presence)
a_in = (torch.tensor([1, torch.nan, 2]), torch.tensor([1, 1, 1]))
b_in = (torch.tensor([1, 2, torch.nan]), torch.tensor([0, 1, 1]))
data = {"a": a_in, "b": b_in, "c": c_out}
normalization_data = Mock()
dn = transforms.DenseNormalization(
keys=["a", "b"], normalization_data=normalization_data
)
out = dn(data)
self.assertEqual(out["a"], a_out.float())
self.assertEqual(out["b"], b_out.float())
        # ensure keys not passed to the transform are unchanged
self.assertEqual(out["c"], c_out)
in_1, in_2 = [call_args.args for call_args in preprocessor.call_args_list]
self.assertEqual(torch.stack(in_1), torch.stack(a_in))
self.assertEqual(torch.stack(in_2), torch.stack(b_in))
@patch("reagent.preprocessing.transforms.Preprocessor")
def test_FixedLengthSequenceDenseNormalization(self, Preprocessor):
# test key mapping
rand_gen = torch.Generator().manual_seed(0)
a_batch_size = 2
b_batch_size = 3
a_dim = 13
b_dim = 11
expected_length = 7
a_T = (
torch.rand(
a_batch_size * expected_length, a_dim, generator=rand_gen
), # value
torch.rand(a_batch_size * expected_length, a_dim, generator=rand_gen)
> 0.5, # presence
)
b_T = (
torch.rand(
b_batch_size * expected_length, b_dim, generator=rand_gen
), # value
torch.rand(b_batch_size * expected_length, b_dim, generator=rand_gen)
> 0.5, # presence
)
# expected values after preprocessing
a_TN = a_T[0] + 1
b_TN = b_T[0] + 1
# copy used for checking inplace modifications
a_TN_copy = deepcopy(a_TN)
b_TN_copy = deepcopy(b_TN)
a_offsets = torch.arange(0, a_batch_size * expected_length, expected_length)
b_offsets = torch.arange(0, b_batch_size * expected_length, expected_length)
a_in = {1: (a_offsets, a_T), 2: 0}
b_in = {1: (b_offsets, b_T), 2: 1}
c_out = 2
# input data
data = {"a": a_in, "b": b_in, "c": c_out}
# copy used for checking inplace modifications
data_copy = deepcopy(data)
Preprocessor.return_value = Mock(side_effect=[a_TN, b_TN])
flsdn = transforms.FixedLengthSequenceDenseNormalization(
keys=["a", "b"],
sequence_id=1,
normalization_data=Mock(),
)
out = flsdn(data)
# data is modified inplace and returned
self.assertEqual(data, out)
# check preprocessor number of calls
self.assertEqual(Preprocessor.call_count, 1)
self.assertEqual(Preprocessor.return_value.call_count, 2)
# result contains original keys and new processed keys
self.assertSetEqual(set(out.keys()), {"a", "b", "c", "a:1", "b:1"})
def assertKeySeqIdItem(item_0, item_1):
self.assertTorchTensorEqual(item_0[0], item_1[0])
self.assertTorchTensorEqual(item_0[1][0], item_1[1][0])
self.assertTorchTensorEqual(item_0[1][1], item_1[1][1])
# original keys should keep their value
for key in ("a", "b"):
# no change in the output
assertKeySeqIdItem(out[key][1], data_copy[key][1])
# no change in untouched seq id
self.assertEqual(out[key][2], data_copy[key][2])
# no change in the non-processed key
self.assertEqual(out["c"], data_copy["c"])
# check output shapes
self.assertListEqual(
[*out["a:1"].shape], [a_batch_size, expected_length, a_dim]
)
self.assertListEqual(
[*out["b:1"].shape], [b_batch_size, expected_length, b_dim]
)
# no inplace change in normalized tensors
self.assertTorchTensorEqual(a_TN, a_TN_copy)
self.assertTorchTensorEqual(b_TN, b_TN_copy)
        # check that the output has been properly slated (reshaped into slates)
self.assertTorchTensorEqual(
out["a:1"], a_TN.view(a_batch_size, expected_length, a_dim)
)
self.assertTorchTensorEqual(
out["b:1"], b_TN.view(b_batch_size, expected_length, b_dim)
)
@patch("reagent.preprocessing.transforms.make_sparse_preprocessor")
def test_MapIDListFeatures(self, mock_make_sparse_preprocessor):
data = {
InputColumn.STATE_ID_LIST_FEATURES: {0: [torch.tensor(1), torch.tensor(2)]},
InputColumn.STATE_ID_SCORE_LIST_FEATURES: {
1: [
torch.tensor(1),
torch.tensor(2),
torch.tensor(3),
]
},
}
mock_make_sparse_preprocessor.return_value.preprocess_id_list.return_value = {
InputColumn.STATE_ID_LIST_FEATURES: [torch.tensor(2), torch.tensor(3)]
}
mock_make_sparse_preprocessor.return_value.preprocess_id_score_list.return_value = {
InputColumn.STATE_ID_SCORE_LIST_FEATURES: [
torch.tensor(4),
torch.tensor(5),
torch.tensor(6),
]
}
state_id_list_columns: List[str] = [
InputColumn.STATE_ID_LIST_FEATURES,
InputColumn.NEXT_STATE_ID_LIST_FEATURES,
]
state_id_score_list_columns: List[str] = [
InputColumn.STATE_ID_SCORE_LIST_FEATURES,
InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES,
]
state_feature_config = rlt.ModelFeatureConfig(
id_list_feature_configs=[
rlt.IdListFeatureConfig(
name=InputColumn.STATE_ID_LIST_FEATURES,
feature_id=0,
id_mapping_name="state_id_list_features_mapping",
)
],
id_score_list_feature_configs=[
rlt.IdScoreListFeatureConfig(
name=InputColumn.STATE_ID_SCORE_LIST_FEATURES,
feature_id=1,
id_mapping_name="state_id_score_list_features_mapping",
)
],
id_mapping_config={
"state_id_list_features_mapping": rlt.IdMappingUnion(
explicit_mapping=rlt.ExplicitMapping(ids=[0, 1, 2])
),
"state_id_score_list_features_mapping": rlt.IdMappingUnion(
explicit_mapping=rlt.ExplicitMapping(ids=[3, 4, 5])
),
},
)
map_id_list_features = transforms.MapIDListFeatures(
id_list_keys=state_id_list_columns,
id_score_list_keys=state_id_score_list_columns,
feature_config=state_feature_config,
device=torch.device("cpu"),
)
out = map_id_list_features(data)
# output should contain all k in id_list_keys & id_score_list_keys
self.assertEqual(len(out), 4)
        # The key should map to None if the data doesn't have it
self.assertIsNone(
out[InputColumn.NEXT_STATE_ID_LIST_FEATURES], "It should be filtered out"
)
        # The value is remapped according to the sparse-preprocessor mapping
self.assertEqual(
out[InputColumn.STATE_ID_LIST_FEATURES],
{InputColumn.STATE_ID_LIST_FEATURES: [torch.tensor(2), torch.tensor(3)]},
)
# Testing assertion in the call method
wrong_data = {
InputColumn.STATE_ID_LIST_FEATURES: [torch.tensor(1), torch.tensor(2)],
InputColumn.STATE_ID_SCORE_LIST_FEATURES: [
torch.tensor(1),
torch.tensor(2),
torch.tensor(3),
],
}
with self.assertRaises(AssertionError):
map_id_list_features(wrong_data)
# Testing assertion in the constructor
state_id_list_columns: List[str] = [
InputColumn.STATE_ID_LIST_FEATURES,
InputColumn.NEXT_STATE_ID_LIST_FEATURES,
]
state_id_score_list_columns: List[str] = [
InputColumn.STATE_ID_LIST_FEATURES,
InputColumn.NEXT_STATE_ID_LIST_FEATURES,
]
with self.assertRaises(AssertionError):
transforms.MapIDListFeatures(
id_list_keys=state_id_list_columns,
id_score_list_keys=state_id_score_list_columns,
feature_config=state_feature_config,
device=torch.device("cpu"),
)
def test_OneHotActions(self):
keys = ["0", "1", "2"]
num_actions = 2
oha = transforms.OneHotActions(keys, num_actions)
data_in = {"0": torch.tensor(0), "1": torch.tensor(1), "2": torch.tensor(2)}
data_out = oha(data_in)
expected = {
"0": torch.tensor([1, 0]),
"1": torch.tensor([0, 1]),
"2": torch.tensor([0, 0]),
}
self.assertDictOfTensorEqual(data_out, expected)
def test_FixedLengthSequences(self):
# of form {sequence_id: (offsets, Tuple(Tensor, Tensor))}
a_T = (torch.tensor([0, 1]), torch.tensor([1, 0]))
b_T = (torch.tensor([1, 1]), torch.tensor([1, 0]))
a_in = {1: (torch.tensor([0]), a_T)}
b_in = {1: (torch.tensor([0, 2]), b_T)}
fls1 = transforms.FixedLengthSequences(keys=["a", "b"], sequence_id=1)
fls2 = transforms.FixedLengthSequences(
keys=["a", "b"], sequence_id=1, expected_length=2
)
fls3 = transforms.FixedLengthSequences(
keys=["a", "b"], sequence_id=1, expected_length=2, to_keys=["to_a", "to_b"]
)
o1 = fls1({"a": a_in, "b": b_in})
o2 = fls2({"a": a_in, "b": b_in})
o3 = fls3({"a": a_in, "b": b_in})
# o1, o2 should contain only keys
self.assertEqual(len(o1), 2)
self.assertEqual(len(o2), 2)
# o3 should contain keys & to_keys
self.assertEqual(len(o3), 4)
# ensure `T` is set back to key
self.assertTrue(
torch.all(o1["a"][0] == a_T[0]) and torch.all(o1["a"][1] == a_T[1])
)
self.assertTrue(
torch.all(o1["b"][0] == b_T[0]) and torch.all(o1["b"][1] == b_T[1])
)
self.assertTrue(
torch.all(o2["a"][0] == a_T[0]) and torch.all(o2["a"][1] == a_T[1])
)
self.assertTrue(
torch.all(o2["b"][0] == b_T[0]) and torch.all(o2["b"][1] == b_T[1])
)
# ensure keys not changed
self.assertEqual(o3["a"], a_in)
self.assertEqual(o3["b"], b_in)
        # ensure `T` is set to the corresponding to_key
self.assertTrue(
torch.all(o3["to_a"][0] == a_T[0]) and torch.all(o3["to_a"][1] == a_T[1])
)
self.assertTrue(
torch.all(o3["to_b"][0] == b_T[0]) and torch.all(o3["to_b"][1] == b_T[1])
)
# Testing assertions in the call method
# TODO testing assert regarding offsets length compared to value
c_T = (torch.tensor([0, 1]), torch.tensor([1, 1]))
with self.assertRaisesRegex(Exception, "Unexpected offsets"):
# wrong expected length
fls = transforms.FixedLengthSequences(
keys=["a", "b"], sequence_id=1, expected_length=1
)
fls({"a": a_in, "b": b_in})
with self.assertRaisesRegex(Exception, "Unexpected offsets"):
# wrong offsets
c_in = {1: (torch.tensor([0, 1]), c_T)}
fls = transforms.FixedLengthSequences(keys=["a", "b", "c"], sequence_id=1)
fls({"a": a_in, "b": b_in, "c": c_in})
# Testing assertion in the constructor
with self.assertRaises(AssertionError):
transforms.FixedLengthSequences(
keys=["a", "b"], sequence_id=1, to_keys=["to_a"]
)
def test_SlateView(self):
# Unit tests for the SlateView class
sv = transforms.SlateView(keys=["a"], slate_size=-1)
# GIVEN a SlateView with keys = ["a"]
# WHEN data is passed in under a key "b"
# THEN the value for "b" should not be unflattened since the key "b" is not in SlateView.keys!
sv.slate_size = 1
sv.keys = ["a"]
a_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
b_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
data = {"a": a_in, "b": b_in}
out = sv(data)
self.assertEqual(out["b"].shape, torch.Size([4, 2]))
self.assertTorchTensorEqual(out["b"], b_in)
# GIVEN slate.size = 1 and keys = ["a", "b"]
# WHEN input shape is [4, 2]
# THEN output shape should be [4, 1, 2] for all keys
sv.slate_size = 1
sv.keys = ["a", "b"]
a_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
b_in = torch.tensor([[10, 20], [30, 40], [50, 60], [70, 80]])
data = {"a": a_in, "b": b_in}
out = sv(data)
a_out_412 = torch.tensor([[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]])
b_out_412 = torch.tensor([[[10, 20]], [[30, 40]], [[50, 60]], [[70, 80]]])
self.assertEqual(out["a"].shape, torch.Size([4, 1, 2]))
self.assertEqual(out["b"].shape, torch.Size([4, 1, 2]))
self.assertDictOfTensorEqual({"a": a_out_412, "b": b_out_412}, out)
# GIVEN a SlateView with keys = ["a", "b"]
# WHEN data is passed in missing one or more of those keys
# THEN a KeyError should be raised
sv.keys = ["a", "b"]
a_in = torch.tensor([[1, 2], [3, 4]])
c_in = torch.tensor([[1, 2], [3, 4]])
data = {"a": a_in, "c": c_in}
with self.assertRaises(KeyError):
out = sv(data)
# GIVEN a SlateView with keys = ["a"]
# WHEN data is passed in that is of an invalid shape
# THEN a RuntimeError should be raised
sv.slate_size = 2
sv.keys = ["a"]
a_in = torch.tensor([[1, 2]])
data = {"a": a_in}
with self.assertRaises(RuntimeError):
out = sv(data)
# GIVEN slate.size = 2 and keys = ["a"]
# WHEN input shape is [4, 3]
# THEN output shape should be [2, 2, 3]
sv.slate_size = 2
sv.keys = ["a"]
a_in = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
data = {"a": a_in}
out = sv(data)
a_out_223 = torch.tensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
self.assertEqual(out["a"].shape, torch.Size([2, 2, 3]))
self.assertDictOfTensorEqual({"a": a_out_223}, out)
| 21,051 | 38.349533 | 102 | py |
ReAgent | ReAgent-master/reagent/test/prediction/test_predictor_wrapper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import random
import unittest
import numpy.testing as npt
import reagent.core.types as rlt
import reagent.models as models
import torch
from reagent.model_utils.seq2slate_utils import Seq2SlateMode, Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.prediction.predictor_wrapper import (
ActorPredictorWrapper,
ActorWithPreprocessor,
DiscreteDqnPredictorWrapper,
DiscreteDqnWithPreprocessor,
ParametricDqnPredictorWrapper,
ParametricDqnWithPreprocessor,
Seq2SlatePredictorWrapper,
Seq2SlateWithPreprocessor,
)
from reagent.prediction.ranking.predictor_wrapper import (
DeterminantalPointProcessPredictorWrapper,
Kernel,
)
from reagent.preprocessing.postprocessor import Postprocessor
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.test.prediction.test_prediction_utils import _cont_norm, _cont_action_norm
from reagent.test.prediction.test_prediction_utils import (
change_cand_size_slate_ranking,
)
def seq2slate_input_prototype_to_ranking_input(
state_input_prototype,
candidate_input_prototype,
state_preprocessor,
candidate_preprocessor,
):
batch_size, candidate_size, candidate_dim = candidate_input_prototype[0].shape
preprocessed_state = state_preprocessor(
state_input_prototype[0], state_input_prototype[1]
)
preprocessed_candidates = candidate_preprocessor(
candidate_input_prototype[0].view(batch_size * candidate_size, candidate_dim),
candidate_input_prototype[1].view(batch_size * candidate_size, candidate_dim),
).view(batch_size, candidate_size, -1)
return rlt.PreprocessedRankingInput.from_tensors(
state=preprocessed_state,
src_seq=preprocessed_candidates,
)
class TestPredictorWrapper(unittest.TestCase):
def test_discrete_wrapper(self):
ids = range(1, 5)
state_normalization_parameters = {i: _cont_norm() for i in ids}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
action_dim = 2
dqn = models.FullyConnectedDQN(
state_dim=len(state_normalization_parameters),
action_dim=action_dim,
sizes=[16],
activations=["relu"],
)
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(feature_id=i, name=f"feat_{i}") for i in ids
]
)
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
dqn, state_preprocessor, state_feature_config
)
action_names = ["L", "R"]
wrapper = DiscreteDqnPredictorWrapper(
dqn_with_preprocessor, action_names, state_feature_config
)
input_prototype = dqn_with_preprocessor.input_prototype()[0]
output_action_names, q_values = wrapper(input_prototype)
self.assertEqual(action_names, output_action_names)
self.assertEqual(q_values.shape, (1, 2))
state_with_presence = input_prototype.float_features_with_presence
expected_output = dqn(rlt.FeatureData(state_preprocessor(*state_with_presence)))
self.assertTrue((expected_output == q_values).all())
def test_discrete_wrapper_with_id_list(self):
state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
action_dim = 2
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5)
],
id_list_feature_configs=[
rlt.IdListFeatureConfig(
name="A", feature_id=10, id_mapping_name="A_mapping"
)
],
id_mapping_config={
"A_mapping": rlt.IdMappingUnion(
explicit_mapping=rlt.ExplicitMapping(ids=[0, 1, 2])
)
},
)
embedding_concat = models.EmbeddingBagConcat(
state_dim=len(state_normalization_parameters),
model_feature_config=state_feature_config,
embedding_dim=8,
)
dqn = models.Sequential(
embedding_concat,
rlt.TensorFeatureData(),
models.FullyConnectedDQN(
embedding_concat.output_dim,
action_dim=action_dim,
sizes=[16],
activations=["relu"],
),
)
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
dqn, state_preprocessor, state_feature_config
)
action_names = ["L", "R"]
wrapper = DiscreteDqnPredictorWrapper(
dqn_with_preprocessor, action_names, state_feature_config
)
input_prototype = dqn_with_preprocessor.input_prototype()[0]
output_action_names, q_values = wrapper(input_prototype)
self.assertEqual(action_names, output_action_names)
self.assertEqual(q_values.shape, (1, 2))
feature_id_to_name = {
config.feature_id: config.name
for config in state_feature_config.id_list_feature_configs
}
state_id_list_features = {
feature_id_to_name[k]: v
for k, v in input_prototype.id_list_features.items()
}
state_with_presence = input_prototype.float_features_with_presence
expected_output = dqn(
rlt.FeatureData(
float_features=state_preprocessor(*state_with_presence),
id_list_features=state_id_list_features,
)
)
self.assertTrue((expected_output == q_values).all())
def test_parametric_wrapper(self):
state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
action_normalization_parameters = {i: _cont_norm() for i in range(5, 9)}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
action_preprocessor = Preprocessor(action_normalization_parameters, False)
dqn = models.FullyConnectedCritic(
state_dim=len(state_normalization_parameters),
action_dim=len(action_normalization_parameters),
sizes=[16],
activations=["relu"],
)
dqn_with_preprocessor = ParametricDqnWithPreprocessor(
dqn,
state_preprocessor=state_preprocessor,
action_preprocessor=action_preprocessor,
)
wrapper = ParametricDqnPredictorWrapper(dqn_with_preprocessor)
input_prototype = dqn_with_preprocessor.input_prototype()
output_action_names, q_value = wrapper(*input_prototype)
self.assertEqual(output_action_names, ["Q"])
self.assertEqual(q_value.shape, (1, 1))
expected_output = dqn(
rlt.FeatureData(state_preprocessor(*input_prototype[0])),
rlt.FeatureData(action_preprocessor(*input_prototype[1])),
)
self.assertTrue((expected_output == q_value).all())
def test_actor_wrapper(self):
state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
action_normalization_parameters = {
i: _cont_action_norm() for i in range(101, 105)
}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
postprocessor = Postprocessor(action_normalization_parameters, False)
# Test with FullyConnectedActor to make behavior deterministic
actor = models.FullyConnectedActor(
state_dim=len(state_normalization_parameters),
action_dim=len(action_normalization_parameters),
sizes=[16],
activations=["relu"],
)
state_feature_config = rlt.ModelFeatureConfig()
actor_with_preprocessor = ActorWithPreprocessor(
actor, state_preprocessor, state_feature_config, postprocessor
)
wrapper = ActorPredictorWrapper(actor_with_preprocessor, state_feature_config)
input_prototype = actor_with_preprocessor.input_prototype()[0]
action, _log_prob = wrapper(input_prototype)
self.assertEqual(action.shape, (1, len(action_normalization_parameters)))
expected_output = postprocessor(
actor(rlt.FeatureData(state_preprocessor(*input_prototype[0]))).action
)
self.assertTrue((expected_output == action).all())
def validate_seq2slate_output(self, expected_output, wrapper_output):
ranked_per_seq_probs, ranked_tgt_out_idx = (
expected_output.ranked_per_seq_probs,
expected_output.ranked_tgt_out_idx,
)
# -2 to offset padding symbol and decoder start symbol
ranked_tgt_out_idx -= 2
self.assertTrue(ranked_per_seq_probs == wrapper_output[0])
self.assertTrue(torch.all(torch.eq(ranked_tgt_out_idx, wrapper_output[1])))
def test_seq2slate_transformer_frechet_sort_wrapper(self):
self._test_seq2slate_wrapper(
model="transformer", output_arch=Seq2SlateOutputArch.FRECHET_SORT
)
def test_seq2slate_transformer_autoregressive_wrapper(self):
self._test_seq2slate_wrapper(
model="transformer", output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE
)
def _test_seq2slate_wrapper(self, model: str, output_arch: Seq2SlateOutputArch):
state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
candidate_normalization_parameters = {i: _cont_norm() for i in range(101, 106)}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
candidate_preprocessor = Preprocessor(candidate_normalization_parameters, False)
candidate_size = 10
slate_size = 4
seq2slate = None
if model == "transformer":
seq2slate = Seq2SlateTransformerNet(
state_dim=len(state_normalization_parameters),
candidate_dim=len(candidate_normalization_parameters),
num_stacked_layers=2,
num_heads=2,
dim_model=10,
dim_feedforward=10,
max_src_seq_len=candidate_size,
max_tgt_seq_len=slate_size,
output_arch=output_arch,
temperature=0.5,
)
else:
raise NotImplementedError(f"model type {model} is unknown")
seq2slate_with_preprocessor = Seq2SlateWithPreprocessor(
seq2slate, state_preprocessor, candidate_preprocessor, greedy=True
)
wrapper = Seq2SlatePredictorWrapper(seq2slate_with_preprocessor)
(
state_input_prototype,
candidate_input_prototype,
) = seq2slate_with_preprocessor.input_prototype()
wrapper_output = wrapper(state_input_prototype, candidate_input_prototype)
ranking_input = seq2slate_input_prototype_to_ranking_input(
state_input_prototype,
candidate_input_prototype,
state_preprocessor,
candidate_preprocessor,
)
expected_output = seq2slate(
ranking_input,
mode=Seq2SlateMode.RANK_MODE,
tgt_seq_len=candidate_size,
greedy=True,
)
self.validate_seq2slate_output(expected_output, wrapper_output)
# Test Seq2SlatePredictorWrapper can handle variable lengths of inputs
random_length = random.randint(candidate_size + 1, candidate_size * 2)
(
state_input_prototype,
candidate_input_prototype,
) = change_cand_size_slate_ranking(
seq2slate_with_preprocessor.input_prototype(), random_length
)
wrapper_output = wrapper(state_input_prototype, candidate_input_prototype)
ranking_input = seq2slate_input_prototype_to_ranking_input(
state_input_prototype,
candidate_input_prototype,
state_preprocessor,
candidate_preprocessor,
)
expected_output = seq2slate(
ranking_input,
mode=Seq2SlateMode.RANK_MODE,
tgt_seq_len=random_length,
greedy=True,
)
self.validate_seq2slate_output(expected_output, wrapper_output)
def test_determinantal_point_process_wrapper_linear_kernel(self):
# The second and third items are identical (similarity=1)
# So the second and third items have strong repulsion
# The expected ranked indices should be 2, 0, 1
quality_scores = torch.tensor(
[
[4],
[5],
[8],
]
)
feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]])
wrapper = DeterminantalPointProcessPredictorWrapper(
alpha=1.0, kernel=Kernel.Linear
)
ranked_idx, determinants, L = wrapper(quality_scores, feature_vectors)
npt.assert_array_almost_equal(ranked_idx, [2, 0, 1])
npt.assert_array_almost_equal(
determinants,
torch.tensor(
[
[16, 25, 64],
[1024, 0, wrapper.MIN_VALUE],
[wrapper.MIN_VALUE, 0, wrapper.MIN_VALUE],
]
),
)
npt.assert_array_almost_equal(L, [[16, 0, 0], [0, 25, 40], [0, 40, 64]])
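        # Arithmetic behind the expected values above, assuming the wrapper builds the
        # DPP kernel as L[i][j] = quality_i * quality_j * <f_i, f_j> for the given unit
        # feature vectors: diag(L) = [16, 25, 64] gives the first-row determinants; after
        # greedily picking item 2, det([[64, 0], [0, 16]]) = 1024 beats
        # det([[64, 40], [40, 25]]) = 0, so item 0 comes next and item 1 last -> [2, 0, 1].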
# Test shorter rerank positions
        # All three items have different categories, so the final order is 1, 2, 0 if we
        # rerank the full slate. If rerank_topk=1, then the expected order is 1, 0, 2
quality_scores = torch.tensor(
[
[4],
[6],
[5],
]
)
feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
wrapper = DeterminantalPointProcessPredictorWrapper(
alpha=1.0, kernel=Kernel.Linear, rerank_topk=1
)
ranked_idx, _, _ = wrapper(quality_scores, feature_vectors)
npt.assert_array_almost_equal(ranked_idx, [1, 0, 2])
def test_determinantal_point_process_wrapper_rbf_kernel(self):
# The second and third items are identical (similarity=1)
# So the second and third items have strong repulsion
# The expected ranked indices should be 2, 0, 1
quality_scores = torch.tensor(
[
[4],
[5],
[8],
]
)
feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]])
wrapper = DeterminantalPointProcessPredictorWrapper(
alpha=1.0, kernel=Kernel.RBF
)
ranked_idx, determinants, L = wrapper(quality_scores, feature_vectors)
npt.assert_array_almost_equal(ranked_idx, [2, 0, 1])
npt.assert_array_almost_equal(
determinants,
torch.tensor(
[
[16, 25, 64],
[885.41766159, 0, wrapper.MIN_VALUE],
[wrapper.MIN_VALUE, 0, wrapper.MIN_VALUE],
]
),
decimal=3,
)
npt.assert_array_almost_equal(
L, [[16, 7.3576, 11.7721], [7.3576, 25, 40], [11.7721, 40, 64]], decimal=3
)
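        # Same reasoning as the linear case, but assuming an RBF kernel
        # L[i][j] = quality_i * quality_j * exp(-||f_i - f_j||^2 / 2); for example
        # L[0][1] = 4 * 5 * exp(-1) ~= 7.3576, while the identical items 1 and 2
        # keep similarity 1 and hence L[1][2] = 5 * 8 = 40.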
# Test shorter rerank positions
        # All three items have different categories, so the final order is 1, 2, 0 if we
        # rerank the full slate. If rerank_topk=1, then the expected order is 1, 0, 2
quality_scores = torch.tensor(
[
[4],
[6],
[5],
]
)
feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
wrapper = DeterminantalPointProcessPredictorWrapper(
alpha=1.0, kernel=Kernel.RBF, rerank_topk=1
)
ranked_idx, _, _ = wrapper(quality_scores, feature_vectors)
npt.assert_array_almost_equal(ranked_idx, [1, 0, 2])
| 16,078 | 38.898263 | 88 | py |
ReAgent | ReAgent-master/reagent/test/prediction/test_model_with_preprocessor.py | import unittest
import numpy.testing as npt
import torch
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.prediction.predictor_wrapper import Seq2SlateWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.test.prediction.test_prediction_utils import (
_cont_norm,
change_cand_size_slate_ranking,
)
class TestModelWithPreprocessor(unittest.TestCase):
def verify_results(self, expected_output, scripted_output):
for i, j in zip(expected_output, scripted_output):
npt.assert_array_equal(i.detach(), j.detach())
def test_seq2slate_transformer_frechet_sort_model_with_preprocessor(self):
self._test_seq2slate_model_with_preprocessor(
model="transformer", output_arch=Seq2SlateOutputArch.FRECHET_SORT
)
def test_seq2slate_transformer_autoregressive_model_with_preprocessor(self):
self._test_seq2slate_model_with_preprocessor(
model="transformer", output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE
)
def _test_seq2slate_model_with_preprocessor(
self, model: str, output_arch: Seq2SlateOutputArch
):
state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
candidate_normalization_parameters = {i: _cont_norm() for i in range(101, 106)}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
candidate_preprocessor = Preprocessor(candidate_normalization_parameters, False)
candidate_size = 10
slate_size = 4
seq2slate = None
if model == "transformer":
seq2slate = Seq2SlateTransformerNet(
state_dim=len(state_normalization_parameters),
candidate_dim=len(candidate_normalization_parameters),
num_stacked_layers=2,
num_heads=2,
dim_model=10,
dim_feedforward=10,
max_src_seq_len=candidate_size,
max_tgt_seq_len=slate_size,
output_arch=output_arch,
temperature=0.5,
)
else:
raise NotImplementedError(f"model type {model} is unknown")
seq2slate_with_preprocessor = Seq2SlateWithPreprocessor(
seq2slate, state_preprocessor, candidate_preprocessor, greedy=True
)
input_prototype = seq2slate_with_preprocessor.input_prototype()
if seq2slate_with_preprocessor.can_be_traced():
seq2slate_with_preprocessor_jit = torch.jit.trace(
seq2slate_with_preprocessor,
seq2slate_with_preprocessor.input_prototype(),
)
else:
seq2slate_with_preprocessor_jit = torch.jit.script(
seq2slate_with_preprocessor
)
expected_output = seq2slate_with_preprocessor(*input_prototype)
jit_output = seq2slate_with_preprocessor_jit(*input_prototype)
self.verify_results(expected_output, jit_output)
# Test if scripted model can handle variable lengths of input
input_prototype = change_cand_size_slate_ranking(input_prototype, 20)
expected_output = seq2slate_with_preprocessor(*input_prototype)
jit_output = seq2slate_with_preprocessor_jit(*input_prototype)
self.verify_results(expected_output, jit_output)
| 3,422 | 41.7875 | 88 | py |
ReAgent | ReAgent-master/reagent/test/prediction/test_prediction_utils.py | import torch
from reagent.preprocessing.identify_types import CONTINUOUS, CONTINUOUS_ACTION
from reagent.preprocessing.normalization import NormalizationParameters
def _cont_norm():
return NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0)
def _cont_action_norm():
return NormalizationParameters(
feature_type=CONTINUOUS_ACTION, min_value=-3.0, max_value=3.0
)
def change_cand_size_slate_ranking(input_prototype, candidate_size_override):
state_prototype, candidate_prototype = input_prototype
candidate_prototype = (
candidate_prototype[0][:, :1, :].repeat(1, candidate_size_override, 1),
candidate_prototype[1][:, :1, :].repeat(1, candidate_size_override, 1),
)
return (
(torch.randn_like(state_prototype[0]), torch.ones_like(state_prototype[1])),
(
torch.randn_like(candidate_prototype[0]),
torch.ones_like(candidate_prototype[1]),
),
)
| 970 | 32.482759 | 84 | py |
ReAgent | ReAgent-master/reagent/scripts/hparam_tuning.py | #!/usr/bin/env python3
# (c) Facebook, Inc. and its affiliates. Confidential and proprietary.
import logging # isort:skip
logging.disable() # isort:skip
import copy
import json
import os
from typing import Any, Callable, Dict, List, Tuple, Optional
import numpy as np
import torch.multiprocessing as mp
from ax.service.ax_client import AxClient
def ax_evaluate_params(
params_list: List[Dict],
fixed_params: Dict,
eval_fn: Callable,
parse_params_fn: Optional[Callable] = None,
num_seeds: int = 10,
num_proc: int = 20,
) -> List[Dict[str, Tuple[float, float]]]:
"""
    Evaluate a list of hyperparameter configurations for Ax search, averaging each over several random seeds.
Args:
params_list: A list of hyperparameter configs to evaluate.
fixed_params: A dictionary of hyperparameters that are held fixed between evaluations.
eval_fn: Evaluation function that returns a dictionary of metric values.
        parse_params_fn: An optional function applied to the hyperparameter dictionary to parse some elements. Can be useful
if the best representation for Ax doesn't match the format accepted by the eval_fn.
num_seeds: Number of random seeds among which the metrics are averaged.
num_proc: Number of processes to run in parallel.
Returns:
A list of average evaluation metrics (one per config)
"""
# create a list of full hyperparameter configurations to be evaluated
params_with_seed_list = []
for params in params_list:
for s in range(num_seeds):
params_s = copy.deepcopy(params)
params_s.update(fixed_params)
params_s["seed"] = s
if parse_params_fn is not None:
params_s = parse_params_fn(params_s)
params_with_seed_list.append(params_s)
# evaluate metrics in parallel using multiprocessing
if num_proc > 1:
with mp.get_context("spawn").Pool(
min(len(params_with_seed_list), num_proc)
) as p:
metrics = p.map(eval_fn, params_with_seed_list)
else:
metrics = list(map(eval_fn, params_with_seed_list))
# calculate the average metrics across different seeds
avg_metrics = []
num_params = len(params_list)
for i in range(num_params):
avg_metrics.append(
{
k: (
np.mean(
[m[k] for m in metrics[i * num_seeds : (i + 1) * num_seeds]]
),
np.std(
[m[k] for m in metrics[i * num_seeds : (i + 1) * num_seeds]]
),
)
for k in metrics[0].keys()
}
)
return avg_metrics
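# Hedged sketch (not part of the original module): illustrates the eval_fn contract used
# above. Each call receives one merged dict of searched + fixed params plus a "seed" key,
# and returns a dict of metric values. The names `_toy_eval_fn`, "lr", "batch_size" and
# "loss" are illustrative assumptions only.
def _toy_eval_fn(params: Dict) -> Dict[str, float]:
    rng = np.random.default_rng(params["seed"])
    # Pretend the loss depends quadratically on the learning rate plus some seed noise.
    loss = (params["lr"] - 0.01) ** 2 + 1e-4 * params["batch_size"] + rng.normal(0.0, 1e-3)
    return {"loss": float(loss)}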
def run_ax_search(
fixed_params: Dict,
ax_params: List[Dict[str, Any]],
eval_fn: Callable,
obj_name: str,
minimize: bool,
id_: str,
parse_params_fn: Optional[Callable] = None,
ax_param_constraints: Optional[List[str]] = None,
num_ax_steps: int = 50,
num_concur_samples: int = 2,
num_seeds: int = 10,
num_proc: int = 20,
folder_name: Optional[str] = None,
verbose: bool = False,
) -> Tuple[Dict[str, Any], AxClient]:
"""
Run a search for best hyperparameter values using Ax.
Note that this requires the Ax package (https://ax.dev/) to be installed.
Args:
fixed_params: Fixed values of hyperparameters.
ax_params: Ax configuration for hyperparameters that are searched over. See docs for ax_client.create_experiment()
eval_fn: Evaluation function that returns a dictionary of metric values.
obj_name: Objective name (key of the dict returned by eval_fn)
minimize: If True, objective is minimized, if False it's maximized.
id_: An arbitrary string identifier of the search (used as part of filename where results are saved)
parse_params_fn: A function applied to the parameter dictionary to parse it. Can be used
if the best representation for Ax doesn't match the format accepted by the eval_fn.
ax_param_constraints: Constraints for the parameters that are searched over.
num_ax_steps: The number of ax steps to take.
num_concur_samples: Number of configurations to sample per ax step (in parallel)
num_seeds: Number of seeds to average over
num_proc: Number of processes to run in parallel.
folder_name: Folder where to save best found parameters
verbose: If True, some details are printed out
Returns:
A dict of best hyperparameters found by Ax
"""
for p in ax_params:
assert (
p["name"] not in fixed_params
        ), f'Parameter {p["name"]} appears in both fixed and search parameters'
if ax_param_constraints is None:
ax_param_constraints = []
ax_client = AxClient()
ax_client.create_experiment(
name=f"hparams_search_{id_}",
parameters=ax_params,
objective_name=obj_name,
minimize=minimize,
parameter_constraints=ax_param_constraints,
choose_generation_strategy_kwargs={
"max_parallelism_override": num_concur_samples,
"num_initialization_trials": max(num_concur_samples, 5, len(ax_params)),
},
)
best_params = None
all_considered_params = []
all_considered_metrics = []
try:
for i in range(1, num_ax_steps + 1):
if verbose:
print(f"ax step {i}/{num_ax_steps}")
params_list = []
trial_indices_list = []
for _ in range(num_concur_samples):
# sample several values (to be evaluated in parallel)
parameters, trial_index = ax_client.get_next_trial()
params_list.append(parameters)
trial_indices_list.append(trial_index)
res = ax_evaluate_params(
params_list,
fixed_params=fixed_params,
eval_fn=eval_fn,
parse_params_fn=parse_params_fn,
num_seeds=num_seeds,
num_proc=num_proc,
)
all_considered_params.extend(params_list)
all_considered_metrics.extend(res)
for t_i, v in zip(trial_indices_list, res):
ax_client.complete_trial(trial_index=t_i, raw_data=v)
best_params, predicted_metrics = ax_client.get_best_parameters()
predicted_metrics = predicted_metrics[0] # choose expected metric values
if verbose:
print(best_params, predicted_metrics)
# save at every iteration in case search is interrupted
if folder_name is not None:
with open(
os.path.join(
os.path.expanduser(folder_name),
f"ax_results_{id_}.json",
),
"w",
) as f:
json.dump(
{
"best_params": best_params,
"predicted_metrics": predicted_metrics,
"fixed_params": fixed_params,
"ax_params": ax_params,
"num_ax_steps": i,
"num_concur_samples": num_concur_samples,
"num_seeds": num_seeds,
"num_proc": num_proc,
"all_considered_params": all_considered_params,
"all_considered_metrics": all_considered_metrics,
},
f,
indent=4,
)
except KeyboardInterrupt:
# handle keyboard interruption to enable returning intermediate results if interrupted
pass
return best_params, ax_client
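# Hedged usage sketch (not part of the original module): drives run_ax_search with a toy
# quadratic objective. The search space, objective name "loss" and num_* settings are
# illustrative assumptions, not recommended defaults; Ax must be installed for this to run.
if __name__ == "__main__":
    def _example_objective(params: Dict) -> Dict[str, float]:
        return {"loss": (params["lr"] - 0.01) ** 2}
    example_search_space = [
        {"name": "lr", "type": "range", "bounds": [1e-4, 1e-1], "log_scale": True},
    ]
    best, _client = run_ax_search(
        fixed_params={},
        ax_params=example_search_space,
        eval_fn=_example_objective,
        obj_name="loss",
        minimize=True,
        id_="toy_quadratic",
        num_ax_steps=3,
        num_concur_samples=2,
        num_seeds=2,
        num_proc=1,
        verbose=True,
    )
    print("best hyperparameters found:", best)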
| 7,864 | 38.923858 | 123 | py |
ReAgent | ReAgent-master/reagent/publishers/file_system_publisher.py | #!/usr/bin/env python3
import logging
import os
from typing import Dict, Optional
from reagent.core.dataclasses import dataclass
from reagent.core.result_types import NoPublishingResults
from reagent.model_managers.model_manager import ModelManager
from reagent.publishers.model_publisher import ModelPublisher
from reagent.workflow.types import (
ModuleNameToEntityId,
RecurringPeriod,
RLTrainingOutput,
)
try:
from tinydb import Query, TinyDB
HAS_TINYDB = True
except ImportError:
HAS_TINYDB = False
class FileSystemPublisher:
pass
logger = logging.getLogger(__name__)
KEY_FIELD = "model_config"
VALUE_FIELD = "torchscript_path"
if HAS_TINYDB:
@dataclass
class FileSystemPublisher(ModelPublisher):
"""Uses a file to serve as a key-value store.
The key is the str/repr representation of the ModelManager.
        The value is the path to the TorchScript model.
TODO: replace with redis (python) and hiredis (C) for better RASP support
"""
publishing_file: str = "/tmp/file_system_publisher"
def __post_init_post_parse__(self):
self.publishing_file = os.path.abspath(self.publishing_file)
self.db: TinyDB = TinyDB(self.publishing_file)
logger.info(f"Using TinyDB at {self.publishing_file}.")
def get_latest_published_model(
self, model_manager: ModelManager, module_name: str
) -> str:
Model = Query()
# TODO: make this take in a
key = f"{module_name}_{str(model_manager)}"
# pyre-fixme[16]: `FileSystemPublisher` has no attribute `db`.
results = self.db.search(Model[KEY_FIELD] == key)
if len(results) != 1:
if len(results) == 0:
raise ValueError(
"Publish a model with the same str representation first!"
)
else:
raise RuntimeError(
f"Got {len(results)} results for model_manager. {results}"
)
return results[0][VALUE_FIELD]
def do_publish(
self,
model_manager: ModelManager,
training_output: RLTrainingOutput,
setup_data: Optional[Dict[str, bytes]],
recurring_workflow_ids: ModuleNameToEntityId,
child_workflow_id: int,
recurring_period: Optional[RecurringPeriod],
) -> NoPublishingResults:
for module_name, path in training_output.output_paths.items():
assert os.path.exists(path), f"Given path {path} doesn't exist."
Model = Query()
# find if there's already been something stored
key = f"{module_name}_{str(model_manager)}"
# pyre-fixme[16]: `FileSystemPublisher` has no attribute `db`.
results = self.db.search(Model[KEY_FIELD] == key)
if len(results) == 0:
# this is a first
self.db.insert({KEY_FIELD: key, VALUE_FIELD: path})
else:
# replace it
if len(results) > 1:
raise RuntimeError(
f"Got {len(results)} results for model_manager. {results}"
)
self.db.update({VALUE_FIELD: path}, Model[KEY_FIELD] == key)
return NoPublishingResults(success=True)
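        # Shape of a stored record, with illustrative values only:
        #   {"model_config": "<module_name>_<str(model_manager)>",
        #    "torchscript_path": "/tmp/some_run/default_model.pt"}
        # get_latest_published_model() looks the same key up and returns the stored path.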
| 3,502 | 34.383838 | 86 | py |
ReAgent | ReAgent-master/reagent/samplers/frechet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import Optional
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.configuration import resolve_defaults
from reagent.gym.types import Sampler
from torch.distributions import Gumbel
logger = logging.getLogger(__name__)
class FrechetSort(Sampler):
EPS = 1e-12
@resolve_defaults
def __init__(
self,
shape: float = 1.0,
topk: Optional[int] = None,
equiv_len: Optional[int] = None,
log_scores: bool = False,
):
"""FréchetSort is a softer version of descending sort which samples all possible
orderings of items favoring orderings which resemble descending sort. This can
be used to convert descending sort by rank score into a differentiable,
stochastic policy amenable to policy gradient algorithms.
        :param shape: parameter of the Frechet Distribution. Lower values correspond to
            more aggressive deviations from descending sort.
        :param topk: If specified, only the first topk actions are returned.
:param equiv_len: Orders are considered equivalent if the top equiv_len match. Used
in probability computations.
Essentially specifies the action space.
        :param log_scores: Scores passed in are already log-transformed. In this case, we would
simply add Gumbel noise.
For LearnVM, we set this to be True because we expect input and output scores
to be in the log space.
Example:
Consider the sampler:
sampler = FrechetSort(shape=3, topk=5, equiv_len=3)
Given a set of scores, this sampler will produce indices of items roughly
resembling a argsort by scores in descending order. The higher the shape,
the more it would resemble a descending argsort. `topk=5` means only the top
5 ranks will be output. The `equiv_len` determines what orders are considered
equivalent for probability computation. In this example, the sampler will
produce probability for the top 3 items appearing in a given order for the
`log_prob` call.
"""
self.shape = shape
self.topk = topk
self.upto = equiv_len
if topk is not None:
if equiv_len is None:
self.upto = topk
# pyre-fixme[58]: `>` is not supported for operand types `Optional[int]`
# and `Optional[int]`.
if self.upto > self.topk:
raise ValueError(f"Equiv length {equiv_len} cannot exceed topk={topk}.")
self.gumbel_noise = Gumbel(0, 1.0 / shape)
self.log_scores = log_scores
def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
"""Sample a ranking according to Frechet sort. Note that possible_actions_mask
is ignored as the list of rankings scales exponentially with slate size and
number of items and it can be difficult to enumerate them."""
assert scores.dim() == 2, "sample_action only accepts batches"
log_scores = scores if self.log_scores else torch.log(scores)
perturbed = log_scores + self.gumbel_noise.sample(scores.shape)
action = torch.argsort(perturbed.detach(), descending=True)
log_prob = self.log_prob(scores, action)
# Only truncate the action before returning
if self.topk is not None:
action = action[: self.topk]
return rlt.ActorOutput(action, log_prob)
def log_prob(
self,
scores: torch.Tensor,
action: torch.Tensor,
equiv_len_override: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
What is the probability of a given set of scores producing the given
list of permutations only considering the top `equiv_len` ranks?
        We may want to override the default equiv_len here when we know that having a larger
action space doesn't matter. i.e. in Reels
"""
upto = self.upto
if equiv_len_override is not None:
assert equiv_len_override.shape == (
scores.shape[0],
), f"Invalid shape {equiv_len_override.shape}, compared to scores {scores.shape}. equiv_len_override {equiv_len_override}"
upto = equiv_len_override.long()
if self.topk is not None and torch.any(equiv_len_override > self.topk):
raise ValueError(
f"Override {equiv_len_override} cannot exceed topk={self.topk}."
)
squeeze = False
if len(scores.shape) == 1:
squeeze = True
scores = scores.unsqueeze(0)
action = action.unsqueeze(0)
assert len(action.shape) == len(scores.shape) == 2, "scores should be batch"
if action.shape[1] > scores.shape[1]:
raise ValueError(
f"action cardinality ({action.shape[1]}) is larger than the number of scores ({scores.shape[1]})"
)
elif action.shape[1] < scores.shape[1]:
raise NotImplementedError(
f"This semantic is ambiguous. If you have shorter slate, pad it with scores.shape[1] ({scores.shape[1]})"
)
log_scores = scores if self.log_scores else torch.log(scores)
n = log_scores.shape[-1]
# Add scores for the padding value
log_scores = torch.cat(
[
log_scores,
torch.full(
(log_scores.shape[0], 1), -math.inf, device=log_scores.device
),
],
dim=1,
)
log_scores = torch.gather(log_scores, 1, action) * self.shape
p = upto if upto is not None else n
# We should unsqueeze here
if isinstance(p, int):
log_prob = sum(
torch.nan_to_num(
F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0
)
for i in range(p)
)
elif isinstance(p, torch.Tensor):
# do masked sum
log_prob = sum(
torch.nan_to_num(
F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0
)
* (i < p).float()
for i in range(n)
)
else:
raise RuntimeError(f"p is {p}")
assert not torch.any(log_prob.isnan()), f"Nan in {log_prob}"
return log_prob
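# Hedged usage sketch (not part of the original module): samples a ranking from a batch of
# scores and evaluates its log-probability. The score values, shape, topk and equiv_len
# below are illustrative assumptions only.
if __name__ == "__main__":
    sampler = FrechetSort(shape=3.0, topk=3, equiv_len=3)
    scores = torch.tensor([[0.1, 0.4, 0.2, 0.3]])  # one slate of four candidate scores
    sampled = sampler.sample_action(scores)
    # log_prob of the sampled ordering under the same scores (only the top-3 ranks count)
    print(sampled.action, sampled.log_prob)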
| 6,562 | 39.512346 | 134 | py |
ReAgent | ReAgent-master/reagent/model_managers/model_manager.py | #!/usr/bin/env python3
import abc
import logging
from typing import Dict, List, Optional, Tuple
import pytorch_lightning as pl
import torch
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import NormalizationData
from reagent.data.reagent_data_module import ReAgentDataModule
from reagent.reporting.reporter_base import ReporterBase
from reagent.training import ReAgentLightningModule, MultiStageTrainer
from reagent.workflow.types import (
Dataset,
ReaderOptions,
ResourceOptions,
RewardOptions,
RLTrainingOutput,
TableSpec,
)
from reagent.workflow.types import RLTrainingReport
from reagent.workflow.utils import get_rank, train_eval_lightning
logger = logging.getLogger(__name__)
@dataclass
class ModelManager:
"""
ModelManager manages how to train models.
Each type of models can have their own config type, implemented as
`config_type()` class method. `__init__()` of the concrete class must take
this type.
To integrate training algorithms into the standard training workflow, you need:
1. `build_trainer()`: Builds the ReAgentLightningModule
2. `get_data_module()`: Defines how to create data module for this algorithm
3. `build_serving_modules()`: Creates the TorchScript modules for serving
4. `get_reporter()`: Returns the reporter to collect training/evaluation metrics
5. `create_policy()`: (Optional) Creates Policy object for to interact with Gym
"""
def __post_init_post_parse__(self):
"""
We use pydantic to parse raw config into typed (dataclass) config.
This method is called after everything is parsed, so you could
validate constraints that may not be captured with the type alone.
See https://pydantic-docs.helpmanual.io/usage/dataclasses/#initialize-hooks
"""
pass
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
reader_options: Optional[ReaderOptions] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
"""
Return the data module. If this is not None, then `run_feature_identification` &
`query_data` will not be run.
"""
return None
@abc.abstractmethod
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> ReAgentLightningModule:
"""
Implement this to build the trainer, given the config
TODO: This function should return ReAgentLightningModule &
the dictionary of modules created
"""
pass
@abc.abstractmethod
def get_reporter(self) -> ReporterBase:
pass
def train(
self,
trainer_module: ReAgentLightningModule,
train_dataset: Optional[Dataset],
eval_dataset: Optional[Dataset],
test_dataset: Optional[Dataset],
data_module: Optional[ReAgentDataModule],
num_epochs: int,
reader_options: ReaderOptions,
resource_options: ResourceOptions,
checkpoint_path: Optional[str] = None,
) -> Tuple[RLTrainingOutput, pl.Trainer]:
"""
Train the model
Returns partially filled RLTrainingOutput.
        The field that is not filled is:
- output_path
Arguments:
train/eval/test_dataset: what you'd expect
data_module: [pytorch lightning only] a lightning data module that replaces the use of train/eval datasets
num_epochs: number of training epochs
reader_options: options for the data reader
resource_options: options for training resources (currently only used for setting num_nodes in pytorch lightning trainer)
"""
if isinstance(trainer_module, MultiStageTrainer):
assert trainer_module.multi_stage_total_epochs == num_epochs, (
f"The sum of each stage's epoch ({trainer_module.trainer_epoch_mapping})"
f" should be equal to num_epochs ({num_epochs})."
)
reporter = self.get_reporter()
trainer_module.set_reporter(reporter)
assert data_module
lightning_trainer = train_eval_lightning(
train_dataset=train_dataset,
eval_dataset=eval_dataset,
test_dataset=test_dataset,
trainer_module=trainer_module,
data_module=data_module,
num_epochs=num_epochs,
logger_name=str(type(self)),
reader_options=reader_options,
checkpoint_path=checkpoint_path,
resource_options=resource_options,
)
rank = get_rank()
if rank == 0:
# pyre-ignore
trainer_logger = lightning_trainer.logger
logger_data = trainer_logger.line_plot_aggregated
trainer_logger.clear_local_data()
if reporter is None:
training_report = None
else:
# pyre-ignore
training_report = RLTrainingReport.make_union_instance(
reporter.generate_training_report()
)
return (
RLTrainingOutput(
training_report=training_report, logger_data=logger_data
),
lightning_trainer,
)
# Output from processes with non-0 rank is not used
return RLTrainingOutput(), lightning_trainer
# TODO: make abstract
def build_serving_modules(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> Dict[str, torch.nn.Module]:
"""
Returns TorchScript for serving in production
"""
return {
"default_model": self.build_serving_module(
trainer_module, normalization_data_map
)
}
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
"""
        Optionally, implement this method if you only have one model for serving.
"""
raise NotImplementedError
# TODO: make abstract
def serving_module_names(self) -> List[str]:
"""
Returns the keys that would be returned in `build_serving_modules()`.
This method is required because we need to reserve entity IDs for
these serving modules before we start the training.
"""
return ["default_model"]
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
raise NotImplementedError
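# Hedged sketch (not part of the original module): a minimal skeleton of the hooks a
# concrete manager implements, mirroring the class docstring above. `ExampleModelManager`
# and its bodies are illustrative assumptions; real managers are @dataclass-decorated and
# declare their algorithm's config fields.
class ExampleModelManager(ModelManager):
    def build_trainer(self, normalization_data_map, use_gpu, reward_options=None):
        # Would construct and return the algorithm's ReAgentLightningModule.
        raise NotImplementedError
    def get_reporter(self):
        # Would return a ReporterBase that collects training/evaluation metrics.
        raise NotImplementedError
    def serving_module_names(self):
        # Keys that build_serving_modules() would return.
        return ["default_model"]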
| 7,097 | 33.965517 | 133 | py |
ReAgent | ReAgent-master/reagent/model_managers/discrete_dqn_base.py | #!/usr/bin/env python3
import abc
import logging
from dataclasses import replace
from typing import Dict, List, Optional, Tuple
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
RLParameters,
)
from reagent.data.data_fetcher import DataFetcher
from reagent.data.manual_data_module import ManualDataModule
from reagent.data.reagent_data_module import ReAgentDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import (
GreedyActionSampler,
)
from reagent.gym.policies.scorers.discrete_scorer import discrete_dqn_scorer
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.preprocessing.batch_preprocessor import (
BatchPreprocessor,
DiscreteDqnBatchPreprocessor,
)
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.preprocessing.types import InputColumn
from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter
from reagent.training import ReAgentLightningModule
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
ModelFeatureConfigProvider__Union,
PreprocessingOptions,
ReaderOptions,
ResourceOptions,
RewardOptions,
TableSpec,
)
logger = logging.getLogger(__name__)
@dataclass
class DiscreteDQNBase(ModelManager):
target_action_distribution: Optional[List[float]] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `raw`.
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
preprocessing_options: Optional[PreprocessingOptions] = None
reader_options: Optional[ReaderOptions] = None
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
@property
@abc.abstractmethod
def rl_parameters(self) -> RLParameters:
pass
@property
@abc.abstractmethod
def action_names(self) -> List[str]:
# Returns the list of possible actions for this instance of problem
pass
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create an online DiscreteDQN Policy from env."""
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map),
rl_parameters=self.rl_parameters,
)
else:
sampler = GreedyActionSampler()
# pyre-fixme[6]: Expected `ModelBase` for 1st param but got
# `Union[torch.Tensor, torch.nn.Module]`.
scorer = discrete_dqn_scorer(trainer_module.q_network)
return Policy(scorer=scorer, sampler=sampler)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
def get_state_preprocessing_options(self) -> PreprocessingOptions:
state_preprocessing_options = (
self.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id for ffi in self.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = replace(
state_preprocessing_options, allowedlist_features=state_features
)
return state_preprocessing_options
@property
def multi_steps(self) -> Optional[int]:
return self.rl_parameters.multi_steps
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
reader_options: Optional[ReaderOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
return DiscreteDqnDataModule(
input_table_spec=input_table_spec,
reward_options=reward_options,
setup_data=setup_data,
saved_setup_data=saved_setup_data,
reader_options=reader_options,
resource_options=resource_options,
model_manager=self,
)
def get_reporter(self):
return DiscreteDQNReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
class DiscreteDqnDataModule(ManualDataModule):
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
preprocessing_options = (
self.model_manager.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id
for ffi in self.model_manager.state_feature_config.float_feature_infos
]
logger.info(f"Overriding allowedlist_features: {state_features}")
preprocessing_options = replace(
preprocessing_options, allowedlist_features=state_features
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=identify_normalization_parameters(
input_table_spec, InputColumn.STATE_FEATURES, preprocessing_options
)
)
}
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
return data_fetcher.query_data(
input_table_spec=input_table_spec,
discrete_action=True,
actions=self.model_manager.action_names,
include_possible_actions=True,
sample_range=sample_range,
custom_reward_expression=reward_options.custom_reward_expression,
multi_steps=self.model_manager.multi_steps,
gamma=self.model_manager.rl_parameters.gamma,
)
def build_batch_preprocessor(self) -> BatchPreprocessor:
state_preprocessor = Preprocessor(
self.state_normalization_data.dense_normalization_parameters,
)
return DiscreteDqnBatchPreprocessor(
num_actions=len(self.model_manager.action_names),
state_preprocessor=state_preprocessor,
)
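# Illustrative sketch (not part of ReAgent): the allowlist override used in
# get_state_preprocessing_options() and run_feature_identification() above relies on
# `dataclasses.replace` producing an updated copy of an options object.
# `ToyPreprocessingOptions` below is a made-up stand-in for PreprocessingOptions.
if __name__ == "__main__":
    import dataclasses
    from typing import List, Optional
    @dataclasses.dataclass(frozen=True)
    class ToyPreprocessingOptions:
        allowedlist_features: Optional[List[int]] = None
    base_opts = ToyPreprocessingOptions()
    updated_opts = dataclasses.replace(base_opts, allowedlist_features=[1001, 1002])
    assert base_opts.allowedlist_features is None  # the original options object is untouched
    assert updated_opts.allowedlist_features == [1001, 1002]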
| 7,304 | 36.081218 | 87 | py |
ReAgent | ReAgent-master/reagent/model_managers/actor_critic_base.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from dataclasses import replace
from typing import Dict, List, Optional, Tuple
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
)
from reagent.data import DataFetcher, ReAgentDataModule, ManualDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.preprocessing.batch_preprocessor import (
BatchPreprocessor,
PolicyNetworkBatchPreprocessor,
Preprocessor,
)
from reagent.preprocessing.normalization import get_feature_config
from reagent.preprocessing.types import InputColumn
from reagent.reporting.actor_critic_reporter import ActorCriticReporter
from reagent.training import ReAgentLightningModule
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
ModelFeatureConfigProvider__Union,
PreprocessingOptions,
ReaderOptions,
ResourceOptions,
RewardOptions,
RLTrainingOutput,
TableSpec,
)
logger = logging.getLogger(__name__)
class ActorPolicyWrapper(Policy):
"""Actor's forward function is our act"""
def __init__(self, actor_network):
self.actor_network = actor_network
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def act(
self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
) -> rlt.ActorOutput:
self.actor_network.eval()
output = self.actor_network(obs)
self.actor_network.train()
return output.detach().cpu()
@dataclass
class ActorCriticBase(ModelManager):
state_preprocessing_options: Optional[PreprocessingOptions] = None
action_preprocessing_options: Optional[PreprocessingOptions] = None
action_feature_override: Optional[str] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `raw`.
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
action_float_features: List[Tuple[int, str]] = field(default_factory=list)
reader_options: Optional[ReaderOptions] = None
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
save_critic_bool: bool = True
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
self.state_preprocessing_options is None
or self.state_preprocessing_options.allowedlist_features is None
), (
"Please set state allowlist features in state_float_features field of "
"config instead"
)
assert (
self.action_preprocessing_options is None
or self.action_preprocessing_options.allowedlist_features is None
), (
"Please set action allowlist features in action_float_features field of "
"config instead"
)
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create online actor critic policy."""
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map)
)
else:
return ActorPolicyWrapper(trainer_module.actor_network)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
@property
def action_feature_config(self) -> rlt.ModelFeatureConfig:
assert len(self.action_float_features) > 0, "You must set action_float_features"
return get_feature_config(self.action_float_features)
def get_state_preprocessing_options(self) -> PreprocessingOptions:
state_preprocessing_options = (
self.state_preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id for ffi in self.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = replace(
state_preprocessing_options, allowedlist_features=state_features
)
return state_preprocessing_options
def get_action_preprocessing_options(self) -> PreprocessingOptions:
action_preprocessing_options = (
self.action_preprocessing_options or PreprocessingOptions()
)
action_features = [
ffi.feature_id for ffi in self.action_feature_config.float_feature_infos
]
logger.info(f"action allowedlist_features: {action_features}")
# pyre-fixme
actor_net_builder = self.actor_net_builder.value
action_feature_override = actor_net_builder.default_action_preprocessing
logger.info(f"Default action_feature_override is {action_feature_override}")
if self.action_feature_override is not None:
action_feature_override = self.action_feature_override
assert action_preprocessing_options.feature_overrides is None
action_preprocessing_options = replace(
action_preprocessing_options,
allowedlist_features=action_features,
feature_overrides={fid: action_feature_override for fid in action_features},
)
return action_preprocessing_options
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
reader_options: Optional[ReaderOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
return ActorCriticDataModule(
input_table_spec=input_table_spec,
reward_options=reward_options,
setup_data=setup_data,
saved_setup_data=saved_setup_data,
reader_options=reader_options,
resource_options=resource_options,
model_manager=self,
)
def get_reporter(self):
return ActorCriticReporter()
class ActorCriticDataModule(ManualDataModule):
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
"""
Derive preprocessing parameters from data.
"""
# Run state feature identification
state_normalization_parameters = identify_normalization_parameters(
input_table_spec,
InputColumn.STATE_FEATURES,
self.model_manager.get_state_preprocessing_options(),
)
# Run action feature identification
action_normalization_parameters = identify_normalization_parameters(
input_table_spec,
InputColumn.ACTION,
self.model_manager.get_action_preprocessing_options(),
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=state_normalization_parameters
),
NormalizationKey.ACTION: NormalizationData(
dense_normalization_parameters=action_normalization_parameters
),
}
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
return data_fetcher.query_data(
input_table_spec=input_table_spec,
discrete_action=False,
include_possible_actions=False,
custom_reward_expression=reward_options.custom_reward_expression,
sample_range=sample_range,
)
def build_batch_preprocessor(self) -> BatchPreprocessor:
state_preprocessor = Preprocessor(
self.state_normalization_data.dense_normalization_parameters,
)
action_preprocessor = Preprocessor(
self.action_normalization_data.dense_normalization_parameters,
)
return PolicyNetworkBatchPreprocessor(
state_preprocessor=state_preprocessor,
action_preprocessor=action_preprocessor,
)
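# Illustrative sketch (not part of ReAgent): the eval()/no_grad()/train() pattern used by
# ActorPolicyWrapper.act above, shown on a minimal torch module. `ToyActor` is a made-up
# network, not one of the ReAgent actor classes.
if __name__ == "__main__":
    import torch
    class ToyActor(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)
        def forward(self, obs):
            return self.linear(obs)
    actor = ToyActor()
    obs = torch.randn(1, 4)
    actor.eval()  # switch off training-only behavior (dropout, batch-norm updates) while acting
    with torch.no_grad():  # no autograd bookkeeping is needed for pure inference
        action = actor(obs)
    actor.train()  # restore training mode afterwards, mirroring ActorPolicyWrapper.act
    assert action.requires_grad is False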
| 9,318 | 36.882114 | 88 | py |
ReAgent | ReAgent-master/reagent/model_managers/slate_q_base.py | #!/usr/bin/env python3
import logging
from typing import Dict, List, Optional, Tuple
import reagent.core.types as rlt
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import NormalizationData, NormalizationKey
from reagent.data import DataFetcher, ReAgentDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.top_k_sampler import TopKSampler
from reagent.gym.policies.scorers.slate_q_scorer import slate_q_scorer
from reagent.model_managers.model_manager import ModelManager
from reagent.models.base import ModelBase
from reagent.preprocessing.normalization import get_feature_config
from reagent.reporting.slate_q_reporter import SlateQReporter
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import (
Dataset,
PreprocessingOptions,
ReaderOptions,
ResourceOptions,
RewardOptions,
RLTrainingOutput,
TableSpec,
)
logger = logging.getLogger(__name__)
@dataclass
class SlateQBase(ModelManager):
slate_feature_id: int = 0
slate_score_id: Tuple[int, int] = (0, 0)
item_preprocessing_options: Optional[PreprocessingOptions] = None
state_preprocessing_options: Optional[PreprocessingOptions] = None
state_float_features: Optional[List[Tuple[int, str]]] = None
item_float_features: Optional[List[Tuple[int, str]]] = None
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
self.state_preprocessing_options is None
or self.state_preprocessing_options.allowedlist_features is None
), (
"Please set state allowlist features in state_float_features field of "
"config instead"
)
assert (
self.item_preprocessing_options is None
or self.item_preprocessing_options.allowedlist_features is None
), (
"Please set item allowlist features in item_float_features field of "
"config instead"
)
assert (
self.item_preprocessing_options is None
or self.item_preprocessing_options.sequence_feature_id is None
), "Please set slate_feature_id field of config instead"
self._state_preprocessing_options = self.state_preprocessing_options
self._item_preprocessing_options = self.item_preprocessing_options
self.eval_parameters = self.trainer_param.evaluation
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map),
# pyre-fixme[16]: `SlateQBase` has no attribute `num_candidates`.
max_num_actions=self.num_candidates,
# pyre-fixme[16]: `SlateQBase` has no attribute `slate_size`.
slate_size=self.slate_size,
)
else:
scorer = slate_q_scorer(
num_candidates=self.num_candidates,
# pyre-fixme[6]: Expected `ModelBase` for 2nd param but got
# `Union[torch.Tensor, torch.nn.Module]`.
q_network=trainer_module.q_network,
)
sampler = TopKSampler(k=self.slate_size)
return Policy(scorer=scorer, sampler=sampler)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.state_float_features)
@property
def item_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.item_float_features)
def get_reporter(self):
return SlateQReporter()
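# Illustrative sketch (not part of ReAgent): conceptually, TopKSampler builds a slate by
# keeping the k highest-scoring candidates, much like torch.topk over per-candidate scores.
# The scores below are made up; slate_q_scorer computes real ones from the Q-network.
if __name__ == "__main__":
    import torch
    slate_size = 2
    candidate_scores = torch.tensor([[0.1, 0.7, 0.3, 0.9, 0.2]])  # (batch, num_candidates)
    top_scores, top_indices = torch.topk(candidate_scores, k=slate_size, dim=1)
    print(top_indices.tolist())  # [[3, 1]] -- the two best candidates form the slate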
| 3,947 | 38.48 | 86 | py |
ReAgent | ReAgent-master/reagent/model_managers/parametric_dqn_base.py | #!/usr/bin/env python3
import logging
from dataclasses import replace
from typing import Dict, List, Optional, Tuple
import reagent.core.types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
)
from reagent.data.data_fetcher import DataFetcher
from reagent.data.manual_data_module import ManualDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.gym.policies.scorers.discrete_scorer import parametric_dqn_scorer
from reagent.model_managers.model_manager import ModelManager
from reagent.models.base import ModelBase
from reagent.preprocessing.batch_preprocessor import BatchPreprocessor
from reagent.preprocessing.normalization import (
get_feature_config,
)
from reagent.preprocessing.types import InputColumn
from reagent.training import ReAgentLightningModule
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
PreprocessingOptions,
ReaderOptions,
RewardOptions,
TableSpec,
)
logger = logging.getLogger(__name__)
@dataclass
class ParametricDQNBase(ModelManager):
state_preprocessing_options: Optional[PreprocessingOptions] = None
action_preprocessing_options: Optional[PreprocessingOptions] = None
state_float_features: Optional[List[Tuple[int, str]]] = None
action_float_features: Optional[List[Tuple[int, str]]] = None
reader_options: Optional[ReaderOptions] = None
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
self.state_preprocessing_options is None
or self.state_preprocessing_options.allowedlist_features is None
), (
"Please set state allowlist features in state_float_features field of "
"config instead"
)
assert (
self.action_preprocessing_options is None
or self.action_preprocessing_options.allowedlist_features is None
), (
"Please set action allowlist features in action_float_features field of "
"config instead"
)
self._q_network: Optional[ModelBase] = None
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
"""Create an online DiscreteDQN Policy from env."""
# FIXME: this only works for one-hot encoded actions
# pyre-fixme[16]: `Tensor` has no attribute `input_prototype`.
action_dim = trainer_module.q_network.input_prototype()[1].float_features.shape[
1
]
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map),
max_num_actions=action_dim,
)
else:
# pyre-fixme[16]: `ParametricDQNBase` has no attribute `rl_parameters`.
sampler = SoftmaxActionSampler(temperature=self.rl_parameters.temperature)
scorer = parametric_dqn_scorer(
max_num_actions=action_dim,
# pyre-fixme[6]: Expected `ModelBase` for 2nd param but got
# `Union[torch.Tensor, torch.nn.Module]`.
q_network=trainer_module.q_network,
)
return Policy(scorer=scorer, sampler=sampler)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.state_float_features)
@property
def action_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.action_float_features)
    # TODO: add the get_data_module() method below once the methods in
    # `ParametricDqnDataModule` are fully implemented
# def get_data_module(
# self,
# *,
# input_table_spec: Optional[TableSpec] = None,
# reward_options: Optional[RewardOptions] = None,
# setup_data: Optional[Dict[str, bytes]] = None,
# saved_setup_data: Optional[Dict[str, bytes]] = None,
# reader_options: Optional[ReaderOptions] = None,
# resource_options: Optional[ResourceOptions] = None,
# ) -> Optional[ReAgentDataModule]:
# return ParametricDqnDataModule(
# input_table_spec=input_table_spec,
# reward_options=reward_options,
# setup_data=setup_data,
# saved_setup_data=saved_setup_data,
# reader_options=reader_options,
# resource_options=resource_options,
# model_manager=self,
# )
class ParametricDqnDataModule(ManualDataModule):
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
# Run state feature identification
state_preprocessing_options = (
self.model_manager.state_preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id
for ffi in self.model_manager.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = replace(
state_preprocessing_options, allowedlist_features=state_features
)
state_normalization_parameters = identify_normalization_parameters(
input_table_spec, InputColumn.STATE_FEATURES, state_preprocessing_options
)
# Run action feature identification
action_preprocessing_options = (
self.model_manager.action_preprocessing_options or PreprocessingOptions()
)
action_features = [
ffi.feature_id
for ffi in self.model_manager.action_feature_config.float_feature_infos
]
logger.info(f"action allowedlist_features: {action_features}")
action_preprocessing_options = replace(
action_preprocessing_options, allowedlist_features=action_features
)
action_normalization_parameters = identify_normalization_parameters(
input_table_spec, InputColumn.ACTION, action_preprocessing_options
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=state_normalization_parameters
),
NormalizationKey.ACTION: NormalizationData(
dense_normalization_parameters=action_normalization_parameters
),
}
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
raise NotImplementedError
def build_batch_preprocessor(self) -> BatchPreprocessor:
raise NotImplementedError()
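# Illustrative sketch (not part of ReAgent): SoftmaxActionSampler used in create_policy
# above samples actions in proportion to exp(q / temperature). The Q-values below are
# made up; they only illustrate how temperature trades off greediness vs. exploration.
if __name__ == "__main__":
    import torch
    q_values = torch.tensor([[1.0, 2.0, 3.0]])
    for temperature in (0.1, 1.0, 10.0):
        probs = torch.softmax(q_values / temperature, dim=1)
        # low temperature -> nearly greedy, high temperature -> nearly uniform
        print(temperature, [round(p, 3) for p in probs.squeeze(0).tolist()])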
| 7,398 | 38.356383 | 88 | py |
ReAgent | ReAgent-master/reagent/model_managers/discrete/discrete_qrdqn.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase
from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected
from reagent.net_builder.quantile_dqn.dueling_quantile import DuelingQuantile
from reagent.net_builder.unions import (
DiscreteDQNNetBuilder__Union,
QRDQNNetBuilder__Union,
)
from reagent.training import QRDQNTrainer, QRDQNTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class DiscreteQRDQN(DiscreteDQNBase):
__hash__ = param_hash
trainer_param: QRDQNTrainerParameters = field(
default_factory=QRDQNTrainerParameters
)
net_builder: QRDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `DuelingQuantile`.
default_factory=lambda: QRDQNNetBuilder__Union(
DuelingQuantile=DuelingQuantile()
)
)
cpe_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`
default_factory=lambda: DiscreteDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert len(self.action_names) > 1, "DiscreteQRDQNModel needs at least 2 actions"
assert (
self.trainer_param.minibatch_size % 8 == 0
), "The minibatch size must be divisible by 8 for performance reasons."
@property
def action_names(self):
return self.trainer_param.actions
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> QRDQNTrainer:
net_builder = self.net_builder.value
q_network = net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `num_atoms`.
num_atoms=self.trainer_param.num_atoms,
)
q_network_target = q_network.get_target_network()
reward_options = reward_options or RewardOptions()
metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values)
reward_network, q_network_cpe, q_network_cpe_target = None, None, None
if self.eval_parameters.calc_cpe_in_training:
# Metrics + reward
num_output_nodes = (len(metrics_to_score) + 1) * len(
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`.
self.trainer_param.actions
)
cpe_net_builder = self.cpe_net_builder.value
reward_network = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe_target = q_network_cpe.get_target_network()
trainer = QRDQNTrainer(
q_network=q_network,
q_network_target=q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=metrics_to_score,
evaluation=self.eval_parameters,
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
assert isinstance(trainer_module, QRDQNTrainer)
net_builder = self.net_builder.value
return net_builder.build_serving_module(
trainer_module.q_network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
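# Illustrative sketch (not part of ReAgent): a QR-DQN head predicts `num_atoms` quantiles
# per action; a scalar Q-value can be recovered by averaging the quantiles. The shapes and
# values below are made up for illustration only.
if __name__ == "__main__":
    import torch
    batch_size, num_actions, num_atoms = 1, 2, 4
    quantiles = torch.randn(batch_size, num_actions, num_atoms)
    q_values = quantiles.mean(dim=2)  # expected return per action, shape (batch, num_actions)
    greedy_action = q_values.argmax(dim=1)  # act greedily on the expected Q-values
    print(q_values.shape, greedy_action.shape)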
| 4,782 | 35.792308 | 88 | py |
ReAgent | ReAgent-master/reagent/model_managers/discrete/discrete_dqn.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import DiscreteDQNNetBuilder__Union
from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter
from reagent.training import DQNTrainer, DQNTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class DiscreteDQN(DiscreteDQNBase):
__hash__ = param_hash
trainer_param: DQNTrainerParameters = field(default_factory=DQNTrainerParameters)
net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `Dueling`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
cpe_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
len(self.action_names) > 1
), f"DiscreteDQNModel needs at least 2 actions. Got {self.action_names}."
if self.trainer_param.minibatch_size % 8 != 0:
            logger.warning(
f"minibatch size ({self.trainer_param.minibatch_size}) "
"should be divisible by 8 for performance reasons!"
)
@property
def action_names(self):
return self.trainer_param.actions
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> DQNTrainer:
net_builder = self.net_builder.value
q_network = net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
q_network_target = q_network.get_target_network()
reward_options = reward_options or RewardOptions()
metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values)
reward_network, q_network_cpe, q_network_cpe_target = None, None, None
if self.eval_parameters.calc_cpe_in_training:
# Metrics + reward
num_output_nodes = (len(metrics_to_score) + 1) * len(
# pyre-fixme[16]: `DQNTrainerParameters` has no attribute `actions`.
self.trainer_param.actions
)
cpe_net_builder = self.cpe_net_builder.value
reward_network = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe_target = q_network_cpe.get_target_network()
trainer = DQNTrainer(
q_network=q_network,
q_network_target=q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=metrics_to_score,
evaluation=self.eval_parameters,
# pyre-fixme[16]: `DQNTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def get_reporter(self):
return DiscreteDQNReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
def serving_module_names(self):
module_names = ["default_model"]
if len(self.action_names) == 2:
module_names.append("binary_difference_scorer")
return module_names
def build_serving_modules(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
):
assert isinstance(trainer_module, DQNTrainer)
serving_modules = {
"default_model": self.build_serving_module(
trainer_module, normalization_data_map
)
}
if len(self.action_names) == 2:
serving_modules.update(
{
"binary_difference_scorer": self._build_binary_difference_scorer(
trainer_module.q_network, normalization_data_map
)
}
)
return serving_modules
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
assert isinstance(trainer_module, DQNTrainer)
net_builder = self.net_builder.value
return net_builder.build_serving_module(
trainer_module.q_network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
def _build_binary_difference_scorer(
self,
network,
normalization_data_map: Dict[str, NormalizationData],
):
assert network is not None
net_builder = self.net_builder.value
return net_builder.build_binary_difference_scorer(
network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
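# Illustrative sketch (not part of ReAgent): build_trainer above sizes the CPE networks as
# (number of metrics + 1) * number of actions, i.e. one output per (metric-or-reward, action)
# pair. The metric and action names below are made up.
if __name__ == "__main__":
    metrics_to_score = ["metric_a", "metric_b"]  # hypothetical extra metrics to evaluate
    action_names = ["no_op", "up", "down"]  # hypothetical discrete actions
    num_output_nodes = (len(metrics_to_score) + 1) * len(action_names)
    assert num_output_nodes == 9  # (2 metrics + reward) x 3 actions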
| 6,355 | 35.32 | 85 | py |
ReAgent | ReAgent-master/reagent/model_managers/discrete/discrete_crr.py | #!/usr/bin/env python3
# Note: this file is modeled after td3.py
import logging
from typing import Dict, Optional
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
param_hash,
)
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase
from reagent.models.base import ModelBase
from reagent.net_builder.discrete_actor.fully_connected import (
FullyConnected as DiscreteFullyConnected,
)
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import (
DiscreteActorNetBuilder__Union,
DiscreteDQNNetBuilder__Union,
)
from reagent.reporting.discrete_crr_reporter import DiscreteCRRReporter
from reagent.training import DiscreteCRRTrainer, CRRTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
class ActorPolicyWrapper(Policy):
"""Actor's forward function is our act"""
def __init__(self, actor_network):
self.actor_network = actor_network
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def act(
self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
) -> rlt.ActorOutput:
self.actor_network.eval()
output = self.actor_network(obs)
self.actor_network.train()
return output.detach().cpu()
@dataclass
class DiscreteCRR(DiscreteDQNBase):
__hash__ = param_hash
trainer_param: CRRTrainerParameters = field(default_factory=CRRTrainerParameters)
actor_net_builder: DiscreteActorNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteActorNetBuilder__Union(
FullyConnected=DiscreteFullyConnected()
)
)
critic_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
cpe_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
len(self.action_names) > 1
), f"DiscreteCRRModel needs at least 2 actions. Got {self.action_names}."
@property
def action_names(self):
return self.trainer_param.actions
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> DiscreteCRRTrainer:
actor_net_builder = self.actor_net_builder.value
actor_network = actor_net_builder.build_actor(
normalization_data_map[NormalizationKey.STATE], len(self.action_names)
)
actor_network_target = actor_network.get_target_network()
        # The arguments to q1_network and q2_network below are modeled after those in discrete_dqn.py
critic_net_builder = self.critic_net_builder.value
q1_network = critic_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
q1_network_target = q1_network.get_target_network()
q2_network = q2_network_target = None
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute
# `double_q_learning`.
if self.trainer_param.double_q_learning:
q2_network = critic_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
q2_network_target = q2_network.get_target_network()
reward_options = reward_options or RewardOptions()
metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values)
reward_network, q_network_cpe, q_network_cpe_target = None, None, None
if self.eval_parameters.calc_cpe_in_training:
# Metrics + reward
num_output_nodes = (len(metrics_to_score) + 1) * len(
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute `actions`.
self.trainer_param.actions
)
cpe_net_builder = self.cpe_net_builder.value
reward_network = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe_target = q_network_cpe.get_target_network()
trainer = DiscreteCRRTrainer(
actor_network=actor_network,
actor_network_target=actor_network_target,
q1_network=q1_network,
q1_network_target=q1_network_target,
reward_network=reward_network,
q2_network=q2_network,
q2_network_target=q2_network_target,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=metrics_to_score,
evaluation=self.eval_parameters,
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create online actor critic policy."""
assert isinstance(trainer_module, DiscreteCRRTrainer)
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_actor_module(trainer_module, normalization_data_map)
)
else:
return ActorPolicyWrapper(trainer_module.actor_network)
def get_reporter(self):
return DiscreteCRRReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
# Note: when using test_gym.py as the entry point, the normalization data
# is set when the line normalization = build_normalizer(env) is executed.
# The code then calls build_state_normalizer() and build_action_normalizer()
# in utils.py
def serving_module_names(self):
module_names = ["default_model", "dqn", "actor_dqn"]
if len(self.action_names) == 2:
module_names.append("binary_difference_scorer")
return module_names
def build_serving_modules(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
):
"""
`actor_dqn` is the actor module wrapped in the DQN predictor wrapper.
This helps putting the actor in places where DQN predictor wrapper is expected.
If the policy is greedy, then this wrapper would work.
"""
assert isinstance(trainer_module, DiscreteCRRTrainer)
serving_modules = {
"default_model": self.build_actor_module(
trainer_module, normalization_data_map
),
"dqn": self._build_dqn_module(
trainer_module.q1_network, normalization_data_map
),
"actor_dqn": self._build_dqn_module(
ActorDQN(trainer_module.actor_network), normalization_data_map
),
}
if len(self.action_names) == 2:
serving_modules.update(
{
"binary_difference_scorer": self._build_binary_difference_scorer(
ActorDQN(trainer_module.actor_network), normalization_data_map
),
}
)
return serving_modules
def _build_dqn_module(
self,
network,
normalization_data_map: Dict[str, NormalizationData],
):
critic_net_builder = self.critic_net_builder.value
assert network is not None
return critic_net_builder.build_serving_module(
network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
def _build_binary_difference_scorer(
self,
network,
normalization_data_map: Dict[str, NormalizationData],
):
critic_net_builder = self.critic_net_builder.value
assert network is not None
return critic_net_builder.build_binary_difference_scorer(
network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
# Also, even though the build_serving_module below is directed to
# discrete_actor_net_builder.py, which returns ActorPredictorWrapper,
# just like in the continuous_actor_net_builder.py, the outputs of the
# discrete actor will still be computed differently from those of the
# continuous actor because during serving, the act() function for the
# Agent class in gym/agents/agents.py returns
# self.action_extractor(actor_output), which is created in
# create_for_env_with_serving_policy, when
# env.get_serving_action_extractor() is called. During serving,
# action_extractor calls serving_action_extractor() in env_wrapper.py,
# which checks the type of action_space during serving time and treats
# spaces.Discrete differently from spaces.Box (continuous).
def build_actor_module(
self,
trainer_module: DiscreteCRRTrainer,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
net_builder = self.actor_net_builder.value
return net_builder.build_serving_module(
trainer_module.actor_network,
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
action_feature_ids=list(range(len(self.action_names))),
)
class ActorDQN(ModelBase):
def __init__(self, actor):
super().__init__()
self.actor = actor
def input_prototype(self):
return self.actor.input_prototype()
def forward(self, state):
return self.actor(state).action
| 11,523 | 37.033003 | 101 | py |
ReAgent | ReAgent-master/reagent/model_managers/discrete/discrete_c51dqn.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import param_hash, NormalizationData, NormalizationKey
from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase
from reagent.net_builder.categorical_dqn.categorical import Categorical
from reagent.net_builder.unions import CategoricalDQNNetBuilder__Union
from reagent.training import C51Trainer, C51TrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class DiscreteC51DQN(DiscreteDQNBase):
__hash__ = param_hash
trainer_param: C51TrainerParameters = field(default_factory=C51TrainerParameters)
net_builder: CategoricalDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `Categorical`.
default_factory=lambda: CategoricalDQNNetBuilder__Union(
Categorical=Categorical()
)
)
cpe_net_builder: CategoricalDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `Categorical`.
default_factory=lambda: CategoricalDQNNetBuilder__Union(
Categorical=Categorical()
)
)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert len(self.action_names) > 1, "DiscreteC51DQN needs at least 2 actions"
assert (
self.trainer_param.minibatch_size % 8 == 0
), "The minibatch size must be divisible by 8 for performance reasons."
@property
def action_names(self):
return self.trainer_param.actions
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> C51Trainer:
net_builder = self.net_builder.value
q_network = net_builder.build_q_network(
state_normalization_data=normalization_data_map[NormalizationKey.STATE],
output_dim=len(self.action_names),
# pyre-fixme[16]: `C51TrainerParameters` has no attribute `num_atoms`.
num_atoms=self.trainer_param.num_atoms,
# pyre-fixme[16]: `C51TrainerParameters` has no attribute `qmin`.
qmin=self.trainer_param.qmin,
# pyre-fixme[16]: `C51TrainerParameters` has no attribute `qmax`.
qmax=self.trainer_param.qmax,
)
q_network_target = q_network.get_target_network()
return C51Trainer(
q_network=q_network,
q_network_target=q_network_target,
# pyre-fixme[16]: `C51TrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
assert isinstance(trainer_module, C51Trainer)
net_builder = self.net_builder.value
return net_builder.build_serving_module(
trainer_module.q_network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
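# Illustrative sketch (not part of ReAgent): a C51 head predicts a categorical distribution
# over `num_atoms` fixed support points spaced between qmin and qmax; the scalar Q-value is
# the expectation over that support. The probabilities below are made up.
if __name__ == "__main__":
    import torch
    qmin, qmax, num_atoms = -10.0, 10.0, 5
    support = torch.linspace(qmin, qmax, num_atoms)  # atom locations z_i
    probs = torch.tensor([0.1, 0.2, 0.4, 0.2, 0.1])  # predicted p(z_i) for a single action
    q_value = (probs * support).sum()  # E[Z] = sum_i p_i * z_i
    print(float(q_value))  # 0.0 for this symmetric example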
| 3,985 | 37.699029 | 85 | py |
ReAgent | ReAgent-master/reagent/model_managers/model_based/cross_entropy_method.py | #!/usr/bin/env python3
import logging
from typing import Optional, Dict
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
CEMTrainerParameters,
param_hash,
NormalizationData,
NormalizationKey,
)
from reagent.gym.policies.policy import Policy
from reagent.model_managers.model_based.world_model import WorldModel
from reagent.model_managers.world_model_base import WorldModelBase
from reagent.models.cem_planner import CEMPlannerNetwork
from reagent.preprocessing.identify_types import CONTINUOUS_ACTION
from reagent.preprocessing.normalization import get_num_output_features
from reagent.training import ReAgentLightningModule
from reagent.training.cem_trainer import CEMTrainer
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
class CEMPolicy(Policy):
def __init__(self, cem_planner_network: CEMPlannerNetwork, discrete_action: bool):
self.cem_planner_network = cem_planner_network
self.discrete_action = discrete_action
# TODO: consider possible_actions_mask
def act(
self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
) -> rlt.ActorOutput:
greedy = self.cem_planner_network(obs)
if self.discrete_action:
_, onehot = greedy
return rlt.ActorOutput(
action=onehot.unsqueeze(0), log_prob=torch.tensor(0.0)
)
else:
return rlt.ActorOutput(
action=greedy.unsqueeze(0), log_prob=torch.tensor(0.0)
)
@dataclass
class CrossEntropyMethod(WorldModelBase):
__hash__ = param_hash
trainer_param: CEMTrainerParameters = field(default_factory=CEMTrainerParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
# TODO: should this be in base class?
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
assert isinstance(trainer_module, CEMTrainer)
# pyre-fixme[16]: `CrossEntropyMethod` has no attribute `discrete_action`.
return CEMPolicy(trainer_module.cem_planner_network, self.discrete_action)
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> CEMTrainer:
# pyre-fixme[45]: Cannot instantiate abstract class `WorldModel`.
world_model_manager: WorldModel = WorldModel(
trainer_param=self.trainer_param.mdnrnn
)
world_model_manager.build_trainer(
use_gpu=use_gpu,
reward_options=reward_options,
normalization_data_map=normalization_data_map,
)
world_model_trainers = [
world_model_manager.build_trainer(
normalization_data_map, reward_options=reward_options, use_gpu=use_gpu
)
for _ in range(self.trainer_param.num_world_models)
]
world_model_nets = [trainer.memory_network for trainer in world_model_trainers]
terminal_effective = self.trainer_param.mdnrnn.not_terminal_loss_weight > 0
action_normalization_parameters = normalization_data_map[
NormalizationKey.ACTION
].dense_normalization_parameters
sorted_action_norm_vals = list(action_normalization_parameters.values())
discrete_action = sorted_action_norm_vals[0].feature_type != CONTINUOUS_ACTION
action_upper_bounds, action_lower_bounds = None, None
if not discrete_action:
action_upper_bounds = np.array(
[v.max_value for v in sorted_action_norm_vals]
)
action_lower_bounds = np.array(
[v.min_value for v in sorted_action_norm_vals]
)
cem_planner_network = CEMPlannerNetwork(
mem_net_list=world_model_nets,
cem_num_iterations=self.trainer_param.cem_num_iterations,
cem_population_size=self.trainer_param.cem_population_size,
ensemble_population_size=self.trainer_param.ensemble_population_size,
num_elites=self.trainer_param.num_elites,
plan_horizon_length=self.trainer_param.plan_horizon_length,
state_dim=get_num_output_features(
normalization_data_map[
NormalizationKey.STATE
].dense_normalization_parameters
),
action_dim=get_num_output_features(
normalization_data_map[
NormalizationKey.ACTION
].dense_normalization_parameters
),
discrete_action=discrete_action,
terminal_effective=terminal_effective,
gamma=self.trainer_param.rl.gamma,
alpha=self.trainer_param.alpha,
epsilon=self.trainer_param.epsilon,
action_upper_bounds=action_upper_bounds,
action_lower_bounds=action_lower_bounds,
)
# store for building policy
# pyre-fixme[16]: `CrossEntropyMethod` has no attribute `discrete_action`.
self.discrete_action = discrete_action
logger.info(
f"Built CEM network with discrete action = {discrete_action}, "
f"action_upper_bound={action_upper_bounds}, "
f"action_lower_bounds={action_lower_bounds}"
)
return CEMTrainer(
cem_planner_network=cem_planner_network,
world_model_trainers=world_model_trainers,
parameters=self.trainer_param,
)
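# Illustrative sketch (not part of ReAgent): the cross-entropy method behind
# CEMPlannerNetwork, reduced to its core loop on a made-up 1-D objective. Each iteration
# samples candidate action sequences from a Gaussian, keeps the best `num_elites`, and
# refits the Gaussian to those elites. All names and numbers here are illustrative only.
if __name__ == "__main__":
    import numpy as np
    def objective(actions):
        return -((actions - 0.3) ** 2).sum(axis=1)  # hypothetical reward, maximized at 0.3
    plan_horizon_length, population_size, num_elites, num_iterations = 3, 64, 8, 10
    mean, std = np.zeros(plan_horizon_length), np.ones(plan_horizon_length)
    rng = np.random.default_rng(0)
    for _ in range(num_iterations):
        candidates = rng.normal(mean, std, size=(population_size, plan_horizon_length))
        elites = candidates[np.argsort(objective(candidates))[-num_elites:]]
        mean, std = elites.mean(axis=0), elites.std(axis=0) + 1e-6
    print(mean.round(2))  # approaches [0.3, 0.3, 0.3]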
| 5,783 | 38.346939 | 88 | py |
ReAgent | ReAgent-master/reagent/model_managers/model_based/synthetic_reward.py | #!/usr/bin/env python3
import logging
from dataclasses import replace
from typing import Dict, List, Optional, Tuple
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
)
from reagent.core.parameters import param_hash
from reagent.data.data_fetcher import DataFetcher
from reagent.data.manual_data_module import ManualDataModule
from reagent.data.reagent_data_module import ReAgentDataModule
from reagent.model_managers.model_manager import ModelManager
from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import (
SingleStepSyntheticReward,
)
from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union
from reagent.preprocessing.normalization import (
get_feature_config,
)
from reagent.preprocessing.types import InputColumn
from reagent.reporting.reward_network_reporter import RewardNetworkReporter
from reagent.training import ReAgentLightningModule
from reagent.training import RewardNetTrainer, RewardNetworkTrainerParameters
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
PreprocessingOptions,
ReaderOptions,
RewardOptions,
TableSpec,
ResourceOptions,
)
logger = logging.getLogger(__name__)
@dataclass
class SyntheticReward(ModelManager):
"""
    Train models to attribute single-step rewards from sparse/delayed/aggregated rewards.
Ideas from:
1. Synthetic Returns for Long-Term Credit Assignment: https://arxiv.org/pdf/2102.12425.pdf
2. RUDDER: Return Decomposition for Delayed Rewards: https://arxiv.org/pdf/1806.07857.pdf
3. Optimizing Agent Behavior over Long Time Scales by Transporting Value: https://arxiv.org/pdf/1810.06721.pdf
4. Sequence Modeling of Temporal Credit Assignment for Episodic Reinforcement Learning: https://arxiv.org/pdf/1905.13420.pdf
"""
__hash__ = param_hash
trainer_param: RewardNetworkTrainerParameters = field(
default_factory=RewardNetworkTrainerParameters
)
net_builder: SyntheticRewardNetBuilder__Union = field(
        # pyre-fixme[28]: Unexpected keyword argument `SingleStepSyntheticReward`.
default_factory=lambda: SyntheticRewardNetBuilder__Union(
SingleStepSyntheticReward=SingleStepSyntheticReward()
)
)
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
state_preprocessing_options: Optional[PreprocessingOptions] = None
action_preprocessing_options: Optional[PreprocessingOptions] = None
state_float_features: Optional[List[Tuple[int, str]]] = None
parametric_action_float_features: Optional[List[Tuple[int, str]]] = None
discrete_action_names: Optional[List[str]] = None
# max sequence length to look back to distribute rewards
max_seq_len: int = 5
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert self.max_seq_len is not None and self.max_seq_len > 0
assert (
self.state_preprocessing_options is None
or self.state_preprocessing_options.allowedlist_features is None
), (
"Please set state allowlist features in state_float_features field of "
"config instead"
)
if self.discrete_action_names:
assert (
type(self.discrete_action_names) is list
and len(self.discrete_action_names) > 1
), f"Assume this is a discrete action problem, you need to specify at least 2 actions. Got {self.discrete_action_names}."
else:
assert (
self.action_preprocessing_options is None
or self.action_preprocessing_options.allowedlist_features is None
), (
"Please set action allowlist features in parametric_action_float_features field of "
"config instead"
)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.state_float_features)
@property
def action_feature_config(self) -> rlt.ModelFeatureConfig:
return get_feature_config(self.parametric_action_float_features)
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
reader_options: Optional[ReaderOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
return SyntheticRewardDataModule(
input_table_spec=input_table_spec,
reward_options=reward_options,
setup_data=setup_data,
saved_setup_data=saved_setup_data,
reader_options=reader_options,
resource_options=resource_options,
model_manager=self,
)
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> RewardNetTrainer:
net_builder = self.net_builder.value
action_normalization_data = None
if not self.discrete_action_names:
action_normalization_data = normalization_data_map[NormalizationKey.ACTION]
synthetic_reward_network = net_builder.build_synthetic_reward_network(
normalization_data_map[NormalizationKey.STATE],
action_normalization_data=action_normalization_data,
discrete_action_names=self.discrete_action_names,
)
trainer = RewardNetTrainer(
synthetic_reward_network,
# pyre-fixme[16]: `RewardNetworkTrainerParameters` has no attribute
# `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def get_reporter(self):
return RewardNetworkReporter(
self.trainer_param.loss_type,
str(self.net_builder.value),
)
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
assert isinstance(trainer_module, RewardNetTrainer)
net_builder = self.net_builder.value
action_normalization_data = None
if not self.discrete_action_names:
action_normalization_data = normalization_data_map[NormalizationKey.ACTION]
return net_builder.build_serving_module(
self.max_seq_len,
trainer_module.reward_net,
normalization_data_map[NormalizationKey.STATE],
action_normalization_data=action_normalization_data,
discrete_action_names=self.discrete_action_names,
)
class SyntheticRewardDataModule(ManualDataModule):
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
state_preprocessing_options = (
self.model_manager.state_preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id
for ffi in self.model_manager.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = replace(
state_preprocessing_options, allowedlist_features=state_features
)
state_normalization_parameters = identify_normalization_parameters(
input_table_spec, InputColumn.STATE_FEATURES, state_preprocessing_options
)
if self.model_manager.discrete_action_names:
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=state_normalization_parameters
)
}
# Run action feature identification
action_preprocessing_options = (
self.model_manager.action_preprocessing_options or PreprocessingOptions()
)
action_features = [
ffi.feature_id
for ffi in self.model_manager.action_feature_config.float_feature_infos
]
logger.info(f"action allowedlist_features: {action_features}")
action_preprocessing_options = replace(
action_preprocessing_options, allowedlist_features=action_features
)
action_normalization_parameters = identify_normalization_parameters(
input_table_spec, InputColumn.ACTION, action_preprocessing_options
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=state_normalization_parameters
),
NormalizationKey.ACTION: NormalizationData(
dense_normalization_parameters=action_normalization_parameters
),
}
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
return data_fetcher.query_data_synthetic_reward(
input_table_spec=input_table_spec,
discrete_action_names=self.model_manager.discrete_action_names,
sample_range=sample_range,
max_seq_len=self.model_manager.max_seq_len,
)
def build_batch_preprocessor(self):
raise NotImplementedError
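# Illustrative sketch (not part of ReAgent): the core idea from the SyntheticReward
# docstring -- learn per-step rewards whose sum matches an observed aggregated episode
# reward. The tiny model and data below are made up; the real training loop lives in
# RewardNetTrainer.
if __name__ == "__main__":
    import torch
    seq_len, state_dim = 5, 3
    step_reward_net = torch.nn.Linear(state_dim, 1)  # hypothetical per-step reward model
    optimizer = torch.optim.Adam(step_reward_net.parameters(), lr=1e-2)
    states = torch.randn(seq_len, state_dim)  # one synthetic episode
    aggregated_reward = torch.tensor(2.0)  # only the episode total is observed
    for _ in range(200):
        predicted_step_rewards = step_reward_net(states).squeeze(-1)  # shape (seq_len,)
        loss = (predicted_step_rewards.sum() - aggregated_reward) ** 2
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(float(predicted_step_rewards.sum()))  # approaches 2.0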
| 9,887 | 38.552 | 133 | py |
ReAgent | ReAgent-master/reagent/model_managers/parametric/parametric_dqn.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import param_hash, NormalizationData, NormalizationKey
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.model_managers.parametric_dqn_base import ParametricDQNBase
from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import ParametricDQNNetBuilder__Union
from reagent.training import ParametricDQNTrainer, ParametricDQNTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class ParametricDQN(ParametricDQNBase):
__hash__ = param_hash
trainer_param: ParametricDQNTrainerParameters = field(
default_factory=ParametricDQNTrainerParameters
)
net_builder: ParametricDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ParametricDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> ParametricDQNTrainer:
net_builder = self.net_builder.value
# pyre-fixme[16]: `ParametricDQN` has no attribute `_q_network`.
self._q_network = net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
# Metrics + reward
reward_options = reward_options or RewardOptions()
metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values)
reward_output_dim = len(metrics_to_score) + 1
reward_network = net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
output_dim=reward_output_dim,
)
q_network_target = self._q_network.get_target_network()
return ParametricDQNTrainer(
q_network=self._q_network,
q_network_target=q_network_target,
reward_network=reward_network,
# pyre-fixme[16]: `ParametricDQNTrainerParameters` has no attribute
# `asdict`.
**self.trainer_param.asdict(),
)
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, ParametricDQNTrainer)
net_builder = self.net_builder.value
return net_builder.build_serving_module(
trainer_module.q_network,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
| 3,127 | 36.686747 | 84 | py |
ReAgent | ReAgent-master/reagent/model_managers/policy_gradient/reinforce.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.unions import (
DiscreteDQNNetBuilder__Union,
ValueNetBuilder__Union,
)
from reagent.training import ReAgentLightningModule
from reagent.training import ReinforceTrainer, ReinforceTrainerParameters
from reagent.workflow.types import (
ModelFeatureConfigProvider__Union,
RewardOptions,
)
logger = logging.getLogger(__name__)
@dataclass
class Reinforce(ModelManager):
__hash__ = param_hash
trainer_param: ReinforceTrainerParameters = field(
default_factory=ReinforceTrainerParameters
)
# using DQN net here because it supports `possible_actions_mask`
policy_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-ignore
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
value_net_builder: Optional[ValueNetBuilder__Union] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-ignore
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
sampler_temperature: float = 1.0
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
self._policy: Optional[Policy] = None
assert (
len(self.action_names) > 1
), f"REINFORCE needs at least 2 actions. Got {self.action_names}."
@property
def action_names(self):
return self.trainer_param.actions
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> ReinforceTrainer:
policy_net_builder = self.policy_net_builder.value
policy_network = policy_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
value_net = None
value_net_builder = self.value_net_builder
if value_net_builder:
value_net_builder = value_net_builder.value
value_net = value_net_builder.build_value_network(
normalization_data_map[NormalizationKey.STATE]
)
trainer = ReinforceTrainer(
policy=self._create_policy(policy_network),
value_net=value_net,
**self.trainer_param.asdict(), # pyre-ignore
)
return trainer
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
assert isinstance(trainer_module, ReinforceTrainer)
if serving:
assert normalization_data_map is not None
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map)
)
else:
return self._create_policy(trainer_module.scorer)
def _create_policy(self, policy_network):
if self._policy is None:
sampler = SoftmaxActionSampler(temperature=self.sampler_temperature)
self._policy = Policy(scorer=policy_network, sampler=sampler)
return self._policy
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, ReinforceTrainer)
policy_serving_module = self.policy_net_builder.value.build_serving_module(
q_network=trainer_module.scorer,
state_normalization_data=normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
return policy_serving_module
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
| 4,827 | 36.426357 | 86 | py |
ReAgent | ReAgent-master/reagent/model_managers/policy_gradient/ppo.py | #!/usr/bin/env python3
import logging
from typing import Dict, Optional
import torch
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData
from reagent.core.parameters import NormalizationKey
from reagent.core.parameters import param_hash
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.unions import (
DiscreteDQNNetBuilder__Union,
ValueNetBuilder__Union,
)
from reagent.training import PPOTrainer, PPOTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import (
ModelFeatureConfigProvider__Union,
RewardOptions,
)
logger = logging.getLogger(__name__)
@dataclass
class PPO(ModelManager):
__hash__ = param_hash
trainer_param: PPOTrainerParameters = field(default_factory=PPOTrainerParameters)
# using DQN net here because it supports `possible_actions_mask`
policy_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-ignore
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
value_net_builder: Optional[ValueNetBuilder__Union] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-ignore
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
sampler_temperature: float = 1.0
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
self._policy: Optional[Policy] = None
assert (
len(self.action_names) > 1
), f"PPO needs at least 2 actions. Got {self.action_names}."
@property
def action_names(self):
return self.trainer_param.actions
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> PPOTrainer:
policy_net_builder = self.policy_net_builder.value
policy_network = policy_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
value_net = None
value_net_builder = self.value_net_builder
if value_net_builder:
value_net_builder = value_net_builder.value
value_net = value_net_builder.build_value_network(
normalization_data_map[NormalizationKey.STATE]
)
trainer = PPOTrainer(
policy=self._create_policy(policy_network),
value_net=value_net,
**self.trainer_param.asdict(), # pyre-ignore
)
return trainer
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
assert isinstance(trainer_module, PPOTrainer)
if serving:
assert normalization_data_map is not None
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map)
)
else:
return self._create_policy(trainer_module.scorer)
def _create_policy(self, policy_network):
if self._policy is None:
sampler = SoftmaxActionSampler(temperature=self.sampler_temperature)
self._policy = Policy(scorer=policy_network, sampler=sampler)
return self._policy
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, PPOTrainer)
policy_serving_module = self.policy_net_builder.value.build_serving_module(
q_network=trainer_module.scorer,
state_normalization_data=normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
return policy_serving_module
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
| 4,753 | 36.433071 | 86 | py |
ReAgent | ReAgent-master/reagent/model_managers/actor_critic/sac.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash
from reagent.model_managers.actor_critic_base import ActorCriticBase
from reagent.models.base import ModelBase
from reagent.net_builder.continuous_actor.gaussian_fully_connected import (
GaussianFullyConnected,
)
from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import (
ContinuousActorNetBuilder__Union,
ParametricDQNNetBuilder__Union,
ValueNetBuilder__Union,
)
from reagent.net_builder.value.fully_connected import (
FullyConnected as ValueFullyConnected,
)
from reagent.training import ReAgentLightningModule
from reagent.training import SACTrainer, SACTrainerParameters
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class SAC(ActorCriticBase):
__hash__ = param_hash
trainer_param: SACTrainerParameters = field(default_factory=SACTrainerParameters)
actor_net_builder: ContinuousActorNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `GaussianFullyConnected`.
# pyre-fixme[28]: Unexpected keyword argument `GaussianFullyConnected`.
default_factory=lambda: ContinuousActorNetBuilder__Union(
GaussianFullyConnected=GaussianFullyConnected()
)
)
critic_net_builder: ParametricDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ParametricDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
value_net_builder: Optional[ValueNetBuilder__Union] = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ValueNetBuilder__Union(
FullyConnected=ValueFullyConnected()
)
)
use_2_q_functions: bool = True
serve_mean_policy: bool = True
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
self.rl_parameters = self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> SACTrainer:
actor_net_builder = self.actor_net_builder.value
actor_network = actor_net_builder.build_actor(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
critic_net_builder = self.critic_net_builder.value
q1_network = critic_net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
q2_network = (
critic_net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
if self.use_2_q_functions
else None
)
value_network = None
value_net_builder = self.value_net_builder
if value_net_builder:
value_net_builder = value_net_builder.value
value_network = value_net_builder.build_value_network(
normalization_data_map[NormalizationKey.STATE]
)
trainer = SACTrainer(
actor_network=actor_network,
q1_network=q1_network,
value_network=value_network,
q2_network=q2_network,
# pyre-fixme[16]: `SACTrainerParameters` has no attribute `asdict`.
# pyre-fixme[16]: `SACTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def get_reporter(self):
return None
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, SACTrainer)
actor_serving_module = self.actor_net_builder.value.build_serving_module(
trainer_module.actor_network,
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
serve_mean_policy=self.serve_mean_policy,
)
return actor_serving_module
# TODO: add in critic
# assert self._q1_network is not None
# _critic_serving_module = self.critic_net_builder.value.build_serving_module(
# self._q1_network,
# self.state_normalization_data,
# self.action_normalization_data,
# )
| 5,112 | 36.321168 | 85 | py |
ReAgent | ReAgent-master/reagent/model_managers/actor_critic/td3.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
param_hash,
)
from reagent.model_managers.actor_critic_base import ActorCriticBase
from reagent.models.base import ModelBase
from reagent.net_builder.continuous_actor.fully_connected import (
FullyConnected as ContinuousFullyConnected,
)
from reagent.net_builder.parametric_dqn.fully_connected import (
FullyConnected as ParametricFullyConnected,
)
from reagent.net_builder.unions import (
ContinuousActorNetBuilder__Union,
ParametricDQNNetBuilder__Union,
)
from reagent.reporting.td3_reporter import TD3Reporter
from reagent.training import ReAgentLightningModule
from reagent.training import TD3Trainer, TD3TrainerParameters
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class TD3(ActorCriticBase):
__hash__ = param_hash
trainer_param: TD3TrainerParameters = field(default_factory=TD3TrainerParameters)
actor_net_builder: ContinuousActorNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ContinuousActorNetBuilder__Union(
FullyConnected=ContinuousFullyConnected()
)
)
critic_net_builder: ParametricDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ParametricDQNNetBuilder__Union(
FullyConnected=ParametricFullyConnected()
)
)
# Why isn't this a parameter in the .yaml config file?
use_2_q_functions: bool = True
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
self.rl_parameters = self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> TD3Trainer:
actor_net_builder = self.actor_net_builder.value
actor_network = actor_net_builder.build_actor(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
critic_net_builder = self.critic_net_builder.value
q1_network = critic_net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
q2_network = (
critic_net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
if self.use_2_q_functions
else None
)
trainer = TD3Trainer(
actor_network=actor_network,
q1_network=q1_network,
q2_network=q2_network,
# pyre-fixme[16]: `TD3TrainerParameters` has no attribute `asdict`.
# pyre-fixme[16]: `TD3TrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def get_reporter(self):
return TD3Reporter()
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, TD3Trainer)
net_builder = self.actor_net_builder.value
return net_builder.build_serving_module(
trainer_module.actor_network,
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ACTION],
)
| 4,213 | 35.017094 | 87 | py |
ReAgent | ReAgent-master/reagent/model_managers/ranking/slate_q.py | #!/usr/bin/env python3
import logging
from typing import Optional, Dict
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import param_hash, NormalizationData, NormalizationKey
from reagent.model_managers.slate_q_base import SlateQBase
from reagent.models.base import ModelBase
from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import ParametricDQNNetBuilder__Union
from reagent.training import ReAgentLightningModule
from reagent.training import SlateQTrainer, SlateQTrainerParameters
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class SlateQ(SlateQBase):
__hash__ = param_hash
slate_size: int = -1
num_candidates: int = -1
trainer_param: SlateQTrainerParameters = field(
default_factory=SlateQTrainerParameters
)
net_builder: ParametricDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: ParametricDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
self.slate_size > 0
), f"Please set valid slate_size (currently {self.slate_size})"
assert (
self.num_candidates > 0
), f"Please set valid num_candidates (currently {self.num_candidates})"
self.eval_parameters = self.trainer_param.evaluation
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> SlateQTrainer:
net_builder = self.net_builder.value
q_network = net_builder.build_q_network(
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ITEM],
)
q_network_target = q_network.get_target_network()
return SlateQTrainer(
q_network=q_network,
q_network_target=q_network_target,
slate_size=self.slate_size,
# pyre-fixme[16]: `SlateQTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
def build_serving_module(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, SlateQTrainer)
net_builder = self.net_builder.value
return net_builder.build_serving_module(
trainer_module.q_network,
normalization_data_map[NormalizationKey.STATE],
normalization_data_map[NormalizationKey.ITEM],
)
| 2,830 | 34.3875 | 83 | py |
ReAgent | ReAgent-master/reagent/lite/optimizer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import heapq
import logging
from collections import defaultdict, deque
from math import floor
from typing import Callable, Dict, Tuple, Optional, List, Any
import nevergrad as ng
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from nevergrad.parametrization.choice import Choice
logger = logging.getLogger(__name__)
ANNEAL_RATE = 0.9997
LEARNING_RATE = 0.001
BATCH_SIZE = 512
# People rarely need more than that
MAX_NUM_BEST_SOLUTIONS = 1000
GREEDY_TEMP = 0.0001
def sample_from_logits(
keyed_logits: Dict[str, nn.Parameter], batch_size: int, temp: float
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
"""Return sampled solutions and sampled log probabilities"""
sampled_log_probs = torch.zeros(batch_size, 1)
sampled_solutions = {}
for k, logits in keyed_logits.items():
softmax_val = F.softmax(logits / temp, dim=-1).squeeze(0)
samples = torch.multinomial(softmax_val, batch_size, replacement=True)
sampled_prob = softmax_val[samples].reshape(-1, 1)
sampled_log_probs += torch.log(sampled_prob)
sampled_solutions[k] = samples
return sampled_solutions, sampled_log_probs
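# Note: the per-key log probabilities are summed because the joint action
# distribution is modeled as a product of independent per-key softmax
# distributions (see PolicyGradientOptimizer below).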
def obj_func_scaler(
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]],
exp_offset_and_scale: Optional[Tuple[float, float]],
) -> Optional[Callable]:
"""
    Scale the objective function to help optimizers escape local minima more easily.
    The scaling formula is: exp((reward - offset) / scale).
    If exp_offset_and_scale is None, the objective is not scaled (i.e., reward == scaled_reward).
"""
if obj_func is None:
return None
if exp_offset_and_scale is not None:
offset, scale = exp_offset_and_scale
def obj_func_scaled(*args, **kwargs):
x = obj_func(*args, **kwargs)
if exp_offset_and_scale is not None:
return x, torch.exp((x - offset) / scale)
else:
return x, x
return obj_func_scaled
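# Illustrative example (values assumed): with exp_offset_and_scale=(2.0, 1.0),
# a raw reward of 1.0 is scaled to exp((1.0 - 2.0) / 1.0) ~= 0.37 and a raw
# reward of 3.0 to exp((3.0 - 2.0) / 1.0) ~= 2.72. The exponential is monotonic,
# so the ordering of rewards is preserved while differences between solutions
# are stretched, which is what helps optimizers escape flat regions.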
def _num_of_params(model: nn.Module) -> int:
return len(torch.cat([p.flatten() for p in model.parameters()]))
def sol_to_tensors(
sampled_sol: Dict[str, torch.Tensor], input_param: ng.p.Dict
) -> torch.Tensor:
one_hot = [
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
F.one_hot(sampled_sol[k], num_classes=len(input_param[k].choices)).type(
torch.FloatTensor
)
for k in sorted(sampled_sol.keys())
]
batch_tensors = torch.cat(one_hot, dim=-1)
return batch_tensors
class BestResultsQueue:
"""Maintain the `max_len` lowest numbers"""
def __init__(self, max_len: int) -> None:
self.max_len = max_len
self.reward_sol_dict = defaultdict(set)
self.heap = []
def insert(self, reward: torch.Tensor, sol: Dict[str, torch.Tensor]) -> None:
        # Negate the reward because the heap keeps the maximal N elements,
        # while all optimizers are minimizers.
reward = -reward
sol_str = str(sol)
# skip duplicated solution
if reward in self.reward_sol_dict and sol_str in self.reward_sol_dict[reward]:
return
self.reward_sol_dict[reward].add(sol_str)
if len(self.heap) < self.max_len:
heapq.heappush(self.heap, (reward, sol_str, sol))
else:
old_r, old_sol_str, old_sol = heapq.heappushpop(
self.heap, (reward, sol_str, sol)
)
self.reward_sol_dict[old_r].remove(old_sol_str)
def topk(self, k: int) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
k = min(k, len(self.heap))
res = heapq.nlargest(k, self.heap)
# a list of (reward, sol) tuples
return [(-r[0], r[2]) for r in res]
class ComboOptimizerBase:
def __init__(
self,
param: ng.p.Dict,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
batch_size: int = BATCH_SIZE,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
) -> None:
for k in param:
assert isinstance(
param[k], Choice
), "Only support discrete parameterization now"
self.param = param
self.obj_func = obj_func_scaler(obj_func, obj_exp_offset_scale)
self.batch_size = batch_size
self.obj_exp_scale = obj_exp_offset_scale
self.last_sample_internal_res = None
self.best_sols = BestResultsQueue(MAX_NUM_BEST_SOLUTIONS)
self._init()
def _init(self) -> None:
pass
def optimize_step(self) -> Tuple:
assert self.obj_func is not None, (
"obj_func not provided. Can't call optimize_step() for optimization. "
"You have to perform manual optimization, i.e., call sample_internal() then update_params()"
)
all_results = self._optimize_step()
sampled_solutions, sampled_reward = all_results[0], all_results[1]
self._maintain_best_solutions(sampled_solutions, sampled_reward)
return all_results
def _maintain_best_solutions(
self, sampled_sols: Dict[str, torch.Tensor], sampled_reward: torch.Tensor
) -> None:
for idx in range(len(sampled_reward)):
r = sampled_reward[idx].item()
sol = {k: sampled_sols[k][idx] for k in sampled_sols}
self.best_sols.insert(r, sol)
def best_solutions(
self, k: int = 1
) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
"""
k solutions with the smallest rewards
Return is a list of tuples (reward, solution)
"""
return self.best_sols.topk(k)
@abc.abstractmethod
def _optimize_step(self) -> Tuple:
"""
The main component of ComboOptimizer.optimize_step(). The user only
needs to loop over optimizer_step() until the budget runs out.
_optimize_step() will call sample_internal() and update_params()
to perform sampling and parameter updating
"""
raise NotImplementedError()
@abc.abstractmethod
def sample_internal(
self,
batch_size: Optional[int] = None,
) -> Tuple:
"""
Record and return sampled solutions and any other important
information for learning.
It samples self.batch_size number of solutions, unless batch_size is provided.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_params(
self,
reward: torch.Tensor,
) -> None:
"""
Update model parameters by reward. Reward is objective function
values evaluated on the solutions sampled by sample_internal()
"""
raise NotImplementedError()
def sample(
self, batch_size: int, temp: Optional[float] = None
) -> Dict[str, torch.Tensor]:
"""
Return sampled solutions, keyed by parameter names.
For discrete parameters, the values are choice indices;
For continuous parameters, the values are sampled float vectors.
This function is usually called after learning is done.
"""
raise NotImplementedError()
def indices_to_raw_choices(
self, sampled_sol: Dict[str, torch.Tensor]
) -> List[Dict[str, str]]:
batch_size = list(sampled_sol.values())[0].shape[0]
sampled_sol_i_vals = []
for i in range(batch_size):
sampled_sol_i = {k: sampled_sol[k][i] for k in sampled_sol}
sampled_sol_i_val = {
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
k: self.param[k].choices.value[v]
for k, v in sampled_sol_i.items()
}
sampled_sol_i_vals.append(sampled_sol_i_val)
return sampled_sol_i_vals
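# Illustrative manual-optimization sketch (hypothetical objective `my_obj_func`
# returning a (batch_size, 1) reward tensor; shown for a sampling-based
# optimizer such as RandomSearchOptimizer or PolicyGradientOptimizer, as an
# alternative to calling optimize_step()):
#
#     sampled_solutions = optimizer.sample_internal()[0]
#     reward = my_obj_func(sampled_solutions)
#     optimizer.update_params(reward)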
class RandomSearchOptimizer(ComboOptimizerBase):
"""
Find the best solution to minimize a black-box function by random search
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
sampling_weights (Optional[Dict[str, np.ndarray]]):
Instead of uniform sampling, we sample solutions with preferred
weights. Key: choice name, value: sampling weights
Example:
>>> _ = torch.manual_seed(0)
>>> np.random.seed(0)
>>> BATCH_SIZE = 4
>>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
>>>
>>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
... reward = torch.ones(BATCH_SIZE, 1)
... for i in range(BATCH_SIZE):
... # the best action is "red"
... if sampled_sol['choice1'][i] == 2:
... reward[i, 0] = 0.0
... return reward
...
>>> optimizer = RandomSearchOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE)
>>> for i in range(10):
... res = optimizer.optimize_step()
...
>>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
>>> assert best_reward == 0
>>> assert best_choice['choice1'] == 2
"""
def __init__(
self,
param: ng.p.Dict,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
batch_size: int = BATCH_SIZE,
sampling_weights: Optional[Dict[str, np.ndarray]] = None,
) -> None:
self.sampling_weights = sampling_weights
super().__init__(
param,
obj_func,
batch_size,
)
def sample(
self, batch_size: int, temp: Optional[float] = None
) -> Dict[str, torch.Tensor]:
assert temp is None, "temp is not used in Random Search"
sampled_sol = {}
for k, param in self.param.items():
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
num_choices = len(param.choices)
if self.sampling_weights is None:
sampled_sol[k] = torch.randint(num_choices, (batch_size,))
else:
weight = self.sampling_weights[k]
sampled_sol[k] = torch.tensor(
np.random.choice(num_choices, batch_size, replace=True, p=weight)
)
return sampled_sol
def sample_internal(
self, batch_size: Optional[int] = None
) -> Tuple[Dict[str, torch.Tensor]]:
batch_size = batch_size or self.batch_size
sampled_sol = self.sample(batch_size, temp=None)
self.last_sample_internal_res = sampled_sol
return (sampled_sol,)
def update_params(self, reward: torch.Tensor):
self.last_sample_internal_res = None
def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
sampled_solutions = self.sample_internal(self.batch_size)[0]
sampled_reward, _ = self.obj_func(sampled_solutions)
sampled_reward = sampled_reward.detach()
self.update_params(sampled_reward)
return sampled_solutions, sampled_reward
class NeverGradOptimizer(ComboOptimizerBase):
"""
Minimize a black-box function using NeverGrad, Rapin & Teytaud, 2018.
https://facebookresearch.github.io/nevergrad/.
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        estimated_budgets (int): estimated budget (number of objective
            evaluations) used by nevergrad for auto-tuning.
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
optimizer_name (Optional[str]): ng optimizer to be used specifically
All possible nevergrad optimizers are available at:
https://facebookresearch.github.io/nevergrad/optimization.html#choosing-an-optimizer.
If not specified, we use the meta optimizer NGOpt
Example:
>>> _ = torch.manual_seed(0)
>>> np.random.seed(0)
>>> BATCH_SIZE = 4
>>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
>>>
>>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
... reward = torch.ones(BATCH_SIZE, 1)
... for i in range(BATCH_SIZE):
... # the best action is "red"
... if sampled_sol['choice1'][i] == 2:
... reward[i, 0] = 0.0
... return reward
...
>>> estimated_budgets = 40
>>> optimizer = NeverGradOptimizer(
... ng_param, estimated_budgets, obj_func, batch_size=BATCH_SIZE,
... )
>>>
>>> for i in range(10):
... res = optimizer.optimize_step()
...
>>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
>>> assert best_reward == 0
>>> assert best_choice['choice1'] == 2
"""
def __init__(
self,
param: ng.p.Dict,
estimated_budgets: int,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
batch_size: int = BATCH_SIZE,
optimizer_name: Optional[str] = None,
) -> None:
self.estimated_budgets = estimated_budgets
self.optimizer_name = optimizer_name
self.optimizer = None
self.choice_to_index = {}
super().__init__(
param,
obj_func,
batch_size,
)
def _init(self) -> None:
optimizer_name = self.optimizer_name or "NGOpt"
logger.info(f"Nevergrad uses {optimizer_name} optimizer")
self.optimizer = ng.optimizers.registry[optimizer_name](
parametrization=self.param,
budget=self.estimated_budgets,
num_workers=self.batch_size,
)
for k, param in self.param.items():
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
self.choice_to_index[k] = {v: i for i, v in enumerate(param.choices.value)}
def sample(
self, batch_size: int, temp: Optional[float] = None
) -> Dict[str, torch.Tensor]:
        assert temp is None, "temp is not used in NeverGradOptimizer"
ng_sols_idx = {k: torch.zeros(batch_size) for k in self.param}
for i in range(batch_size):
ng_sol = self.optimizer.ask().value
for k in ng_sol:
ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol[k]]
return ng_sols_idx
def sample_internal(self, batch_size: Optional[int] = None) -> Tuple:
"""
Return sampled solutions in two formats.
(1) our own format, which is a dictionary and consistent with other optimizers.
The dictionary has choice names as the key and sampled choice indices as the
value (of shape (batch_size, ))
(2) nevergrad format returned by optimizer.ask()
"""
batch_size = batch_size or self.batch_size
ng_sols_idx = {k: torch.zeros(batch_size, dtype=torch.long) for k in self.param}
ng_sols_raw = []
for i in range(batch_size):
ng_sol = self.optimizer.ask()
ng_sols_raw.append(ng_sol)
ng_sol_val = ng_sol.value
for k in ng_sol_val:
ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol_val[k]]
self.last_sample_internal_res = (ng_sols_idx, ng_sols_raw)
return ng_sols_idx, ng_sols_raw
def update_params(self, reward: torch.Tensor) -> None:
_, sampled_sols = self.last_sample_internal_res
for ng_sol, r in zip(sampled_sols, reward):
self.optimizer.tell(ng_sol, r.item())
self.last_sample_internal_res = None
def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
sampled_sol_idxs, sampled_sols = self.sample_internal(self.batch_size)
sampled_reward, _ = self.obj_func(sampled_sol_idxs)
sampled_reward = sampled_reward.detach()
self.update_params(sampled_reward)
return sampled_sol_idxs, sampled_reward
class LogitBasedComboOptimizerBase(ComboOptimizerBase):
def __init__(
self,
param: ng.p.Dict,
start_temp: float,
min_temp: float,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
learning_rate: float = LEARNING_RATE,
anneal_rate: float = ANNEAL_RATE,
batch_size: int = BATCH_SIZE,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
) -> None:
self.temp = start_temp
self.min_temp = min_temp
self.anneal_rate = anneal_rate
self.learning_rate = learning_rate
self.logits: Dict[str, nn.Parameter] = {}
self.optimizer = None
super().__init__(
param,
obj_func,
batch_size,
obj_exp_offset_scale,
)
def _init(self) -> None:
parameters = []
for k in self.param.keys():
v = self.param[k]
if isinstance(v, ng.p.Choice):
logits_shape = len(v.choices)
self.logits[k] = nn.Parameter(torch.randn(1, logits_shape))
parameters.append(self.logits[k])
else:
raise NotImplementedError()
self.optimizer = torch.optim.Adam(parameters, lr=self.learning_rate)
def sample(
self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
) -> Dict[str, torch.Tensor]:
assert temp is not None, "temp is needed for sampling logits"
sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp)
return sampled_solutions
def sample_gumbel(shape: Tuple[int, ...], eps: float = 1e-20) -> torch.Tensor:
U = torch.rand(shape)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax(logits: torch.Tensor, temperature: float) -> torch.Tensor:
y = logits + sample_gumbel(logits.size())
return F.softmax(y / temperature, dim=-1)
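# Explanatory note: gumbel_softmax applies the Gumbel-Softmax (Concrete)
# relaxation. Adding Gumbel(0, 1) noise to the logits and taking a tempered
# softmax gives a differentiable approximation of sampling from the categorical
# distribution; as `temperature` approaches 0 the output approaches a one-hot
# sample, while larger temperatures give smoother, higher-entropy vectors.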
class GumbelSoftmaxOptimizer(LogitBasedComboOptimizerBase):
"""
Minimize a differentiable objective function which takes in categorical inputs.
The method is based on Categorical Reparameterization with Gumbel-Softmax,
Jang, Gu, & Poole, 2016. https://arxiv.org/abs/1611.01144.
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
an analytical function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled gumbel-softmax
distributions of shape (batch_size, num_choices) as the value
start_temp: starting temperature
min_temp: minimal temperature (towards the end of learning) for sampling gumbel-softmax
update_params_within_optimizer (bool): If False, skip updating parameters within this
Optimizer. The Gumbel-softmax parameters will be updated in external systems.
Example:
>>> _ = torch.manual_seed(0)
>>> np.random.seed(0)
>>> BATCH_SIZE = 4
>>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
>>>
>>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
... # best action is "red"
... reward = torch.mm(sampled_sol['choice1'], torch.tensor([[1.], [1.], [0.]]))
... return reward
...
>>> optimizer = GumbelSoftmaxOptimizer(
... ng_param, obj_func, anneal_rate=0.9, batch_size=BATCH_SIZE, learning_rate=0.1
... )
...
>>> for i in range(30):
... res = optimizer.optimize_step()
...
>>> assert optimizer.sample(1)['choice1'] == 2
"""
def __init__(
self,
param: ng.p.Dict,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
start_temp: float = 1.0,
min_temp: float = 0.1,
learning_rate: float = LEARNING_RATE,
anneal_rate: float = ANNEAL_RATE,
batch_size: int = BATCH_SIZE,
update_params_within_optimizer: bool = True,
) -> None:
self.update_params_within_optimizer = update_params_within_optimizer
super().__init__(
param,
start_temp,
min_temp,
obj_func,
learning_rate,
anneal_rate,
batch_size,
# no reward scaling in gumbel softmax
obj_exp_offset_scale=None,
)
def sample_internal(
self, batch_size: Optional[int] = None
) -> Tuple[Dict[str, torch.Tensor]]:
batch_size = batch_size or self.batch_size
sampled_softmax_vals = {}
for k, logits in self.logits.items():
sampled_softmax_vals[k] = gumbel_softmax(
logits.repeat(batch_size, 1), self.temp
)
self.last_sample_internal_res = sampled_softmax_vals
return (sampled_softmax_vals,)
def update_params(self, reward: torch.Tensor) -> None:
if self.update_params_within_optimizer:
reward_mean = reward.mean()
assert reward_mean.requires_grad
self.optimizer.zero_grad()
reward_mean.backward()
self.optimizer.step()
self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
self.last_sample_internal_res = None
def _optimize_step(self) -> Tuple:
sampled_softmax_vals = self.sample_internal(self.batch_size)[0]
sampled_reward, _ = self.obj_func(sampled_softmax_vals)
self.update_params(sampled_reward)
sampled_softmax_vals = {
k: v.detach().clone() for k, v in sampled_softmax_vals.items()
}
logits = {k: v.detach().clone() for k, v in self.logits.items()}
return sampled_softmax_vals, sampled_reward, logits
class PolicyGradientOptimizer(LogitBasedComboOptimizerBase):
"""
Minimize a black-box objective function which takes in categorical inputs.
The method is based on REINFORCE, Williams, 1992.
https://link.springer.com/article/10.1007/BF00992696
In this method, the action distribution is a joint distribution of multiple
*independent* softmax distributions, each corresponding to one discrete
choice type.
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
Example:
>>> _ = torch.manual_seed(0)
>>> np.random.seed(0)
>>> BATCH_SIZE = 16
>>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
>>>
>>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
... reward = torch.ones(BATCH_SIZE, 1)
... for i in range(BATCH_SIZE):
... # the best action is "red"
... if sampled_sol['choice1'][i] == 2:
... reward[i, 0] = 0.0
... return reward
...
>>> optimizer = PolicyGradientOptimizer(
... ng_param, obj_func, batch_size=BATCH_SIZE, learning_rate=0.1
... )
>>> for i in range(30):
... res = optimizer.optimize_step()
...
>>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
>>> assert best_reward == 0
>>> assert best_choice['choice1'] == 2
>>> assert optimizer.sample(1)['choice1'] == 2
"""
def __init__(
self,
param: ng.p.Dict,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
# default (start_temp=min_temp=1.0): no temperature change for policy gradient
start_temp: float = 1.0,
min_temp: float = 1.0,
learning_rate: float = LEARNING_RATE,
anneal_rate: float = ANNEAL_RATE,
batch_size: int = BATCH_SIZE,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
) -> None:
super().__init__(
param,
start_temp,
min_temp,
obj_func,
learning_rate,
anneal_rate,
batch_size,
obj_exp_offset_scale,
)
def sample(
self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
) -> Dict[str, torch.Tensor]:
assert temp is not None, "temp is needed for sampling logits"
sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp)
return sampled_solutions
def sample_internal(
self,
batch_size: Optional[int] = None,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
batch_size = batch_size or self.batch_size
sampled_solutions, sampled_log_probs = sample_from_logits(
self.logits,
batch_size,
self.temp,
)
self.last_sample_internal_res = sampled_solutions, sampled_log_probs
return sampled_solutions, sampled_log_probs
def update_params(self, reward: torch.Tensor):
_, sampled_log_probs = self.last_sample_internal_res
if self.batch_size == 1:
adv = reward
else:
adv = reward - torch.mean(reward)
assert not adv.requires_grad
assert sampled_log_probs.requires_grad
assert sampled_log_probs.shape == adv.shape == reward.shape
assert adv.ndim == 2
assert adv.shape[-1] == 1
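        # Note: the objective is minimized, so the surrogate loss keeps a
        # positive sign; gradient descent on E[adv * log_prob] decreases the
        # expected (scaled) reward, i.e., the REINFORCE update for a
        # minimization problem (batch mean baseline when batch_size > 1).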
loss = (adv * sampled_log_probs).mean()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
self.last_sample_internal_res = None
def _optimize_step(self) -> Tuple:
sampled_solutions, sampled_log_probs = self.sample_internal(self.batch_size)
sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions)
sampled_reward, sampled_scaled_reward = (
sampled_reward.detach(),
sampled_scaled_reward.detach(),
)
self.update_params(sampled_scaled_reward)
return sampled_solutions, sampled_reward, sampled_log_probs
def shuffle_exp_replay(exp_replay: List[Any]) -> Any:
shuffle_idx = np.random.permutation(len(exp_replay))
for idx in shuffle_idx:
yield exp_replay[idx]
class QLearningOptimizer(ComboOptimizerBase):
"""
Treat the problem of minimizing a black-box function as a sequential decision problem,
and solve it by Deep Q-Learning. See "Human-Level Control through Deep Reinforcement
Learning", Mnih et al., 2015. https://www.nature.com/articles/nature14236.
In each episode step, Q-learning makes a decision for one categorical input. The reward
is given only at the end of the episode, which is the value of the black-box function
at the input determined by the choices made at all steps.
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
start_temp (float): the starting exploration rate in epsilon-greedy sampling
min_temp (float): the minimal exploration rate in epsilon-greedy
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
model_dim (int): hidden layer size for the q-network: input -> model_dim -> model_dim -> output
num_batches_per_learning (int): the number of batches sampled from replay buffer
for q-learning.
        replay_size (int): the maximum number of batches held in the replay buffer.
            Note: a problem instance with n choice variables generates n batches
            in the replay buffer per sampling step.
Example:
>>> _ = torch.manual_seed(0)
>>> np.random.seed(0)
>>> BATCH_SIZE = 4
>>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
>>>
>>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
... reward = torch.ones(BATCH_SIZE, 1)
... for i in range(BATCH_SIZE):
... # the best action is "red"
... if sampled_sol['choice1'][i] == 2:
... reward[i, 0] = 0.0
... return reward
...
>>> optimizer = QLearningOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE)
>>> for i in range(10):
... res = optimizer.optimize_step()
...
>>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
>>> assert best_reward == 0
>>> assert best_choice['choice1'] == 2
>>> assert optimizer.sample(1)['choice1'] == 2
"""
def __init__(
self,
param: ng.p.Dict,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
start_temp: float = 1.0,
min_temp: float = 0.1,
learning_rate: float = LEARNING_RATE,
anneal_rate: float = ANNEAL_RATE,
batch_size: int = BATCH_SIZE,
model_dim: int = 128,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
num_batches_per_learning: int = 10,
replay_size: int = 100,
) -> None:
self.model_dim = model_dim
self.sorted_keys = sorted(param.keys())
assert (
start_temp <= 1.0 and start_temp > 0
), "Starting temperature for epsilon-greedy should be between (0, 1]"
assert (
min_temp <= start_temp and min_temp >= 0
), "Minimum temperature for epsilon-greedy should be between [0, start_temp]"
self.temp = start_temp
self.min_temp = min_temp
self.learning_rate = learning_rate
self.anneal_rate = anneal_rate
self.num_batches_per_learning = num_batches_per_learning
self.replay_size = replay_size
self.exp_replay = deque([], maxlen=replay_size)
self.input_dim = 0
self.q_net = None
self.optimizer = None
super().__init__(
param,
obj_func,
batch_size,
obj_exp_offset_scale,
)
def _init(self) -> None:
for k in self.sorted_keys:
v = self.param[k]
if isinstance(v, ng.p.Choice):
num_choices = len(v.choices)
self.input_dim += num_choices
else:
raise NotImplementedError()
self.q_net = nn.Sequential(
*[
nn.Linear(self.input_dim, self.model_dim),
nn.ReLU(),
nn.Linear(self.model_dim, self.model_dim),
nn.ReLU(),
nn.Linear(self.model_dim, 1),
]
)
for p in self.q_net.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.optimizer = torch.optim.Adam(
self.q_net.parameters(), lr=self.learning_rate
)
logger.info(f"Number of total params: {_num_of_params(self.q_net)}")
def sample_internal(
self,
batch_size: Optional[int] = None,
) -> Tuple[Dict[str, torch.Tensor], List[Any]]:
batch_size = batch_size or self.batch_size
return self._sample_internal(batch_size, self.temp)
def _sample_internal(
self,
batch_size: int,
temp: float,
) -> Tuple[Dict[str, torch.Tensor], List[Any]]:
logger.info(f"Explore with temp={temp}")
sampled_solutions: Dict[str, torch.Tensor] = {}
exp_replay = []
acc_input_dim = 0
# The first cur_state_action is a dummy vector of all -1
cur_state_action = torch.full((batch_size, self.input_dim), -1).float()
for k in self.sorted_keys:
v = self.param[k]
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
num_choices = len(v.choices)
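            # Enumerate all candidate actions for this key: repeat the current
            # encoding `num_choices` times and activate the one-hot block owned
            # by this key, producing a (batch_size, num_choices, input_dim)
            # tensor of state-action pairs for the Q-network to score.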
next_state_action_all_pairs = cur_state_action.repeat_interleave(
num_choices, dim=0
).reshape(batch_size, num_choices, self.input_dim)
next_state_action_all_pairs[
:, :, acc_input_dim : acc_input_dim + num_choices
] = torch.eye(num_choices)
q_values = (
self.q_net(next_state_action_all_pairs)
.detach()
.reshape(batch_size, num_choices)
)
q_actions = q_values.argmax(dim=1)
random_actions = torch.randint(num_choices, (batch_size,))
explore_prob = torch.rand(batch_size)
selected_action = (
(explore_prob <= temp) * random_actions
+ (explore_prob > temp) * q_actions
).long()
sampled_solutions[k] = selected_action
# the last element is terminal indicator
exp_replay.append((cur_state_action, next_state_action_all_pairs, False))
cur_state_action = next_state_action_all_pairs[
torch.arange(batch_size), selected_action
]
acc_input_dim += num_choices
# add dummy next_state_action_all_pairs and terminal indicator
exp_replay.append((cur_state_action, cur_state_action.squeeze(1), True))
# the first element is not useful
exp_replay.pop(0)
self.last_sample_internal_res = (sampled_solutions, exp_replay)
return sampled_solutions, exp_replay
def sample(
self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
) -> Dict[str, torch.Tensor]:
assert temp is not None, "temp is needed for epsilon greedy"
sampled_solutions, _ = self._sample_internal(batch_size, temp)
return sampled_solutions
def update_params(self, reward: torch.Tensor) -> None:
_, exp_replay = self.last_sample_internal_res
# insert reward placeholder to exp replay
# exp replay now has the format:
# (cur_state_action, next_state_action_all_pairs, terminal, reward)
self.exp_replay.extend([[*exp, None] for exp in exp_replay])
self.exp_replay[-1][-1] = reward
assert len(exp_replay) == len(self.sorted_keys)
avg_td_loss = []
for i, (
cur_state_action,
next_state_action_all_pairs,
terminal,
r,
) in enumerate(shuffle_exp_replay(self.exp_replay)):
q = self.q_net(cur_state_action)
if terminal:
# negate reward to be consistent with other optimizers.
# reward returned by obj_func is to be minimized
                # but q-learning tries to maximize accumulated rewards
loss = F.mse_loss(q, -r)
else:
q_next = self.q_net(next_state_action_all_pairs).detach()
# assume gamma=1 (no discounting)
loss = F.mse_loss(q, q_next.max(dim=1).values)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
avg_td_loss.append(loss.detach())
if i == self.num_batches_per_learning - 1:
break
avg_td_loss = np.mean(avg_td_loss)
logger.info(f"Avg td loss: {avg_td_loss}")
self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
self.last_sample_internal_res = None
def _optimize_step(
self,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
sampled_solutions, exp_replay = self.sample_internal(self.batch_size)
sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions)
sampled_reward, sampled_scaled_reward = (
sampled_reward.detach(),
sampled_scaled_reward.detach(),
)
self.update_params(sampled_scaled_reward)
return sampled_solutions, sampled_reward
class BayesianOptimizer(ComboOptimizerBase):
"""
    Bayesian optimization with mutation-based search and an acquisition function.
    The method is motivated by BANANAS, White, 2020.
    https://arxiv.org/abs/1910.11858
    In this method, the search is based on mutating the current best solutions.
    An acquisition function, e.g., ITS (independent Thompson sampling), scores the mutated candidates.
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
acq_type (str): type of acquisition function.
mutation_type (str): type of mutation, e.g., random.
temp (float): percentage of mutation - how many variables will be mutated.
"""
def __init__(
self,
param: ng.p.Dict,
start_temp: float,
min_temp: float,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
acq_type: str = "its",
mutation_type: str = "random",
anneal_rate: float = ANNEAL_RATE,
batch_size: int = BATCH_SIZE,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
) -> None:
self.start_temp = start_temp
self.min_temp = min_temp
self.temp = start_temp
self.acq_type = acq_type
self.mutation_type = mutation_type
self.anneal_rate = anneal_rate
super().__init__(
param,
obj_func,
batch_size,
obj_exp_offset_scale,
)
def sample(
self, batch_size: int, temp: Optional[float] = None
) -> Dict[str, torch.Tensor]:
"""
Applies a type of mutation, e.g., random mutation, on the best solutions recorded so far.
For example, with random mutation, variables are randomly selected,
and their values are randomly set with respect to their domains.
"""
assert temp is not None, "temp is needed for Bayesian Optimizer"
best_solutions = self.best_solutions(batch_size)
batch_size = len(best_solutions)
sampled_sol = [sol for _, sol in best_solutions]
sampled_solutions = {}
for k in sorted(self.param.keys()):
sampled_solutions[k] = torch.cat([sol[k].reshape(1) for sol in sampled_sol])
if self.mutation_type == "random":
mutated_keys = [
np.random.choice(
sorted(self.param.keys()),
floor(temp * len(self.param)),
replace=False,
)
for _ in range(batch_size)
]
mutated_solutions = {}
for key in sorted(self.param.keys()):
mutated_solutions[key] = sampled_solutions[key].clone()
indices = torch.tensor(
[idx for idx, k in enumerate(mutated_keys) if key in k]
)
if len(indices):
mutated_solutions[key][indices] = torch.randint(
# pyre-fixme[16]: `Parameter` has no attribute `choices`.
len(self.param[key].choices),
(len(indices),),
)
else:
raise NotImplementedError()
return mutated_solutions
def acquisition(
self,
acq_type: str,
sampled_sol: Dict[str, torch.Tensor],
predictor: List[nn.Module],
) -> torch.Tensor:
assert predictor is not None
batch_tensors = sol_to_tensors(sampled_sol, self.param)
if acq_type == "its":
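            # Independent Thompson sampling (ITS): for each candidate, draw a
            # single value from a Normal whose mean and std come from the
            # ensemble's predictions; candidates with lower sampled values are
            # preferred downstream because the objective is minimized.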
with torch.no_grad():
predictions = torch.stack([net(batch_tensors) for net in predictor])
acquisition_reward = torch.normal(
torch.mean(predictions, dim=0), torch.std(predictions, dim=0)
)
else:
raise NotImplementedError()
return acquisition_reward.view(-1)
class BayesianMLPEnsemblerOptimizer(BayesianOptimizer):
"""
    Bayesian optimizer with an ensemble of MLP networks, random mutation, and ITS.
    The method is motivated by the BANANAS optimization method, White, 2019.
https://arxiv.org/abs/1910.11858.
    The mutation rate (temp) starts at start_temp and decreases over time
    with anneal_rate; its lowest possible value is min_temp.
    Thus, the algorithm initially explores mutations with a higher mutation rate (more variables are randomly mutated).
    As time passes, it exploits the best solutions recorded so far (fewer variables are mutated).
Args:
param (ng.p.Dict): a nevergrad dictionary for specifying input choices
obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
a function which consumes sampled solutions and returns
rewards as tensors of shape (batch_size, 1).
The input dictionary has choice names as the key and sampled choice
indices as the value (of shape (batch_size, ))
acq_type (str): type of acquisition function.
mutation_type (str): type of mutation, e.g., random.
num_mutations (int): number of best solutions recorded so far that will be mutated.
num_ensemble (int): number of predictors.
        start_temp (float): initial temperature (ratio) for mutation, e.g., with 1.0 all variables will be initially mutated.
min_temp (float): lowest temperature (ratio) for mutation, e.g., with 0.0 no mutation will occur.
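    Example (an illustrative sketch; the small budget values are arbitrary and
    outcomes depend on random seeds):
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     # the best action is "red" (index 2); works for any batch size
        ...     return (sampled_sol['choice1'] != 2).float().reshape(-1, 1)
        ...
        >>> optimizer = BayesianMLPEnsemblerOptimizer(
        ...     ng_param, obj_func=obj_func, batch_size=4, num_mutations=8
        ... )
        >>> for _ in range(10):
        ...     _ = optimizer.optimize_step()
        ...
        >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]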
"""
def __init__(
self,
param: ng.p.Dict,
start_temp: float = 1.0,
min_temp: float = 0.0,
obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
acq_type: str = "its",
mutation_type: str = "random",
anneal_rate: float = ANNEAL_RATE,
num_mutations: int = 50,
epochs: int = 1,
learning_rate: float = LEARNING_RATE,
batch_size: int = BATCH_SIZE,
obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
model_dim: int = 128,
num_ensemble: int = 5,
) -> None:
self.temp = start_temp
self.num_mutations = num_mutations
self.epochs = epochs
self.learning_rate = learning_rate
self.model_dim = model_dim
self.num_ensemble = num_ensemble
self.input_dim = 0
self.predictor = None
super().__init__(
param,
start_temp,
min_temp,
obj_func,
acq_type,
mutation_type,
anneal_rate,
batch_size,
obj_exp_offset_scale,
)
def _init(self) -> None:
# initial population
sampled_solutions = {}
for k, param in self.param.items():
if isinstance(param, ng.p.Choice):
num_choices = len(param.choices)
self.input_dim += num_choices
sampled_solutions[k] = torch.randint(num_choices, (self.num_mutations,))
else:
raise NotImplementedError()
# predictor
self.predictor = []
for _ in range(self.num_ensemble):
model = nn.Sequential(
*[
nn.Linear(self.input_dim, self.model_dim),
nn.LeakyReLU(),
nn.Linear(self.model_dim, self.model_dim),
nn.LeakyReLU(),
nn.Linear(self.model_dim, 1),
]
)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.predictor.append(model)
sampled_reward, _ = self.obj_func(sampled_solutions)
sampled_reward = sampled_reward.detach()
self._maintain_best_solutions(sampled_solutions, sampled_reward)
self.update_predictor(sampled_solutions, sampled_reward)
def sample_internal(
self,
batch_size: Optional[int] = None,
) -> Tuple[Dict[str, torch.Tensor]]:
batch_size = batch_size or self.batch_size
mutated_solutions = self.sample(self.num_mutations, self.temp)
_, indices = torch.sort(
self.acquisition(self.acq_type, mutated_solutions, self.predictor), dim=0
)
sampled_solutions = {}
for key in sorted(self.param.keys()):
sampled_solutions[key] = mutated_solutions[key][indices[:batch_size]]
self.last_sample_internal_res = sampled_solutions
return (sampled_solutions,)
def update_predictor(
self, sampled_solutions: Dict[str, torch.Tensor], sampled_reward: torch.Tensor
    ) -> float:
x = sol_to_tensors(sampled_solutions, self.param)
y = sampled_reward
losses = []
for model in self.predictor:
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate)
for _ in range(self.epochs):
pred = model(x)
loss = F.mse_loss(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.detach())
model.eval()
return np.mean(losses)
def update_params(self, reward: torch.Tensor):
self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
self.last_sample_internal_res = None
def _optimize_step(self) -> Tuple:
sampled_solutions = self.sample_internal(self.batch_size)[0]
sampled_reward, _ = self.obj_func(sampled_solutions)
sampled_reward = sampled_reward.detach()
loss = self.update_predictor(sampled_solutions, sampled_reward)
self.update_params(sampled_reward)
return sampled_solutions, sampled_reward, loss
| 46,935 | 36.821112 | 124 | py |
ReAgent | ReAgent-master/reagent/data/oss_data_fetcher.py | #!/usr/bin/env python3
import logging
from typing import List, Optional, Tuple
# pyre-fixme[21]: Could not find `pyspark`.
# pyre-fixme[21]: Could not find `pyspark`.
from pyspark.sql.functions import col, crc32, explode, map_keys, udf
from pyspark.sql.types import (
ArrayType,
BooleanType,
FloatType,
LongType,
MapType,
StructField,
StructType,
)
from reagent.data.data_fetcher import DataFetcher
from reagent.data.spark_utils import get_spark_session, get_table_url
from reagent.workflow.types import Dataset, TableSpec
logger = logging.getLogger(__name__)
# for normalizing crc32 output
MAX_UINT32 = 4294967295
# for generating/checking random tmp table names for upload_as_parquet
UPLOAD_PARQUET_TMP_SUFFIX_LEN = 10
MAX_UPLOAD_PARQUET_TRIES = 10
def calc_custom_reward(df, custom_reward_expression: str):
sqlCtx = get_spark_session()
# create a temporary table for running sql
temp_table_name = "_tmp_calc_reward_df"
temp_reward_name = "_tmp_reward_col"
df.createOrReplaceTempView(temp_table_name)
df = sqlCtx.sql(
f"SELECT *, CAST(COALESCE({custom_reward_expression}, 0) AS FLOAT)"
f" as {temp_reward_name} FROM {temp_table_name}"
)
return df.drop("reward").withColumnRenamed(temp_reward_name, "reward")
def calc_reward_multi_steps(df, multi_steps: int, gamma: float):
# assumes df[reward] is array[float] and 1 <= len(df[reward]) <= multi_steps
# computes r_0 + gamma * (r_1 + gamma * (r_2 + ... ))
expr = f"AGGREGATE(REVERSE(reward), FLOAT(0), (s, x) -> FLOAT({gamma}) * s + x)"
return calc_custom_reward(df, expr)
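# Worked example (illustrative, not part of the original module): with
# reward = [1.0, 2.0, 3.0] and gamma = 0.5, the AGGREGATE expression folds the
# reversed array and evaluates to
#   r_0 + gamma * (r_1 + gamma * r_2) = 1.0 + 0.5 * (2.0 + 0.5 * 3.0) = 2.75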
def set_reward_col_as_reward(
df,
custom_reward_expression: Optional[str] = None,
multi_steps: Optional[int] = None,
gamma: Optional[float] = None,
):
# after this, reward column should be set to be the reward
if custom_reward_expression is not None:
df = calc_custom_reward(df, custom_reward_expression)
elif multi_steps is not None:
assert gamma is not None
df = calc_reward_multi_steps(df, multi_steps, gamma)
return df
def hash_mdp_id_and_subsample(df, sample_range: Optional[Tuple[float, float]] = None):
"""Since mdp_id is a string but Pytorch Tensors do not store strings,
we hash them with crc32, which is treated as a cryptographic hash
(with range [0, MAX_UINT32-1]). We also perform an optional subsampling
based on this hash value.
NOTE: we're assuming no collisions in this hash! Otherwise, two mdp_ids
can be indistinguishable after the hash.
TODO: change this to a deterministic subsample.
"""
if sample_range:
assert (
0.0 <= sample_range[0]
and sample_range[0] <= sample_range[1]
and sample_range[1] <= 100.0
), f"{sample_range} is invalid."
# pyre-fixme[16]: Module `functions` has no attribute `col`.
df = df.withColumn("mdp_id", crc32(col("mdp_id")))
if sample_range:
lower_bound = sample_range[0] / 100.0 * MAX_UINT32
upper_bound = sample_range[1] / 100.0 * MAX_UINT32
# pyre-fixme[16]: Module `functions` has no attribute `col`.
# pyre-fixme[16]: Module `functions` has no attribute `col`.
df = df.filter((lower_bound <= col("mdp_id")) & (col("mdp_id") <= upper_bound))
return df
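# Example (illustrative): sample_range=(0.0, 10.0) keeps roughly 10% of mdp_ids,
# namely those whose crc32 hash lands in [0, 0.1 * MAX_UINT32]. Because the
# filter is keyed on the hash of mdp_id, every row of a given mdp_id is kept or
# dropped together.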
def make_sparse2dense(df, col_name: str, possible_keys: List):
"""Given a list of possible keys, convert sparse map to dense array.
In our example, both value_type is assumed to be a float.
"""
output_type = StructType(
[
StructField("presence", ArrayType(BooleanType()), False),
StructField("dense", ArrayType(FloatType()), False),
]
)
def sparse2dense(map_col):
assert isinstance(
map_col, dict
), f"{map_col} has type {type(map_col)} and is not a dict."
presence = []
dense = []
for key in possible_keys:
val = map_col.get(key, None)
if val is not None:
presence.append(True)
dense.append(float(val))
else:
presence.append(False)
dense.append(0.0)
return presence, dense
sparse2dense_udf = udf(sparse2dense, output_type)
df = df.withColumn(col_name, sparse2dense_udf(col_name))
# pyre-fixme[16]: Module `functions` has no attribute `col`.
df = df.withColumn(f"{col_name}_presence", col(f"{col_name}.presence"))
# pyre-fixme[16]: Module `functions` has no attribute `col`.
df = df.withColumn(col_name, col(f"{col_name}.dense"))
return df
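# Example (illustrative, assuming possible_keys = [0, 1, 2]): a sparse map
# {0: 1.5, 2: -0.5} is densified into
#   <col_name>_presence = [True, False, True]
#   <col_name>          = [1.5, 0.0, -0.5]
# Missing keys are zero-filled in the dense array and marked False in presence.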
#################################################
# Below are some UDFs we use for preprocessing. #
#################################################
def make_get_step_udf(multi_steps: Optional[int]):
"""Get step count by taking length of next_states_features array."""
def get_step(col: List):
return 1 if multi_steps is None else min(len(col), multi_steps)
return udf(get_step, LongType())
def make_next_udf(multi_steps: Optional[int], return_type):
"""Generic udf to get next (after multi_steps) item, provided item type."""
def get_next(next_col):
return (
next_col
if multi_steps is None
else next_col[min(len(next_col), multi_steps) - 1]
)
return udf(get_next, return_type)
def make_where_udf(arr: List[str]):
"""Return index of item in arr, and len(arr) if not found."""
def find(item: str):
for i, arr_item in enumerate(arr):
if arr_item == item:
return i
return len(arr)
return udf(find, LongType())
def make_existence_bitvector_udf(arr: List[str]):
"""one-hot encode elements of target depending on their existence in arr."""
default = [0] * len(arr)
def encode(target: List[str]):
bitvec = default.copy()
for i, arr_item in enumerate(arr):
if arr_item in target:
bitvec[i] = 1
return bitvec
return udf(encode, ArrayType(LongType()))
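# Example (illustrative, assuming arr = ["up", "down", "left"]): a target list
# ["down"] encodes to [0, 1, 0]; entries of target that are not in arr are
# simply ignored.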
def misc_column_preprocessing(df, multi_steps: Optional[int]):
"""Miscellaneous columns are step, time_diff, sequence_number, not_terminal."""
# step refers to n in n-step RL; special case when approaching terminal
df = df.withColumn("step", make_get_step_udf(multi_steps)("next_state_features"))
# take the next time_diff
next_long_udf = make_next_udf(multi_steps, LongType())
df = df.withColumn("time_diff", next_long_udf("time_diff"))
# assuming use_seq_num_diff_as_time_diff = False for now
# pyre-fixme[16]: Module `functions` has no attribute `col`.
df = df.withColumn("sequence_number", col("sequence_number_ordinal"))
return df
def state_and_metrics_sparse2dense(
df, states: List[int], metrics: List[str], multi_steps: Optional[int]
):
"""Sparse-to-dense preprocessing of Map columns, which are states and metrics.
For each column of type Map, w/ name X, output two columns.
Map values are assumed to be scalar. This process is called sparse-to-dense.
X = {"state_features", "next_state_features", "metrics"}.
    (a) Replace column X with a dense representation of the inputted (sparse) map.
        Dense representation is to concatenate map values into a list.
    (b) Create new column X_presence, a list of the same length as (a) whose
        ith entry is True iff the key was present in the original map.
"""
next_map_udf = make_next_udf(multi_steps, MapType(LongType(), FloatType()))
df = df.withColumn("next_state_features", next_map_udf("next_state_features"))
df = df.withColumn("metrics", next_map_udf("metrics"))
df = make_sparse2dense(df, "state_features", states)
df = make_sparse2dense(df, "next_state_features", states)
df = make_sparse2dense(df, "metrics", metrics)
return df
def discrete_action_preprocessing(
df, actions: List[str], multi_steps: Optional[int] = None
):
"""
Inputted actions and possible_actions are strings, which isn't supported
for PyTorch Tensors. Here, we represent them with LongType.
(a) action and next_action are strings, so simply return their position
in the action_space (as given by argument actions).
(b) possible_actions and possible_next_actions are list of strs, so
return an existence bitvector of length len(actions), where ith
index is true iff actions[i] was in the list.
By-product: output not_terminal from preprocessing actions.
"""
# turn string actions into indices
where_udf = make_where_udf(actions)
df = df.withColumn("action", where_udf("action"))
next_long_udf = make_next_udf(multi_steps, LongType())
df = df.withColumn("next_action", where_udf(next_long_udf("next_action")))
def make_not_terminal_udf(actions: List[str]):
"""Return true iff next_action is terminal (i.e. idx = len(actions))."""
def get_not_terminal(next_action):
return next_action < len(actions)
return udf(get_not_terminal, BooleanType())
not_terminal_udf = make_not_terminal_udf(actions)
df = df.withColumn("not_terminal", not_terminal_udf("next_action"))
# turn List[str] possible_actions into existence bitvectors
next_long_arr_udf = make_next_udf(multi_steps, ArrayType(LongType()))
existence_bitvector_udf = make_existence_bitvector_udf(actions)
df = df.withColumn(
"possible_actions_mask", existence_bitvector_udf("possible_actions")
)
df = df.withColumn(
"possible_next_actions_mask",
existence_bitvector_udf(next_long_arr_udf("possible_next_actions")),
)
return df
def parametric_action_preprocessing(
df,
actions: List[str],
multi_steps: Optional[int] = None,
include_possible_actions: bool = True,
):
assert (
not include_possible_actions
), "current we don't support include_possible_actions"
next_map_udf = make_next_udf(multi_steps, MapType(LongType(), FloatType()))
df = df.withColumn("next_action", next_map_udf("next_action"))
def make_not_terminal_udf():
"""Return true iff next_action is an empty map"""
def get_not_terminal(next_action):
return len(next_action) > 0
return udf(get_not_terminal, BooleanType())
not_terminal_udf = make_not_terminal_udf()
df = df.withColumn("not_terminal", not_terminal_udf("next_action"))
df = make_sparse2dense(df, "action", actions)
df = make_sparse2dense(df, "next_action", actions)
return df
def select_relevant_columns(
df, discrete_action: bool = True, include_possible_actions: bool = True
):
"""Select all the relevant columns and perform type conversions."""
if not discrete_action and include_possible_actions:
raise NotImplementedError("currently we don't support include_possible_actions")
select_col_list = [
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("reward").cast(FloatType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("state_features").cast(ArrayType(FloatType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("state_features_presence").cast(ArrayType(BooleanType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("next_state_features").cast(ArrayType(FloatType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("next_state_features_presence").cast(ArrayType(BooleanType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("not_terminal").cast(BooleanType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("action_probability").cast(FloatType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("mdp_id").cast(LongType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("sequence_number").cast(LongType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("step").cast(LongType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("time_diff").cast(LongType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("metrics").cast(ArrayType(FloatType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("metrics_presence").cast(ArrayType(BooleanType())),
]
if discrete_action:
select_col_list += [
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("action").cast(LongType()),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("next_action").cast(LongType()),
]
else:
select_col_list += [
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("action").cast(ArrayType(FloatType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("next_action").cast(ArrayType(FloatType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("action_presence").cast(ArrayType(BooleanType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("next_action_presence").cast(ArrayType(BooleanType())),
]
if include_possible_actions:
select_col_list += [
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("possible_actions_mask").cast(ArrayType(LongType())),
# pyre-fixme[16]: Module `functions` has no attribute `col`.
col("possible_next_actions_mask").cast(ArrayType(LongType())),
]
return df.select(*select_col_list)
def get_distinct_keys(df, col_name, is_col_arr_map=False):
"""Return list of distinct keys.
Set is_col_arr_map to be true if column is an array of Maps.
Otherwise, assume column is a Map.
"""
if is_col_arr_map:
df = df.select(explode(col_name).alias(col_name))
df = df.select(explode(map_keys(col_name)))
return df.distinct().rdd.flatMap(lambda x: x).collect()
def infer_states_names(df, multi_steps: Optional[int]):
"""Infer possible state names from states and next state features."""
state_keys = get_distinct_keys(df, "state_features")
next_states_is_col_arr_map = not (multi_steps is None)
next_state_keys = get_distinct_keys(
df, "next_state_features", is_col_arr_map=next_states_is_col_arr_map
)
return sorted(set(state_keys) | set(next_state_keys))
def infer_action_names(df, multi_steps: Optional[int]):
action_keys = get_distinct_keys(df, "action")
next_action_is_col_arr_map = not (multi_steps is None)
next_action_keys = get_distinct_keys(
df, "next_action", is_col_arr_map=next_action_is_col_arr_map
)
return sorted(set(action_keys) | set(next_action_keys))
def infer_metrics_names(df, multi_steps: Optional[int]):
"""Infer possible metrics names.
Assume in multi-step case, metrics is an array of maps.
"""
is_col_arr_map = not (multi_steps is None)
return sorted(get_distinct_keys(df, "metrics", is_col_arr_map=is_col_arr_map))
def rand_string(length):
    """Generate a random string of fixed length."""
    import random
    import string
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for _ in range(length))
def upload_as_parquet(df) -> Dataset:
"""Generate a random parquet. Fails if cannot generate a non-existent name."""
# get a random tmp name and check if it exists
sqlCtx = get_spark_session()
success = False
for _ in range(MAX_UPLOAD_PARQUET_TRIES):
suffix = rand_string(length=UPLOAD_PARQUET_TMP_SUFFIX_LEN)
rand_name = f"tmp_parquet_{suffix}"
if not sqlCtx.catalog._jcatalog.tableExists(rand_name):
success = True
break
if not success:
raise Exception(f"Failed to find name after {MAX_UPLOAD_PARQUET_TRIES} tries.")
# perform the write
# pyre-fixme[61]: `rand_name` may not be initialized here.
df.write.mode("errorifexists").format("parquet").saveAsTable(rand_name)
# pyre-fixme[61]: `rand_name` may not be initialized here.
parquet_url = get_table_url(rand_name)
logger.info(f"Saved parquet to {parquet_url}")
return Dataset(parquet_url=parquet_url)
class OssDataFetcher(DataFetcher):
def query_data(
self,
input_table_spec: TableSpec,
discrete_action: bool,
actions: Optional[List[str]] = None,
include_possible_actions=True,
custom_reward_expression: Optional[str] = None,
sample_range: Optional[Tuple[float, float]] = None,
multi_steps: Optional[int] = None,
gamma: Optional[float] = None,
) -> Dataset:
"""Perform reward calculation, hashing mdp + subsampling and
other preprocessing such as sparse2dense.
"""
sqlCtx = get_spark_session()
# pyre-ignore
df = sqlCtx.sql(f"SELECT * FROM {input_table_spec.table_name}")
df = set_reward_col_as_reward(
df,
custom_reward_expression=custom_reward_expression,
multi_steps=multi_steps,
gamma=gamma,
)
df = hash_mdp_id_and_subsample(df, sample_range=sample_range)
df = misc_column_preprocessing(df, multi_steps=multi_steps)
df = state_and_metrics_sparse2dense(
df,
states=infer_states_names(df, multi_steps),
metrics=infer_metrics_names(df, multi_steps),
multi_steps=multi_steps,
)
if discrete_action:
assert include_possible_actions
assert actions is not None, "in discrete case, actions must be given."
df = discrete_action_preprocessing(
df, actions=actions, multi_steps=multi_steps
)
else:
actions = infer_action_names(df, multi_steps)
df = parametric_action_preprocessing(
df,
actions=actions,
multi_steps=multi_steps,
include_possible_actions=include_possible_actions,
)
df = select_relevant_columns(
df,
discrete_action=discrete_action,
include_possible_actions=include_possible_actions,
)
return upload_as_parquet(df)
| 18,591 | 37.255144 | 88 | py |
ReAgent | ReAgent-master/reagent/data/reagent_data_module.py | #!/usr/bin/env python3
import abc
from typing import Dict, List, Optional
import pytorch_lightning as pl
from reagent.core.parameters import NormalizationData
class ReAgentDataModule(pl.LightningDataModule):
@abc.abstractmethod
def get_normalization_data_map(
self,
keys: Optional[List[str]] = None,
) -> Dict[str, NormalizationData]:
pass
| 380 | 21.411765 | 53 | py |
ReAgent | ReAgent-master/reagent/data/manual_data_module.py | #!/usr/bin/env python3
import abc
import logging
import pickle
from typing import NamedTuple, Dict, List, Optional, Tuple
logger = logging.getLogger(__name__)
try:
# pyre-fixme[21]: Could not find `petastorm`.
from petastorm import make_batch_reader
# pyre-fixme[21]: Could not find module `petastorm.pytorch`.
# pyre-fixme[21]: Could not find module `petastorm.pytorch`.
from petastorm.pytorch import DataLoader, decimal_friendly_collate
except ModuleNotFoundError:
    logger.warning("petastorm is not installed; please install it if you want to use this")
from reagent.core.parameters import NormalizationData
from reagent.data.data_fetcher import DataFetcher
from reagent.data.oss_data_fetcher import OssDataFetcher
from reagent.preprocessing.batch_preprocessor import (
BatchPreprocessor,
)
from reagent.workflow.types import (
Dataset,
ReaderOptions,
RewardOptions,
TableSpec,
ResourceOptions,
)
from .reagent_data_module import ReAgentDataModule
class TrainEvalSampleRanges(NamedTuple):
train_sample_range: Tuple[float, float]
eval_sample_range: Tuple[float, float]
def get_sample_range(
input_table_spec: TableSpec, calc_cpe_in_training: bool
) -> TrainEvalSampleRanges:
table_sample = input_table_spec.table_sample
eval_table_sample = input_table_spec.eval_table_sample
if not calc_cpe_in_training:
# use all data if table sample = None
if table_sample is None:
train_sample_range = (0.0, 100.0)
else:
train_sample_range = (0.0, table_sample)
return TrainEvalSampleRanges(
train_sample_range=train_sample_range,
# eval samples will not be used
eval_sample_range=(0.0, 0.0),
)
error_msg = (
"calc_cpe_in_training is set to True. "
f"Please specify table_sample(current={table_sample}) and "
f"eval_table_sample(current={eval_table_sample}) such that "
"eval_table_sample + table_sample <= 100. "
"In order to reliably calculate CPE, eval_table_sample "
"should not be too small."
)
assert table_sample is not None, error_msg
assert eval_table_sample is not None, error_msg
assert (eval_table_sample + table_sample) <= (100.0 + 1e-3), error_msg
return TrainEvalSampleRanges(
train_sample_range=(0.0, table_sample),
eval_sample_range=(100.0 - eval_table_sample, 100.0),
)
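# Example (illustrative): with calc_cpe_in_training=True, table_sample=70.0 and
# eval_table_sample=30.0, training reads rows hashed into (0.0, 70.0) and CPE
# evaluation reads rows hashed into (70.0, 100.0), so the two splits do not
# overlap.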
# pyre-fixme[13]: Attribute `_normalization_data_map` is never initialized.
# pyre-fixme[13]: Attribute `_train_dataset` is never initialized.
# pyre-fixme[13]: Attribute `_eval_dataset` is never initialized.
class ManualDataModule(ReAgentDataModule):
_normalization_data_map: Dict[str, NormalizationData]
_train_dataset: Dataset
_eval_dataset: Optional[Dataset]
def __init__(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
reader_options: Optional[ReaderOptions] = None,
resource_options: Optional[ResourceOptions] = None,
model_manager=None,
):
super().__init__()
self.input_table_spec = input_table_spec
self.reward_options = reward_options or RewardOptions()
self.reader_options = reader_options or ReaderOptions()
self.resource_options = resource_options or ResourceOptions(gpu=0)
self._model_manager = model_manager
self.setup_data = setup_data
self.saved_setup_data = saved_setup_data or {}
self._setup_done = False
self._num_train_data_loader_calls = 0
self._num_val_data_loader_calls = 0
self._num_test_data_loader_calls = 0
def prepare_data(self, *args, **kwargs):
if self.setup_data is not None:
return None
key = "normalization_data_map"
data_fetcher = OssDataFetcher()
normalization_data_map = (
self.run_feature_identification(self.input_table_spec)
if key not in self.saved_setup_data
else pickle.loads(self.saved_setup_data[key])
)
calc_cpe_in_training = self.should_generate_eval_dataset
sample_range_output = get_sample_range(
self.input_table_spec, calc_cpe_in_training
)
train_dataset = self.query_data(
input_table_spec=self.input_table_spec,
sample_range=sample_range_output.train_sample_range,
reward_options=self.reward_options,
data_fetcher=data_fetcher,
)
eval_dataset = None
if calc_cpe_in_training:
eval_dataset = self.query_data(
input_table_spec=self.input_table_spec,
sample_range=sample_range_output.eval_sample_range,
reward_options=self.reward_options,
data_fetcher=data_fetcher,
)
self.setup_data = self._pickle_setup_data(
normalization_data_map=normalization_data_map,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
return self.setup_data
def _pickle_setup_data(
self,
normalization_data_map: Dict[str, NormalizationData],
train_dataset: Dataset,
eval_dataset: Optional[Dataset],
) -> Dict[str, bytes]:
setup_data = dict(
normalization_data_map=pickle.dumps(normalization_data_map),
train_dataset=pickle.dumps(train_dataset),
eval_dataset=pickle.dumps(eval_dataset),
)
self.setup_data = setup_data
return setup_data
def setup(self, stage=None):
if self._setup_done:
return
setup_data = {k: pickle.loads(v) for k, v in self.setup_data.items()}
self._normalization_data_map = setup_data["normalization_data_map"]
self._train_dataset = setup_data["train_dataset"]
self._eval_dataset = setup_data["eval_dataset"]
self._setup_done = True
@property
def model_manager(self):
model_manager = self._model_manager
assert model_manager
return model_manager
@model_manager.setter
def model_manager(self, model_manager):
assert self._model_manager is None
self._model_manager = model_manager
def get_normalization_data_map(
self,
keys: Optional[List[str]] = None,
) -> Dict[str, NormalizationData]:
return self._normalization_data_map
@abc.abstractmethod
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
"""
Derive preprocessing parameters from data.
"""
pass
def __getattr__(self, attr):
"""Get X_normalization_data by attribute"""
normalization_data_suffix = "_normalization_data"
if attr.endswith(normalization_data_suffix):
assert self._normalization_data_map is not None, (
f"Trying to access {attr} but normalization_data_map "
"has not been set. Did you run `setup()`"
)
normalization_key = attr[: -len(normalization_data_suffix)]
normalization_data = self._normalization_data_map.get(
normalization_key, None
)
if normalization_data is None:
raise AttributeError(
f"normalization key `{normalization_key}` is unavailable. "
f"Available keys are: {self._normalization_data_map.keys()}."
)
return normalization_data
raise AttributeError(f"attr {attr} not available {type(self)}")
@property
@abc.abstractmethod
def should_generate_eval_dataset(self) -> bool:
pass
@abc.abstractmethod
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
"""
Massage input table into the format expected by the trainer
"""
pass
@abc.abstractmethod
def build_batch_preprocessor(self) -> BatchPreprocessor:
pass
def get_dataloader(self, dataset: Dataset, identity: str = "Default"):
batch_preprocessor = self.build_batch_preprocessor()
reader_options = self.reader_options
assert reader_options
data_reader = make_batch_reader(
# pyre-fixme[16]: `HiveDataSetClass` has no attribute `parquet_url`.
dataset.parquet_url,
num_epochs=1,
reader_pool_type=reader_options.petastorm_reader_pool_type,
)
        # NOTE: wrap with _closing_iter (below) so __exit__() is called at the end of each epoch
dataloader = DataLoader(
data_reader,
batch_size=reader_options.minibatch_size,
collate_fn=collate_and_preprocess(
batch_preprocessor=batch_preprocessor, use_gpu=False
),
)
return _closing_iter(dataloader)
def train_dataloader(self):
self._num_train_data_loader_calls += 1
return self.get_dataloader(
self._train_dataset,
identity=f"train_{self._num_train_data_loader_calls}",
)
def test_dataloader(self):
self._num_test_data_loader_calls += 1
# TODO: we currently use the same data for test and validation.
# We should have three different splits of the total data
return self._get_eval_dataset(
identity=f"test_{self._num_test_data_loader_calls}"
)
def val_dataloader(self):
self._num_val_data_loader_calls += 1
return self._get_eval_dataset(identity=f"val_{self._num_val_data_loader_calls}")
def _get_eval_dataset(self, identity: str):
test_dataset = getattr(self, "_eval_dataset", None)
if not test_dataset:
return None
return self.get_dataloader(test_dataset, identity)
def _closing_iter(dataloader):
yield from dataloader
dataloader.__exit__(None, None, None)
def collate_and_preprocess(batch_preprocessor: BatchPreprocessor, use_gpu: bool):
"""Helper for Petastorm's DataLoader to preprocess.
TODO(kaiwenw): parallelize preprocessing by using transform of Petastorm reader
Should pin memory and preprocess in reader and convert to gpu in collate_fn.
"""
def collate_fn(batch_list: List[Dict]):
batch = decimal_friendly_collate(batch_list)
preprocessed_batch = batch_preprocessor(batch)
if use_gpu:
preprocessed_batch = preprocessed_batch.cuda()
return preprocessed_batch
return collate_fn
| 10,842 | 33.753205 | 88 | py |
ReAgent | ReAgent-master/reagent/optimizer/scheduler_union.py | #!/usr/bin/env python3
import logging
from typing import List
import reagent.optimizer.uninferrable_schedulers as cannot_be_inferred
import torch
from reagent.core.configuration import make_config_class, param_hash
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.tagged_union import TaggedUnion
from .scheduler import LearningRateSchedulerConfig
from .utils import is_torch_lr_scheduler
logger = logging.getLogger(__name__)
cannot_be_inferred_modules = [cannot_be_inferred]
if IS_FB_ENVIRONMENT:
import reagent.optimizer.fb.uninferrable_schedulers as fb_cannot_be_inferred
cannot_be_inferred_modules.append(fb_cannot_be_inferred)
def get_torch_lr_schedulers() -> List[str]:
# Not type annotated and default is None (i.e unable to infer valid annotation)
return [
name
for name in dir(torch.optim.lr_scheduler)
if is_torch_lr_scheduler(getattr(torch.optim.lr_scheduler, name))
]
classes = {}
for name in get_torch_lr_schedulers():
cannot_be_inferred_module = None
for module in cannot_be_inferred_modules:
if hasattr(module, name):
cannot_be_inferred_module = module
break
if cannot_be_inferred_module is not None:
# these were manually filled in.
subclass = getattr(cannot_be_inferred_module, name)
else:
torch_lr_scheduler_class = getattr(torch.optim.lr_scheduler, name)
subclass = type(
name,
# must subclass LearningRateSchedulerConfig to be added to the Registry
(LearningRateSchedulerConfig,),
{"__module__": __name__},
)
make_config_class(torch_lr_scheduler_class, blocklist=["optimizer"])(subclass)
subclass.__hash__ = param_hash
classes[name] = subclass
@LearningRateSchedulerConfig.fill_union()
class LearningRateScheduler__Union(TaggedUnion):
pass
| 1,898 | 29.142857 | 86 | py |
ReAgent | ReAgent-master/reagent/optimizer/soft_update.py | #!/usr/bin/env python3
import torch
class SoftUpdate(torch.optim.Optimizer):
def __init__(self, target_params, source_params, tau=0.1):
"""
Perform soft-update on target_params. Soft-update gradually blends
source_params into target_params with this update equation:
target_param = tau * source_param + (1 - tau) * target_param
"""
target_params = list(target_params)
source_params = list(source_params)
if len(target_params) != len(source_params):
raise ValueError(
"target and source must have the same number of parameters"
)
for t_param, s_param in zip(target_params, source_params):
if t_param.shape != s_param.shape:
raise ValueError(
"The shape of target parameter doesn't match that of the source"
)
params = target_params + source_params
defaults = dict(
tau=tau, lr=1.0
) # set a dummy learning rate because optimizers are expected to have one
super().__init__(params, defaults)
for group in self.param_groups:
tau = group["tau"]
if tau > 1.0 or tau < 0.0:
raise ValueError(f"tau should be in [0.0, 1.0]; got {tau}")
@classmethod
def make_optimizer_scheduler(cls, target_params, source_params, tau):
su = cls(target_params, source_params, tau)
return {"optimizer": su}
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params = group["params"]
n = len(params)
tau = group["tau"]
for target_param, source_param in zip(params[: n // 2], params[n // 2 :]):
if target_param is source_param:
                    # skip soft-updating when the target network shares the parameter with
                    # the network being trained.
continue
new_param = tau * source_param.data + (1.0 - tau) * target_param.data
target_param.data.copy_(new_param)
return loss
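# Minimal usage sketch (illustrative; the Linear networks and tau value are
# assumptions, not part of the original module): after optimizing the online
# network, step() blends its weights into the target copy.
if __name__ == "__main__":
    import torch.nn as nn
    online = nn.Linear(4, 2)
    target = nn.Linear(4, 2)
    target.load_state_dict(online.state_dict())
    soft_updater = SoftUpdate(target.parameters(), online.parameters(), tau=0.1)
    # ... run optimizer steps on `online` as usual, then periodically:
    soft_updater.step()  # target <- 0.1 * online + 0.9 * target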
| 2,463 | 34.710145 | 91 | py |
ReAgent | ReAgent-master/reagent/optimizer/union.py | #!/usr/bin/env python3
import logging
from typing import List
import reagent.optimizer.uninferrable_optimizers as cannot_be_inferred
import torch
from reagent.core.configuration import make_config_class, param_hash
from reagent.core.tagged_union import TaggedUnion
from .optimizer import OptimizerConfig
from .utils import is_torch_optimizer
logger = logging.getLogger(__name__)
def get_torch_optimizers() -> List[str]:
return [
name
for name in dir(torch.optim)
if is_torch_optimizer(getattr(torch.optim, name))
]
classes = {}
for name in get_torch_optimizers():
if hasattr(cannot_be_inferred, name):
# these were manually filled in.
subclass = getattr(cannot_be_inferred, name)
else:
# this points to the optimizer class in torch.optim (e.g. Adam)
torch_optimizer_class = getattr(torch.optim, name)
# dynamically create wrapper class, which has the same name as torch_class
subclass = type(
name,
# must subclass Optimizer to be added to the Registry
(OptimizerConfig,),
{},
)
# fill in optimizer parameters (except params)
make_config_class(torch_optimizer_class, blocklist=["params"])(subclass)
subclass.__hash__ = param_hash
classes[name] = subclass
@OptimizerConfig.fill_union()
class Optimizer__Union(TaggedUnion):
@classmethod
def default(cls, **kwargs):
"""Return default factory for Optimizer (defaulting to Adam)."""
return (
cls(Adam=classes["Adam"]())
if kwargs == {}
else cls(Adam=classes["Adam"](**kwargs))
)
def make_optimizer_scheduler(self, params):
return self.value.make_optimizer_scheduler(params)
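# Example (illustrative; `model` is an assumption): Optimizer__Union.default(lr=1e-3)
# wraps the dynamically generated Adam config, and calling
# make_optimizer_scheduler(model.parameters()) delegates to that config to build
# the underlying torch.optim.Adam instance.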
| 1,781 | 27.741935 | 82 | py |