code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
"""An online Q-lambda agent trained to play BSuite's Catch env."""
import collections
from absl import app
from absl import flags
from bsuite.environments import catch
import dm_env
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from rlax.examples import experiment
# Container for the actor's output: the chosen actions and the Q-values
# they were derived from.
ActorOutput = collections.namedtuple("ActorOutput", ["actions", "q_values"])

FLAGS = flags.FLAGS
# Experiment configuration flags.
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 500, "Number of train episodes.")
flags.DEFINE_integer("num_hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_integer("sequence_length", 4,
                     "Length of (action, timestep) sequences.")
flags.DEFINE_float("epsilon", 0.01, "Epsilon-greedy exploration probability.")
flags.DEFINE_float("lambda_", 0.9, "Mixing parameter for Q(lambda).")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
                     "Number of episodes between evaluations.")
def build_network(num_hidden_units: int, num_actions: int) -> hk.Transformed:
  """Create a Haiku-transformed MLP mapping an observation to Q-values."""

  def q_net(obs):
    model = hk.Sequential([
        lambda x: jnp.reshape(x, (-1,)),
        nets.MLP([num_hidden_units, num_actions]),
    ])
    return model(obs)

  return hk.without_apply_rng(hk.transform(q_net))
class SequenceAccumulator:
  """Accumulator for gathering the latest timesteps into sequences.

  Note sequences can overlap and cross episode boundaries.
  """

  def __init__(self, length):
    # Fixed-size FIFO: the deque silently drops the oldest entry once
    # `length` (action, timestep) pairs have been pushed.
    self._timesteps = collections.deque(maxlen=length)

  def push(self, timestep, action):
    """Appends one (previous action, timestep) pair to the buffer."""
    # Replace `None`s with zeros as these will be put into NumPy arrays.
    a_tm1 = 0 if action is None else action
    timestep_t = timestep._replace(
        step_type=int(timestep.step_type),
        reward=0. if timestep.reward is None else timestep.reward,
        discount=0. if timestep.discount is None else timestep.discount,
    )
    self._timesteps.append((a_tm1, timestep_t))

  def sample(self, batch_size):
    """Returns current sequence of accumulated timesteps."""
    if batch_size != 1:
      raise ValueError("Require batch_size == 1.")
    if len(self._timesteps) != self._timesteps.maxlen:
      raise ValueError("Not enough timesteps for a full sequence.")
    # Stack per-step leaves so the deque of structures becomes a single
    # structure of [sequence_length, ...] arrays.
    actions, timesteps = jax.tree_map(lambda *ts: np.stack(ts),
                                      *self._timesteps)
    return actions, timesteps

  def is_ready(self, batch_size):
    # True once the deque holds a full `length`-step sequence.
    if batch_size != 1:
      raise ValueError("Require batch_size == 1.")
    return len(self._timesteps) == self._timesteps.maxlen
class OnlineQLambda:
  """An online Q-lambda agent.

  Acts epsilon-greedily with respect to an MLP Q-network and learns online
  from short (action, timestep) sequences via rlax's Q(lambda) return.
  """

  def __init__(self, observation_spec, action_spec, num_hidden_units, epsilon,
               lambda_, learning_rate):
    self._observation_spec = observation_spec
    self._action_spec = action_spec
    self._epsilon = epsilon
    self._lambda = lambda_
    # Neural net and optimiser.
    self._network = build_network(num_hidden_units, action_spec.num_values)
    self._optimizer = optax.adam(learning_rate)
    # Jitting for speed.
    self.actor_step = jax.jit(self.actor_step)
    self.learner_step = jax.jit(self.learner_step)

  def initial_params(self, key):
    """Initialises network parameters from a dummy observation."""
    sample_input = self._observation_spec.generate_value()
    return self._network.init(key, sample_input)

  def initial_actor_state(self):
    # This agent keeps no recurrent actor state.
    return ()

  def initial_learner_state(self, params):
    """Initialises the optimiser state."""
    return self._optimizer.init(params)

  def actor_step(self, params, env_output, actor_state, key, evaluation):
    """Selects an action: greedy when evaluating, eps-greedy when training."""
    q = self._network.apply(params, env_output.observation)
    train_a = rlax.epsilon_greedy(self._epsilon).sample(key, q)
    eval_a = rlax.greedy().sample(key, q)
    # `evaluation` is traced under jit, so branch with lax.select, not `if`.
    a = jax.lax.select(evaluation, eval_a, train_a)
    return ActorOutput(actions=a, q_values=q), actor_state

  def learner_step(self, params, data, learner_state, unused_key):
    """Performs one SGD step on the Q(lambda) loss."""
    dloss_dtheta = jax.grad(self._loss)(params, *data)
    updates, learner_state = self._optimizer.update(dloss_dtheta, learner_state)
    params = optax.apply_updates(params, updates)
    return params, learner_state

  def _loss(self, params, actions, timesteps):
    """Calculates Q-lambda loss given parameters, actions and timesteps."""
    # Apply the network to every observation in the sequence.
    network_apply_sequence = jax.vmap(self._network.apply, in_axes=(None, 0))
    q = network_apply_sequence(params, timesteps.observation)
    # Use a mask since the sequence could cross episode boundaries.
    mask = jnp.not_equal(timesteps.step_type, int(dm_env.StepType.LAST))
    a_tm1 = actions[1:]
    r_t = timesteps.reward[1:]
    # Discount ought to be zero on a LAST timestep, use the mask to ensure this.
    discount_t = timesteps.discount[1:] * mask[1:]
    q_tm1 = q[:-1]
    q_t = q[1:]
    mask_tm1 = mask[:-1]
    # Mask out TD errors for the last state in an episode.
    td_error_tm1 = mask_tm1 * rlax.q_lambda(
        q_tm1, a_tm1, r_t, discount_t, q_t, lambda_=self._lambda)
    # Mean l2 loss over the unmasked timesteps only.
    return jnp.sum(rlax.l2_loss(td_error_tm1)) / jnp.sum(mask_tm1)
def main(unused_arg):
  """Trains and periodically evaluates the Q(lambda) agent on BSuite Catch."""
  env = catch.Catch(seed=FLAGS.seed)
  agent = OnlineQLambda(
      observation_spec=env.observation_spec(),
      action_spec=env.action_spec(),
      num_hidden_units=FLAGS.num_hidden_units,
      epsilon=FLAGS.epsilon,
      lambda_=FLAGS.lambda_,
      learning_rate=FLAGS.learning_rate)
  accumulator = SequenceAccumulator(length=FLAGS.sequence_length)
  experiment.run_loop(
      agent=agent,
      environment=env,
      accumulator=accumulator,
      seed=FLAGS.seed,
      batch_size=1,  # strictly online: a single unbatched experience stream
      train_episodes=FLAGS.train_episodes,
      evaluate_every=FLAGS.evaluate_every,
      eval_episodes=FLAGS.eval_episodes,
  )
if __name__ == "__main__":
app.run(main) | /rlax-0.1.6-py3-none-any.whl/examples/online_q_lambda.py | 0.885291 | 0.452475 | online_q_lambda.py | pypi |
"""A simple online Q-learning agent trained to play BSuite's Catch env."""
import collections
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import optax
import rlax
from rlax.examples import experiment
# Actor output: chosen actions plus the Q-values they came from.
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
# A single SARS' transition (obs, action, reward, discount, next obs).
Transition = collections.namedtuple("Transition",
                                    "obs_tm1 a_tm1 r_t discount_t obs_t")

FLAGS = flags.FLAGS
# Experiment configuration flags.
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 500, "Number of train episodes.")
flags.DEFINE_integer("num_hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon", 0.01, "Epsilon-greedy exploration probability.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
                     "Number of episodes between evaluations.")
def build_network(num_hidden_units: int, num_actions: int) -> hk.Transformed:
  """Build a Haiku-transformed MLP that maps an observation to Q-values."""

  def q_values(obs):
    layer_sizes = [num_hidden_units, num_actions]
    model = hk.Sequential([
        lambda x: jnp.reshape(x, (-1,)),
        nets.MLP(layer_sizes),
    ])
    return model(obs)

  return hk.without_apply_rng(hk.transform(q_values))
class TransitionAccumulator:
  """Simple Python accumulator holding only the single latest transition.

  Keeps the two most recent environment outputs plus the action taken in
  between, exposing them as a SARS' `Transition` for online learning.
  """

  def __init__(self):
    self._prev = None    # env output at time t-1
    self._action = None  # action taken at time t-1
    self._latest = None  # env output at time t

  def push(self, env_output, action):
    """Records a new env output and the action that preceded it."""
    self._prev = self._latest
    self._action = action
    self._latest = env_output

  def sample(self, batch_size):
    """Returns the latest transition; only batch_size == 1 is supported."""
    # Fix: validate with ValueError rather than `assert` (asserts are
    # stripped under `python -O`), matching SequenceAccumulator's style.
    if batch_size != 1:
      raise ValueError("Require batch_size == 1.")
    return Transition(self._prev.observation, self._action, self._latest.reward,
                      self._latest.discount, self._latest.observation)

  def is_ready(self, batch_size):
    """True once two env outputs have been pushed (a full transition)."""
    if batch_size != 1:
      raise ValueError("Require batch_size == 1.")
    return self._prev is not None
class OnlineQ:
  """An online Q-learning deep RL agent."""

  def __init__(self, observation_spec, action_spec, num_hidden_units, epsilon,
               learning_rate):
    self._observation_spec = observation_spec
    self._action_spec = action_spec
    self._epsilon = epsilon
    # Neural net and optimiser.
    self._network = build_network(num_hidden_units, action_spec.num_values)
    self._optimizer = optax.adam(learning_rate)
    # Jitting for speed.
    self.actor_step = jax.jit(self.actor_step)
    self.learner_step = jax.jit(self.learner_step)

  def initial_params(self, key):
    """Initialises network parameters from a dummy observation."""
    sample_input = self._observation_spec.generate_value()
    return self._network.init(key, sample_input)

  def initial_actor_state(self):
    # No recurrent actor state.
    return ()

  def initial_learner_state(self, params):
    """Initialises the optimiser state."""
    return self._optimizer.init(params)

  def actor_step(self, params, env_output, actor_state, key, evaluation):
    """Selects an action: greedy when evaluating, eps-greedy when training."""
    q = self._network.apply(params, env_output.observation)
    train_a = rlax.epsilon_greedy(self._epsilon).sample(key, q)
    eval_a = rlax.greedy().sample(key, q)
    # `evaluation` is traced under jit, so use lax.select rather than `if`.
    a = jax.lax.select(evaluation, eval_a, train_a)
    return ActorOutput(actions=a, q_values=q), actor_state

  def learner_step(self, params, data, learner_state, unused_key):
    """Performs one SGD step on the one-step Q-learning loss."""
    dloss_dtheta = jax.grad(self._loss)(params, *data)
    updates, learner_state = self._optimizer.update(dloss_dtheta, learner_state)
    params = optax.apply_updates(params, updates)
    return params, learner_state

  def _loss(self, params, obs_tm1, a_tm1, r_t, discount_t, obs_t):
    # Online variant (no target network): both Q(s_{t-1}) and Q(s_t) are
    # computed from the same parameters.
    q_tm1 = self._network.apply(params, obs_tm1)
    q_t = self._network.apply(params, obs_t)
    td_error = rlax.q_learning(q_tm1, a_tm1, r_t, discount_t, q_t)
    return rlax.l2_loss(td_error)
def main(unused_arg):
  """Trains and periodically evaluates the online Q agent on BSuite Catch."""
  env = catch.Catch(seed=FLAGS.seed)
  agent = OnlineQ(
      observation_spec=env.observation_spec(),
      action_spec=env.action_spec(),
      num_hidden_units=FLAGS.num_hidden_units,
      epsilon=FLAGS.epsilon,
      learning_rate=FLAGS.learning_rate,
  )
  accumulator = TransitionAccumulator()
  experiment.run_loop(
      agent=agent,
      environment=env,
      accumulator=accumulator,
      seed=FLAGS.seed,
      batch_size=1,  # strictly online: learn from each single transition
      train_episodes=FLAGS.train_episodes,
      evaluate_every=FLAGS.evaluate_every,
      eval_episodes=FLAGS.eval_episodes,
  )
if __name__ == "__main__":
app.run(main) | /rlax-0.1.6-py3-none-any.whl/examples/online_q_learning.py | 0.886402 | 0.53206 | online_q_learning.py | pypi |
import math
import numpy as np
import rlbase.misc as misc
class BaseAgent:
    """Common state for tabular agents: value tables and visit counts.

    Subclasses override start/step/end to implement learning; this base
    class simply follows the supplied policy without updating anything.
    """

    def __init__(self, **kwargs):
        self.pi = kwargs.get("pi")
        env = kwargs.get("env")
        q_init = kwargs.get("q_init", 0)
        # State values and state visit counts.
        self.v = {state: 0 for state in env.states}
        self.nv = {state: 0 for state in env.states}
        # Action values (initialised to q_init) and action visit counts.
        self.q = {state: {action: q_init for action in env.valid_actions[state]}
                  for state in env.states}
        self.nq = {state: {action: 0 for action in env.valid_actions[state]}
                   for state in env.states}

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        return self.pi.get(s)

    def step(self, r, s):
        """Observe reward ``r`` and next state ``s``; return the next action."""
        return self.pi.get(s)

    def end(self, r):
        """Episode ended with terminal reward ``r``; nothing to learn here."""
class SimpleAgent(BaseAgent):
    """Action-value agent with sample-average or constant step-size updates.

    Optionally uses UCB action selection when ``ucb_c`` is given.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.alpha = kwargs.get("alpha")  # None -> sample-average step size
        self.ucb_c = kwargs.get("ucb_c")  # None/0 -> plain greedy scores

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        self.t = 0
        self.last_action = self.pi.get(s)
        self.last_state = s
        return self.last_action

    def step(self, r, s):
        """Observe reward ``r``, update values for the previous (state, action)
        pair, make the policy greedy, and return the next action."""
        self.t += 1
        self.nv[self.last_state] += 1
        self.nq[self.last_state][self.last_action] += 1
        if self.alpha:
            stepsize = self.alpha
        else:
            stepsize = 1 / self.nq[self.last_state][self.last_action]
        self.q[self.last_state][self.last_action] += stepsize * (
            r - self.q[self.last_state][self.last_action])
        if self.ucb_c:
            # Upper-confidence-bound scores; the 1e-5 term avoids division by
            # zero for actions that have never been tried.
            scores = {a: v + self.ucb_c * (math.log(self.t)
                                           / (1e-5 + self.nq[self.last_state][a])) ** .5
                      for (a, v) in self.q[self.last_state].items()}
        else:
            scores = self.q[self.last_state]
        self.pi.update(self.last_state, misc.argmax_unique(scores))
        # Bug fix: advance last_state so the next update credits the state the
        # action was actually taken in (the original never updated it, which
        # only happens to be harmless in single-state/bandit settings).
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action
class GradientAgent(BaseAgent):
    """Gradient bandit / policy-gradient agent over softmax preferences.

    Updates the policy's preference vector ``pi.h`` by stochastic gradient
    ascent, optionally using the running average reward as a baseline.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.alpha = kwargs.get("alpha")  # None -> sample-average step size
        self.baseline = kwargs.get("baseline", 0)
        self.average_reward = 0

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        self.t = 0
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action

    def step(self, r, s):
        """Preference-gradient update, then return the next action."""
        self.t += 1
        # Bug fix: the action count must be advanced here; previously it was
        # never incremented, so the sample-average fallback below divided by
        # zero on the first step whenever ``alpha`` was unset.
        self.nq[self.last_state][self.last_action] += 1
        if self.alpha:
            stepsize = self.alpha
        else:
            stepsize = 1 / self.nq[self.last_state][self.last_action]
        if self.baseline:
            # Incremental running mean of observed rewards.
            self.average_reward += (r - self.average_reward) / self.t
        one_hot = np.zeros(self.pi.na)
        one_hot[self.last_action] = 1
        # Gradient ascent on preferences (REINFORCE-style with baseline).
        self.pi.h[self.last_state] += stepsize * (r - self.average_reward) * (
            one_hot - misc.softmax(self.pi.h[self.last_state]))
        self.pi.update(self.last_state)
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action
class SarsaAgent(BaseAgent):
    """On-policy TD control (Sarsa)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.alpha = kwargs.get("alpha", 0.5)
        # Bug fix: ``self.gamma`` is read in step() but was never
        # initialised, raising AttributeError on the first step.
        self.gamma = kwargs.get("gamma", 1.0)
        env = kwargs.get("env")
        # NOTE(review): re-initialising q to 0 here discards any ``q_init``
        # passed to BaseAgent -- kept as-is for backward compatibility.
        self.q = {s: {a: 0 for a in env.valid_actions[s]} for s in env.states}

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action

    def step(self, r, s):
        """Sarsa update using the actually-sampled next action ``a``."""
        a = self.pi.get(s)
        self.q[self.last_state][self.last_action] += self.alpha * (
            r + self.gamma * self.q[s][a] - self.q[self.last_state][self.last_action])
        # Make the policy greedy w.r.t. the updated action values.
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
        self.last_state = s
        self.last_action = a
        return self.last_action

    def end(self, r):
        """Terminal update: no bootstrap beyond the final reward."""
        self.q[self.last_state][self.last_action] += self.alpha * (
            r - self.q[self.last_state][self.last_action])
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
class QlearningAgent(BaseAgent):
    """Off-policy TD control (Q-learning)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.alpha = kwargs.get("alpha", 0.5)
        # Bug fix: ``self.gamma`` is read in step() but was never
        # initialised, raising AttributeError on the first step.
        self.gamma = kwargs.get("gamma", 1.0)

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action

    def step(self, r, s):
        """Q-learning update, then return the behaviour-policy action."""
        # Behaviour action is sampled before the policy is made greedy below.
        a = self.pi.get(s)
        # Bootstrap from the greedy action in the new state (off-policy target).
        a1 = misc.argmax_unique(self.q[s])
        self.q[self.last_state][self.last_action] += self.alpha * (
            r + self.gamma * self.q[s][a1] - self.q[self.last_state][self.last_action])
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
        self.last_state = s
        self.last_action = a
        return self.last_action

    def end(self, r):
        """Terminal update: no bootstrap beyond the final reward."""
        self.q[self.last_state][self.last_action] += self.alpha * (
            r - self.q[self.last_state][self.last_action])
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
class ExpectedSarsaAgent(BaseAgent):
    """Expected Sarsa: bootstraps from the policy-expected action value."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.alpha = kwargs.get("alpha", 0.5)
        # Bug fix: ``self.gamma`` is read in step() but was never
        # initialised, raising AttributeError on the first step.
        self.gamma = kwargs.get("gamma", 1.0)

    def start(self, s):
        """Begin an episode in state ``s``; return the first action."""
        self.last_state = s
        self.last_action = self.pi.get(s)
        return self.last_action

    def step(self, r, s):
        """Expected-Sarsa update, then return the next action."""
        a = self.pi.get(s)
        # Expectation of Q(s, .) under the current policy.
        exp_val = sum(self.pi.prob(a1, s) * val for (a1, val) in self.q[s].items())
        self.q[self.last_state][self.last_action] += self.alpha * (
            r + self.gamma * exp_val - self.q[self.last_state][self.last_action])
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
        self.last_state = s
        self.last_action = a
        return self.last_action

    def end(self, r):
        """Terminal update: no bootstrap beyond the final reward."""
        self.q[self.last_state][self.last_action] += self.alpha * (
            r - self.q[self.last_state][self.last_action])
        a0 = misc.argmax_unique(self.q[self.last_state])
        self.pi.update(self.last_state, a0)
import numpy as np
import numpy.random as npr
import rlbase.misc as misc
def transform_Q_to_BestAction(Q):
    """Map each state to the list of actions maximising Q in that state."""
    return {state: misc.all_argmax(action_values)
            for state, action_values in Q.items()}
class Policy:
    """Base class for policies over an environment's valid actions."""

    def __init__(self, **kwargs):
        environment = kwargs.get("env")
        self.valid_actions = environment.valid_actions
        self.n_valid_actions = {
            state: len(actions) for state, actions in self.valid_actions.items()
        }

    def prob(self, a, s):
        """Probability of action ``a`` in state ``s`` (subclass hook)."""

    def get(self, s):
        """Sample an action for state ``s`` (subclass hook)."""
class UniformPolicy(Policy):
    """Uniform random policy over each state's valid actions."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def prob(self, a, s):
        """Every valid action is equally likely."""
        return 1 / self.n_valid_actions[s]

    def get(self, s):
        """Draw uniformly from the valid actions of state ``s``."""
        return misc.sample(self.valid_actions[s])
class BestActionPolicy(Policy):
    """Uniform policy restricted to a precomputed set of best actions."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.best_actions = kwargs.get("best_actions")
        self.n_best_actions = {
            state: len(actions) for state, actions in self.best_actions.items()
        }

    def prob(self, a, s):
        """Uniform over the best actions of ``s``; zero elsewhere."""
        if a not in self.best_actions[s]:
            return 0
        return 1 / self.n_best_actions[s]

    def get(self, s):
        """Draw uniformly from the best actions of state ``s``."""
        return misc.sample(self.best_actions[s])
class DeterministicPolicy(Policy):
    """Policy mapping every state to exactly one action."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        best = kwargs.get("best_actions")
        if best:
            # Pick the first of the provided best actions per state.
            self.det_actions = {state: actions[0] for state, actions in best.items()}
        else:
            # Default to the first valid action of every state.
            self.det_actions = {
                state: self.valid_actions[state][0] for state in self.valid_actions
            }

    def prob(self, a, s):
        """True (1) iff ``a`` is the deterministic action for ``s``."""
        return self.det_actions[s] == a

    def get(self, s):
        """Return the deterministic action for state ``s``."""
        return self.det_actions[s]

    def update(self, s, a):
        """Point state ``s`` at action ``a``."""
        self.det_actions[s] = a
class EpsGreedy(Policy):
    """Epsilon-greedy wrapper around a deterministic policy.

    With probability ``1 - eps`` the deterministic action is returned;
    otherwise a uniformly random valid action is sampled.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.eps = kwargs.get("eps")
        det_policy = kwargs.get("det_policy")
        if det_policy:
            self.det_policy = det_policy
        else:
            # Fix: the original also injected kwargs["valid_actions"] before
            # constructing the default policy, but DeterministicPolicy only
            # reads "env"/"best_actions" -- removed as dead code (it mutated
            # the local kwargs dict to no effect).
            self.det_policy = DeterministicPolicy(**kwargs)

    def prob(self, a, s):
        """Probability of ``a`` in ``s`` under eps-greedy exploration."""
        if a == self.det_policy.get(s):
            return 1 - self.eps + self.eps / self.n_valid_actions[s]
        else:
            return self.eps / self.n_valid_actions[s]

    def get(self, s):
        """Greedy action w.p. ``1 - eps``, else a random valid action."""
        a0 = self.det_policy.get(s)
        if npr.rand() > self.eps:
            return a0
        else:
            return misc.sample(self.valid_actions[s])

    def update(self, s, a0):
        """Update the underlying deterministic policy's action for ``s``."""
        self.det_policy.update(s, a0)
class Softmax(Policy):
    """Softmax (Gibbs) policy over learned action preferences ``h``."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        environment = kwargs.get("env")
        self.na = len(environment.actions)
        # Zero preferences everywhere, hence uniform initial probabilities.
        self.h = {state: [0] * self.na for state in environment.states}
        self.p = {state: [1 / self.na] * self.na for state in environment.states}

    def prob(self, a, s):
        """Current softmax probability of action ``a`` in state ``s``."""
        return self.p[s][a]

    def get(self, s):
        """Sample an action index according to the softmax probabilities."""
        return np.random.choice(self.na, p=self.p[s])

    def update(self, s):
        """Recompute the probabilities of ``s`` from its preferences."""
        self.p[s] = misc.softmax(self.h[s])
import typing as t
import attrs
import numpy as np
import numpy.typing as npt
from rlbcore import api, uis
from rlbcore.external_utils.other import null_object
@attrs.define()
class EpisodeReturnRecorder:
    """Records episode returns during training and prints them to console.

    Args:
        ui (CliUI): The UI to use for printing the episode returns.

    Example:
        ```python
        >>> from pydantic import BaseModel
        ... from rlbcore.uis import CliUI
        ... recorder = EpisodeReturnRecorder(CliUI("rlb", "foo", BaseModel()))
        ... recorder.track(np.array([1, 2, 3]), np.array([False, False, False]))
        ... recorder.track(np.array([1, 2, 3]), np.array([False, False, True]))
        ... # Prints Step 2: -> Avg Return: 6.00
        ... assert (recorder._episode_returns == np.array([2, 4, 0])).all()
        ... recorder.track(np.array([1, 2, 3]), np.array([False, True, False]))
        ... # Prints Step 3: -> Avg Return: 6.00
        ... assert (recorder._episode_returns == np.array([3, 0, 3])).all()
        ... recorder.track(np.array([1, 2, 3]), np.array([True, False, False]))
        ... # Prints Step 4: -> Avg Return: 4.00
        ... assert (recorder._episode_returns == np.array([0, 2, 6])).all()
        ```
    """

    # UI used for reporting; defaults to a no-op stand-in for CliUI.
    ui: api.UI = attrs.field(factory=lambda: null_object(uis.CliUI))
    # Number of track() calls seen so far.
    _step: int = attrs.field(init=False, default=0)
    # Running per-environment return accumulator; lazily created on first
    # track() call so it matches the reward batch's shape/dtype.
    _episode_returns: npt.NDArray[np.float32] = attrs.field(init=False, default=None)

    def track(
        self, rewards: npt.NDArray[np.float32], dones: npt.NDArray[np.bool_]
    ) -> None:
        """Track the episode returns.

        Args:
            rewards: The rewards received in the current step.
            dones: The dones received in the current step.

        Effects:
            Updates the internal state of the recorder.
            On each episode completion, prints the episode return to console.
        """
        self._step += 1
        if self._episode_returns is None:
            self._episode_returns = np.zeros_like(rewards)
        self._episode_returns += rewards
        if not dones.any():
            return
        # Pull out the returns of the finished episodes and zero their
        # accumulator slots for the episodes that start next step.
        episode_returns = np.extract(dones, self._episode_returns)
        np.putmask(self._episode_returns, dones, 0)
        avg_return = episode_returns.mean()
        self.ui.log_ep_return(step=self._step, avg_return=avg_return)
def get_final_episode_observations(
    next_observations: np.ndarray[t.Any, np.dtype[t.Any]],
    dones: np.ndarray[t.Any, np.dtype[np.bool_]],
    infos: dict[str, t.Any],
) -> np.ndarray[t.Any, np.dtype[t.Any]]:
    """Substitute reset observations with the true final observations.

    When an episode terminates, gymnasium's VectorEnv puts the first
    observation of the *new* episode into ``next_observations`` and stashes
    the final observation of the finished episode under
    ``infos["final_observation"]``.  This helper returns observations where
    every terminated slot holds that final observation instead.

    Args:
        next_observations: Batched observations returned by the vector env.
        dones: Per-environment boolean termination flags.
        infos: Step infos; must contain the key ``"final_observation"``
            whenever any entry of ``dones`` is True.

    Returns:
        ``next_observations`` unchanged when no episode finished, otherwise a
        copy with the final observations written at the done indices.

    See also:
        [gymnasium.vector.VectorEnv.step](https://gymnasium.farama.org/api/vector/
        #gymnasium.vector.VectorEnv.step) for details on this behavior.
    """
    if not dones.any():
        return next_observations
    final_obs = infos["final_observation"]
    patched: np.ndarray[t.Any, np.dtype[t.Any]] = next_observations.copy()
    for index_row in np.argwhere(dones):
        env_index = index_row[0]
        patched[env_index] = final_obs[env_index]
    return patched
import functools
import typing as t
from pprint import pformat
from omegaconf import MISSING, DictConfig, ListConfig, MissingMandatoryValue, OmegaConf
def nested_dict_contains_dot_key(
    dictionary: dict[str, t.Any] | DictConfig, dot_key: str
) -> bool:
    """Check whether ``dot_key`` names an existing nested entry of ``dictionary``.

    Args:
        dictionary: A possibly nested mapping.
        dot_key: Dot-separated key path; e.g. ``"a.b.c"`` requires the value
            at ``"a"`` to be a mapping containing ``"b"``, which in turn
            contains ``"c"``.

    Returns:
        True only if every segment of the path exists at its exact nested
        position.

    NOTE:
        This function is currently used in `rlbcore.cli.run` to check if the
        config overrides provided through the CLI are keys in the config.

    Example:
        ```python
        >>> assert nested_dict_contains_dot_key({"a": 0, "b": {"c": 1}}, "b.c")
        >>> assert not nested_dict_contains_dot_key({"a": 0, "b": {"c": 1}}, "c")
        ```
    """
    head, separator, tail = dot_key.partition(".")
    if not separator:
        return head in dictionary
    if head not in dictionary:
        return False
    return nested_dict_contains_dot_key(dictionary[head], tail)
def update_maybe_nested(
    config: dict[str, t.Any] | DictConfig, key: str, value: t.Any
) -> None:
    """Set ``value`` at the (possibly dot-nested) ``key`` in ``config``, in place.

    Args:
        config: A mapping with arbitrary nesting.
        key: Plain key, or dot-notation path for nested access.
        value: The value to store.

    Returns:
        None; ``config`` is mutated in place.

    Example:
        ```python
        >>> cfg = {"a": 0, "b": {"c": 1}}
        >>> update_maybe_nested(cfg, "b.c", 2)
        >>> cfg
        {'a': 0, 'b': {'c': 2}}
        ```
    """
    head, separator, tail = key.partition(".")
    if not separator:
        config[key] = value
        return
    # Recurse into the nested mapping named by the first path segment.
    update_maybe_nested(config[head], tail, value)
def get_top_level_key(
    config: dict[str, t.Any] | DictConfig | list[t.Any] | ListConfig, key: str
) -> t.Any:
    """Return the value of a single (non-nested) ``key`` segment in ``config``.

    For mappings the key is used directly; for sequences the key must be a
    single ``"[i]"`` segment, which is converted to an integer index.

    Args:
        config: Mapping or sequence to index into.
        key: One path segment -- a plain mapping key or ``"[index]"``.

    Returns:
        The value stored under ``key``.

    Example:
        ```python
        >>> get_top_level_key({"a": 1}, "a")
        1
        >>> get_top_level_key([10, 20], "[1]")
        20
        ```
    """
    if isinstance(config, (DictConfig, dict)):
        return config[key]
    # Sequence access: the key must look like "[i]" and contain exactly one
    # bracketed index.
    assert key.startswith("[") and key.endswith("]"), key
    assert "][" not in key, f"Not top level key: {key}"
    index = int(key[1:-1])
    return config[index]
def select_maybe_nested(
    config: dict[str, t.Any] | DictConfig | list[t.Any] | ListConfig, key: str
) -> t.Any:
    """Return value of possibly nested `key` in config.

    Args:
        config: Dictionary with arbitrary level of nesting.
        key: Uses dot notation if nested.

    Returns:
        None, but updates the config in place.

    Example: Select nested key from dict
        ```python
        >>> select_maybe_nested(
        ...     dict(
        ...         a=0,
        ...         b=dict(
        ...             c="???",
        ...             d=dict(
        ...                 e=0,
        ...                 f="???",
        ...             ),
        ...         ),
        ...     ),
        ...     "b.d"
        ... )
        {'e': 0, 'f': '???'}
        ```

    Example: Select nested key from list
        ```python
        >>> select_maybe_nested(
        ...     [
        ...         0,
        ...         [
        ...             "???",
        ...             [
        ...                 0,
        ...                 "???",
        ...             ],
        ...         ],
        ...     ],
        ...     "[1][1]"
        ... )
        [0, '???']
        ```
    """
    # Base case: `key` is a single segment (no dots, no "][" list chaining),
    # or it literally exists as a top-level key of the config.
    if ("." not in key and "][" not in key) or key in config:
        try:
            return get_top_level_key(config, key)
        except (KeyError, IndexError) as e:
            raise KeyError(f"Key {key} not found in config") from e
    if "][" in key and "." in key:
        # Mixed path: split on whichever separator appears first.
        is_list = key.index("][") < key.index(".")
        if is_list:
            # Splitting on "][" consumes both brackets; restore them so the
            # head stays "[i]" and the tail still starts with "[".
            top_key, _, sub_key = key.partition("][")
            top_key = f"{top_key}]"
            sub_key = f"[{sub_key}"
        else:
            top_key, _, sub_key = key.partition(".")
    elif "][" in key:
        # Pure list path like "[1][2]": same bracket re-balancing as above.
        top_key, _, sub_key = key.partition("][")
        top_key = f"{top_key}]"
        sub_key = f"[{sub_key}"
    else:
        top_key, _, sub_key = key.partition(".")
    try:
        # Recurse into the child selected by the first path segment.
        return select_maybe_nested(get_top_level_key(config, top_key), sub_key)
    except (KeyError, IndexError) as e:
        raise KeyError(f"Key {key} not found in config") from e
@functools.singledispatch
def get_missing_keys(config: dict[str, t.Any] | DictConfig) -> list[str]:
    """Return list of keys with value = "???" in config in dot key format.

    Missing keys may be arbitrarily nested. For nested keys with missing values,
    the keys are returned in dot format such that the key can be accessed with
    OmegaConf as `OmegaConf.select(missing_key)`

    Args:
        config: Dictionary with arbitrary level of nesting.

    Example: Get missing keys from DictConfig
        ```python
        >>> get_missing_keys(
        ...     DictConfig(
        ...         dict(
        ...             a=0,
        ...             b=0,
        ...             c="???",
        ...             d=dict(
        ...                 e=0,
        ...                 f="???",
        ...             ),
        ...         )
        ...     )
        ... )
        ['c', 'd.f']
        ```

    Example: Get missing keys from dict
        ```python
        >>> get_missing_keys(
        ...     dict(
        ...         a=0,
        ...         b=0,
        ...         c="???",
        ...         d=dict(
        ...             e=0,
        ...             f="???",
        ...         ),
        ...     )
        ... )
        ['c', 'd.f']
        ```
    """
    # Concrete implementations for DictConfig and plain dict are registered
    # below via @get_missing_keys.register.
    raise NotImplementedError(f"for config of type {type(config)}")
@get_missing_keys.register
def _get_missing_keys_dict_config(config: DictConfig) -> list[str]:
    """Collect dot-keys whose value is MISSING ("???") in a DictConfig."""
    missing: list[str] = []
    nested_keys: list[str] = []
    # First pass: record top-level missing keys and remember nested configs.
    for key in config:
        assert isinstance(key, str)
        if OmegaConf.is_missing(config, key):
            missing.append(key)
        elif isinstance(config[key], (DictConfig, dict)):
            nested_keys.append(key)
        # Present scalar values need no action.
    # Second pass: recurse into nested configs and prefix their results.
    for parent in nested_keys:
        for child in _get_missing_keys_dict_config(config[parent]):
            missing.append(f"{parent}.{child}")
    return missing
@get_missing_keys.register
def _get_missing_keys_dict(config: dict) -> list[str]:  # type: ignore
    """Collect dot-keys whose value equals MISSING ("???") in a plain dict."""
    missing: list[str] = []
    nested_keys: list[str] = []
    # First pass: top-level missing values and nested dicts to recurse into.
    for key, value in config.items():  # type: ignore
        if value == MISSING:
            missing.append(key)  # type: ignore
        elif isinstance(value, dict):
            nested_keys.append(key)  # type: ignore
    # Second pass: recurse and prefix child results with the parent key.
    for parent in nested_keys:
        missing.extend(
            f"{parent}.{child}" for child in _get_missing_keys_dict(config[parent])
        )
    return missing
def recursive_iter(
    config: DictConfig | dict[str, t.Any] | ListConfig | list[t.Any],
    list_is_leaf: bool = True,
    base_key: str = "",
) -> t.Iterator[str]:
    """Iterate over all keys in config, including keys in nested configs.

    Args:
        config: config to iterate over
        list_is_leaf: if True, will not iterate over list and ListConfig values
        base_key: base key to prepend to all keys

    Yields:
        key in config

    Raises:
        MissingMandatoryValue: if config contains missing keys, i.e. if
            `len(get_missing_keys(config)) > 0`

    Example: Iterate over DictConfig
        ```python
        >>> list(
        ...     recursive_iter(
        ...         DictConfig(
        ...             dict(
        ...                 a=0,
        ...                 b=dict(
        ...                     c=0,
        ...                     d=dict(e=0),
        ...                     f=dict(g=0),
        ...                     h=0,
        ...                 )
        ...             )
        ...         )
        ...     )
        ... )
        ['a', 'b', 'b.c', 'b.d', 'b.d.e', 'b.f', 'b.f.g', 'b.h']
        >>> import pytest
        ... with pytest.raises(MissingMandatoryValue):
        ...     list(recursive_iter(DictConfig(dict(a=0, b=dict(c=0, d="???")))))
        ```

    Example: Iterate over dict
        ```python
        >>> list(
        ...     recursive_iter(
        ...         dict(
        ...             a=0,
        ...             b=dict(
        ...                 c=0,
        ...                 d=dict(e=0),
        ...                 f=dict(g=0),
        ...                 h=0,
        ...             )
        ...         )
        ...     )
        ... )
        ['a', 'b', 'b.c', 'b.d', 'b.d.e', 'b.f', 'b.f.g', 'b.h']
        ```

    Example: Iterate over dict with list
        ```python
        >>> list(
        ...     recursive_iter(
        ...         dict(
        ...             a=0,
        ...             b=dict(c=[1, 2, 3])
        ...         ),
        ...     )
        ... )
        ['a', 'b', 'b.c']
        ```

    Example: Iterate over dict with list and list_is_leaf=False
        ```python
        >>> list(
        ...     recursive_iter(
        ...         dict(
        ...             a=0,
        ...             b=dict(c=[1, 2, 3])
        ...         ),
        ...         list_is_leaf=False,
        ...     )
        ... )
        ['a', 'b', 'b.c', 'b.c.[0]', 'b.c.[1]', 'b.c.[2]']
        ```
    """
    # filter(None, ...) drops any falsy (empty) keys the helper may yield.
    yield from filter(None, _recursive_iter(config, list_is_leaf, base_key))


def _recursive_iter(
    config: t.Any, list_is_leaf: bool = True, base_key: str = ""
) -> t.Iterator[str | None]:
    # Decide what to iterate over: mapping keys, or list indices when lists
    # are not treated as leaves; anything else terminates the recursion.
    if isinstance(config, (DictConfig, dict)):
        key_iter = iter(config)
    elif not list_is_leaf and isinstance(config, (ListConfig, list)):
        key_iter = range(len(config))
    else:
        return
    separator = "." if base_key else ""
    for key in key_iter:
        # Integer keys (list indices) are rendered as "[i]" path segments.
        suffix = f"[{key}]" if isinstance(key, int) else key
        new_key = f"{base_key}{separator}{suffix}"
        try:
            value = config[key]  # type: ignore
        except MissingMandatoryValue as e:
            # OmegaConf raises on access to a "???" value; re-raise with the
            # full config in the message for easier debugging.
            raise MissingMandatoryValue(
                f"Cannot iterate over config with missing keys: \n{pformat(config)}"
            ) from e
        # Plain dicts store the MISSING sentinel itself instead of raising.
        if isinstance(value, type(MISSING)) and value == MISSING:
            raise MissingMandatoryValue(
                f"Cannot iterate over config with missing keys: \n{pformat(config)}"
            )
        yield new_key
        yield from recursive_iter(value, list_is_leaf, new_key)
def iter_leaves(
config: DictConfig | dict[str, t.Any] | ListConfig | list[t.Any],
list_is_leaf: bool = True,
) -> t.Iterator[str]:
"""Return an iterator over keys whose values are not DictConfigs.
Args:
config: config to iterate over
list_is_leaf: if True, any key containing a list of values will be treated as
a leaf node
Returns:
iterator over keys whose values are not DictConfigs or dicts (or lists
and ListConfigs if list_is_leaf is False)
Example: Iterate over DictConfig
```python
>>> list(
... iter_leaves(
... DictConfig(
... dict(
... a=0,
... b=dict(
... c=0,
... d=dict(e=0),
... f=dict(g=0),
... h=0,
... )
... )
... )
... )
... )
['a', 'b.c', 'b.d.e', 'b.f.g', 'b.h']
```
Example: Iterate over dict
```python
>>> list(
... iter_leaves(
... dict(
... a=0,
... b=dict(
... c=0,
... d=dict(e=0),
... f=dict(g=0),
... h=0,
... )
... )
... )
... )
['a', 'b.c', 'b.d.e', 'b.f.g', 'b.h']
```
Example: if missing values ("???") are present, an exception is raised
```python
>>> import pytest
... with pytest.raises(MissingMandatoryValue):
... list(iter_leaves(DictConfig(dict(a=0, b=dict(c=0, d="???")))))
```
Example: list or ListConfig is treated as a leaf node
```python
>>> list(
... iter_leaves(
... dict(
... a=0,
... b=dict(c=0),
... d=dict(e=[0, 1, 2]),
... ),
... list_is_leaf=True,
... )
... )
['a', 'b.c', 'd.e']
```
Example: list or ListConfig is NOT treated as a leaf node
```python
>>> list(
... iter_leaves(
... dict(
... a=0,
... b=dict(c=0),
... d=dict(e=[0, 1, 2]),
... ),
... list_is_leaf=False,
... )
... )
['a', 'b.c', 'd.e.[0]', 'd.e.[1]', 'd.e.[2]']
```
"""
for key in recursive_iter(config, list_is_leaf):
if _is_leaf_type(select_maybe_nested(config, key), list_is_leaf):
yield key
def _is_leaf_type(value: t.Any, list_is_leaf: bool) -> bool:
    """Return True when ``value`` should be treated as a leaf node.

    Mappings are always containers; sequences count as containers only when
    ``list_is_leaf`` is False.
    """
    container_types: tuple[type[t.Any], ...] = (DictConfig, dict)
    if not list_is_leaf:
        container_types += (ListConfig, list)
    return not isinstance(value, container_types)
def iter_leaf_items(
    config: DictConfig | dict[str, t.Any] | ListConfig | list[t.Any],
    list_is_leaf: bool = True,
) -> t.Iterator[tuple[str, t.Any]]:
    """Yield ``(dotted_key, value)`` pairs for every leaf in ``config``.

    Args:
        config: The config (mapping or sequence) to walk recursively.
        list_is_leaf: If True, any key containing a list of values will be
            treated as a leaf node.

    Returns:
        An iterator over key, (leaf) value pairs.

    Raises:
        MissingMandatoryValue, if config contains missing keys, i.e. if
            `len(get_missing_keys(config)) > 0`

    Example: Iterate over all leaf items in a DictConfig
    ```python
    >>> list(
    ...     iter_leaf_items(
    ...         DictConfig(dict(a=0, b=dict(c=1, d=dict(e=2), f=dict(g=3), h=4)))
    ...     )
    ... )
    [('a', 0), ('b.c', 1), ('b.d.e', 2), ('b.f.g', 3), ('b.h', 4)]
    ```

    Example: Iterate over all leaf items in a plain dict
    ```python
    >>> list(iter_leaf_items(dict(a=0, b=dict(c=1, h=4))))
    [('a', 0), ('b.c', 1), ('b.h', 4)]
    ```

    Example: If some values are missing ("???"), an exception is raised
    ```python
    >>> import pytest
    ... with pytest.raises(MissingMandatoryValue):
    ...     list(iter_leaf_items(DictConfig(dict(a=0, b=dict(c=0, d="???")))))
    ```
    """
    yield from (
        (leaf_key, select_maybe_nested(config, leaf_key))
        for leaf_key in iter_leaves(config, list_is_leaf)
    )
def iter_leaf_values(
    config: DictConfig | dict[str, t.Any] | ListConfig | list[t.Any],
    list_is_leaf: bool = True,
) -> t.Iterator[t.Any]:
    """Iterate over all values in config, including keys in nested configs.

    Args:
        config: The config to iterate over.
        list_is_leaf: If True, any key containing a list of values will be treated as
            a leaf node

    Returns:
        An iterator over all (leaf) values in config.

    Raises:
        MissingMandatoryValue, if config contains missing keys, i.e. if
        `len(get_missing_keys(config)) > 0`

    Example: Iterate over all leaf values in a DictConfig
    ```python
    >>> list(
    ...     iter_leaf_values(
    ...         DictConfig(
    ...             dict(
    ...                 a=0,
    ...                 b=dict(
    ...                     c=1,
    ...                     d=dict(e=2),
    ...                     f=dict(g=3),
    ...                     h=4,
    ...                 )
    ...             )
    ...         )
    ...     )
    ... )
    [0, 1, 2, 3, 4]
    ```

    Example: If some values are missing ("???"), an exception is raised
    ```python
    >>> import pytest
    ... with pytest.raises(MissingMandatoryValue):
    ...     list(iter_leaf_values(DictConfig(dict(a=0, b=dict(c=0, d="???")))))
    ```
    """
    # Same traversal as iter_leaf_items, but yielding values only.
    for key in iter_leaves(config, list_is_leaf):
        yield select_maybe_nested(config, key)
import typing as t
from unittest.mock import MagicMock
AnyT = t.TypeVar("AnyT")
def null_object(
    cls: type[AnyT],
    property_returns: dict[str, t.Any] | None = None,
    method_returns: dict[str, t.Any] | None = None,
) -> AnyT:
    """Create a null object following the Null object pattern.

    Args:
        cls: The class of the object for which a null object will be created.
        property_returns: A dictionary mapping property names to the values that
            should be returned when the property is accessed.
        method_returns: A dictionary mapping method names to the values that should
            be returned when the method is called.

    Returns:
        A dummy object having the same spec as `cls` that does nothing for any method
        call.

    Example:
    ```python
    >>> import pytest
    ... class Foo:
    ...     def __init__(self, x: int):
    ...         self.x = x
    ...     def bar(self, y: list[int]) -> None:
    ...         y.append(self.x)
    ... actual_foo = Foo(1)
    ... l = []
    ... actual_foo.bar(l)
    ... assert l == [1]
    ... null_foo = null_object(Foo)
    ... new_l = []
    ... null_foo.bar(new_l)
    ... assert new_l == []
    ... with pytest.raises(AttributeError):
    ...     null_foo.x
    ... with pytest.raises(AttributeError):
    ...     null_foo.a_method_that_does_not_exist()
    ```

    Usage:
    ------
    Instead of the following pattern:
    ```python
    >>> foo: Foo | None = None
    ... if foo is not None:
    ...     foo.bar([])
    ```
    Do:
    ```python
    >>> foo = null_object(Foo)
    ... foo.bar([])  # This will do nothing
    ```
    """
    _property_returns = property_returns or {}
    _method_returns = method_returns or {}

    class NullObject(cls):  # type: ignore
        # Skip the parent __init__ entirely so a null object can be built
        # without the real class's required constructor arguments.
        def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
            pass

        def __getattribute__(self, __name: str) -> t.Any:
            # Explicitly configured property values take top priority.
            if __name in _property_returns:
                return _property_returns[__name]
            # Configured methods return a callable producing the given value.
            if __name in _method_returns:
                return MagicMock(return_value=_method_returns[__name])
            if hasattr(super(), __name):
                # If the attribute exists in the parent class, but attribute was not
                # specified in method or property returns, return its mocked version
                return MagicMock()
            # Unknown attribute: fall back to the default lookup, which raises
            # AttributeError (matching the docstring's contract).
            return super().__getattribute__(__name)  # type: ignore

    # Masquerade as the original class for nicer reprs/debugging.
    NullObject.__name__ = cls.__name__
return t.cast(AnyT, NullObject()) | /external_utils/other.py | 0.852506 | 0.759359 | other.py | pypi |
<!-- Logo -->
<p align="center">
<img src="https://raw.githubusercontent.com/rlberry-py/rlberry/main/assets/logo_wide.svg" width="50%">
</p>
<!-- Short description -->
<p align="center">
A Reinforcement Learning Library for Research and Education
</p>
<!-- The badges -->
<p align="center">
<a href="https://github.com/rlberry-py/rlberry/workflows/test/badge.svg">
<img alt="pytest" src="https://github.com/rlberry-py/rlberry/workflows/test/badge.svg">
</a>
<a href='https://rlberry.readthedocs.io/en/latest/?badge=latest'>
<img alt="Documentation Status" src="https://readthedocs.org/projects/rlberry/badge/?version=latest">
</a>
<a href="https://img.shields.io/github/contributors/rlberry-py/rlberry">
<img alt="contributors" src="https://img.shields.io/github/contributors/rlberry-py/rlberry">
</a>
<a href="https://codecov.io/gh/rlberry-py/rlberry">
<img alt="codecov" src="https://codecov.io/gh/rlberry-py/rlberry/branch/main/graph/badge.svg?token=TIFP7RUD75">
</a>
</p>
<p align="center">
<!-- <a href="https://img.shields.io/pypi/pyversions/rlberry">
<img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/rlberry">
</a> -->
</p>
<p align="center">
<!-- <a href="https://pypi.org/project/rlberry/">
<img alt="PyPI" src="https://img.shields.io/pypi/v/rlberry">
</a> -->
<!-- <a href="https://img.shields.io/pypi/wheel/rlberry">
<img alt="PyPI - Wheel" src="https://img.shields.io/pypi/wheel/rlberry">
</a> -->
<!-- <a href="https://img.shields.io/pypi/status/rlberry">
<img alt="PyPI - Status" src="https://img.shields.io/pypi/status/rlberry">
</a> -->
<!-- <a href="https://img.shields.io/pypi/dm/rlberry">
<img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dm/rlberry">
</a> -->
<!-- <a href="https://zenodo.org/badge/latestdoi/304451364">
<img src="https://zenodo.org/badge/304451364.svg" alt="DOI">
</a> -->
</p>
<p align="center">
<a href="https://colab.research.google.com/github/rlberry-py/notebooks/blob/main/introduction_to_rlberry.ipynb">
<b>Try it on Google Colab!</b>
<img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg">
</a>
</p>
<!-- Horizontal rule -->
<hr>
<!-- Table of content -->
## What is `rlberry`?
**Writing reinforcement learning algorithms is fun!** *But after the fun, we have lots of boring things to implement*:
run our agents in parallel, average and plot results, optimize hyperparameters, compare to baselines, create tricky
environments etc etc!
`rlberry` **is a Python library that makes your life easier** by doing all these things with a few lines of code, so
that you can spend most of your time developing agents.
`rlberry` also provides implementations of several RL agents, benchmark environments and many other useful tools.
## Installation
Install the latest version for a stable release.
```bash
pip install rlberry
```
The documentation includes more [installation instructions](https://rlberry.readthedocs.io/en/latest/installation.html) in particular for users that work with Jax.
## Getting started
In our [documentation](https://rlberry.readthedocs.io/en/latest/), you will find [quick starts](https://rlberry.readthedocs.io/en/latest/user_guide.html#quick-start-setup-an-experiment-and-evaluate-different-agents) to the library and a [user guide](https://rlberry.readthedocs.io/en/latest/user_guide.html) with a few tutorials on using rlberry.
Also, we provide a handful of notebooks on [Google colab](https://colab.research.google.com/) as examples to show you
how to use `rlberry`:
| Content | Description | Link |
|-|-|-|
| Introduction to `rlberry` | How to create an agent, optimize its hyperparameters and compare to a baseline.| <a href="https://colab.research.google.com/github/rlberry-py/notebooks/blob/main/introduction_to_rlberry.ipynb"><img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a>|
| Evaluating and optimizing agents | Train a REINFORCE agent and optimize its hyperparameters| <a href="https://colab.research.google.com/github/rlberry-py/notebooks/blob/main/rlberry_evaluate_and_optimize_agent.ipynb"><img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a>
## Changelog
See the [changelog](https://rlberry.readthedocs.io/en/latest/changelog.html) for a history of the changes made to rlberry.
## Citing rlberry
If you use `rlberry` in scientific publications, we would appreciate citations using the following Bibtex entry:
```bibtex
@misc{rlberry,
author = {Domingues, Omar Darwiche and Flet-Berliac, Yannis and Leurent, Edouard and M{\'e}nard, Pierre and Shang, Xuedong and Valko, Michal},
doi = {10.5281/zenodo.5544540},
month = {10},
title = {{rlberry - A Reinforcement Learning Library for Research and Education}},
url = {https://github.com/rlberry-py/rlberry},
year = {2021}
}
```
## Development notes
The modules listed below are experimental at the moment, that is, they are not thoroughly tested and are subject to change.
* `rlberry.network`: Allows communication between a server and client via sockets, and can be used to run agents remotely.
* `rlberry.agents.experimental`: Experimental agents that are not thoroughly tested.
## About us
This project was initiated and is actively maintained by [INRIA SCOOL team](https://team.inria.fr/scool/).
More information [here](https://rlberry.readthedocs.io/en/latest/about.html#).
## Contributing
Want to contribute to `rlberry`? Please check [our contribution guidelines](https://rlberry.readthedocs.io/en/latest/contributing.html). **If you want to add any new agents or environments, do not hesitate
to [open an issue](https://github.com/rlberry-py/rlberry/issues/new/choose)!**
| /rlberry-0.5.0.tar.gz/rlberry-0.5.0/README.md | 0.418697 | 0.8308 | README.md | pypi |
import datetime
from typing import TypeVar
import six
from rlbot_action_server import typing_utils
def _deserialize(data, klass):
    """Deserializes dict, list, str into an object.

    Dispatches on ``klass``: primitives, plain objects, dates/datetimes,
    typing generics (List/Dict), type variables, and finally model classes.

    :param data: dict, list or str.
    :param klass: class literal, or string of class name.
    :return: object.
    """
    if data is None:
        return None

    if klass in six.integer_types or klass in (float, str, bool):
        return _deserialize_primitive(data, klass)
    elif klass == object:
        return _deserialize_object(data)
    elif klass == datetime.date:
        return deserialize_date(data)
    elif klass == datetime.datetime:
        return deserialize_datetime(data)
    elif typing_utils.is_generic(klass):  # Modified this according to https://github.com/OpenAPITools/openapi-generator/issues/1866
        if typing_utils.is_list(klass):
            return _deserialize_list(data, klass.__args__[0])
        if typing_utils.is_dict(klass):
            return _deserialize_dict(data, klass.__args__[1])
        # NOTE(review): a generic that is neither a list nor a dict falls
        # through here and the function returns None implicitly.
    elif isinstance(klass, TypeVar):  # Invented this myself.
        # Unconstrained type variable: pass the raw data through untouched.
        return data
    else:
        # Anything else is treated as a generated model class.
        return deserialize_model(data, klass)
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
def _deserialize_object(value):
"""Return a original value.
:return: object.
"""
return value
def deserialize_date(string):
"""Deserializes string to date.
:param string: str.
:type string: str
:return: date.
:rtype: date
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
def deserialize_datetime(string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:type string: str
:return: datetime.
:rtype: datetime
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
def deserialize_model(data, klass):
    """Deserialize a list or dict into a generated model instance.

    :param data: dict, list.
    :type data: dict | list
    :param klass: class literal.
    :return: model object.
    """
    instance = klass()
    # Models without declared swagger_types carry no typed attributes:
    # hand the raw data back unchanged.
    if not instance.swagger_types:
        return data
    for attr, attr_type in six.iteritems(instance.swagger_types):
        if data is None:
            continue
        mapped_key = instance.attribute_map[attr]
        if mapped_key in data and isinstance(data, (list, dict)):
            setattr(instance, attr, _deserialize(data[mapped_key], attr_type))
    return instance
def _deserialize_list(data, boxed_type):
    """Deserialize a list, deserializing each element to ``boxed_type``.

    :param data: list to deserialize.
    :type data: list
    :param boxed_type: class literal.
    :return: deserialized list.
    :rtype: list
    """
    result = []
    for element in data:
        result.append(_deserialize(element, boxed_type))
    return result
def _deserialize_dict(data, boxed_type):
    """Deserialize a dict, deserializing each value to ``boxed_type``.

    Keys are kept as-is; only values are converted.

    :param data: dict to deserialize.
    :type data: dict
    :param boxed_type: class literal.
    :return: deserialized dict.
    :rtype: dict
    """
    return {key: _deserialize(val, boxed_type) for key, val in six.iteritems(data)}
from math import pi
from typing import List
import eel
from rlbot.gateway_util import NetworkingRole
from rlbot.matchconfig.loadout_config import LoadoutConfig
from rlbot.matchconfig.match_config import PlayerConfig, MatchConfig, MutatorConfig, ScriptConfig
from rlbot.parsing.incrementing_integer import IncrementingInteger
from rlbot.setup_manager import SetupManager, RocketLeagueLauncherPreference
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.game_state_util import GameState, CarState, BallState, Physics, Vector3, Rotator
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot_gui.type_translation.set_state_translation import dict_to_game_state
from rlbot_gui.match_runner.custom_maps import (
prepare_custom_map,
identify_map_directory,
convert_custom_map_to_path
)
sm: SetupManager = None
def create_player_config(bot: dict, human_index_tracker: IncrementingInteger):
    """Translate a GUI bot dict into an RLBot PlayerConfig.

    Human participants receive a unique human_index from the shared tracker;
    bot players always get index 0.
    """
    config = PlayerConfig()
    bot_type = bot['type']
    config.bot = bot_type in ('rlbot', 'psyonix')
    config.rlbot_controlled = bot_type in ('rlbot', 'party_member_bot')
    config.bot_skill = bot['skill']
    config.human_index = 0 if config.bot else human_index_tracker.increment()
    config.name = bot['name']
    config.team = int(bot['team'])
    if bot.get('path'):
        config.config_path = bot['path']
    return config
def create_script_config(script):
    """Wrap a GUI script dict (with a 'path' entry) in a ScriptConfig."""
    script_path = script['path']
    return ScriptConfig(script_path)
def spawn_car_in_showroom(loadout_config: LoadoutConfig, team: int, showcase_type: str, map_name: str,
                          launcher_prefs: RocketLeagueLauncherPreference):
    """Launch (or reuse) a freeplay match and pose a single bot car to preview a loadout.

    Args:
        loadout_config: The loadout (car body, decals, ...) to show off.
        team: 0 (blue) or 1 (orange); also mirrors positions via team_sign.
        showcase_type: One of "boost", "throttle", "back-center-kickoff" or
            "goal-explosion" — selects the scripted pose/animation below.
        map_name: Map to load for the showroom.
        launcher_prefs: Steam/Epic launcher preference used when connecting.
    """
    match_config = MatchConfig()
    match_config.game_mode = 'Soccer'
    match_config.game_map = map_name
    match_config.instant_start = True
    # Reuse an already-running match instead of restarting the game.
    match_config.existing_match_behavior = 'Continue And Spawn'
    match_config.networking_role = NetworkingRole.none
    match_config.enable_state_setting = True
    match_config.skip_replays = True

    bot_config = PlayerConfig()
    bot_config.bot = True
    bot_config.rlbot_controlled = True
    bot_config.team = team
    bot_config.name = "Showroom"
    bot_config.loadout_config = loadout_config

    match_config.player_configs = [bot_config]
    match_config.mutators = MutatorConfig()
    match_config.mutators.boost_amount = 'Unlimited'
    match_config.mutators.match_length = 'Unlimited'

    global sm
    if sm is None:
        sm = SetupManager()
    sm.connect_to_game(launcher_preference=launcher_prefs)
    sm.load_match_config(match_config)
    sm.start_match()

    # Neutral starting state; the branches below override parts of it.
    game_state = GameState(
        cars={0: CarState(physics=Physics(
            location=Vector3(0, 0, 20),
            velocity=Vector3(0, 0, 0),
            angular_velocity=Vector3(0, 0, 0),
            rotation=Rotator(0, 0, 0)
        ))},
        ball=BallState(physics=Physics(
            location=Vector3(0, 0, -100),
            velocity=Vector3(0, 0, 0),
            angular_velocity=Vector3(0, 0, 0)
        ))
    )
    player_input = PlayerInput()
    # Sign used to mirror positions/directions depending on the team.
    team_sign = -1 if team == 0 else 1
    if showcase_type == "boost":
        # Drive in a boosting circle.
        player_input.boost = True
        player_input.steer = 1
        game_state.cars[0].physics.location.y = -1140
        game_state.cars[0].physics.velocity.x = 2300
        game_state.cars[0].physics.angular_velocity.z = 3.5
    elif showcase_type == "throttle":
        # Drive in a slower, wider circle without boost.
        player_input.throttle = 1
        player_input.steer = 0.56
        game_state.cars[0].physics.location.y = -1140
        game_state.cars[0].physics.velocity.x = 1410
        game_state.cars[0].physics.angular_velocity.z = 1.5
    elif showcase_type == "back-center-kickoff":
        # Static pose at the back-center kickoff spot, facing mid-field.
        game_state.cars[0].physics.location.y = 4608 * team_sign
        game_state.cars[0].physics.rotation.yaw = -0.5 * pi * team_sign
    elif showcase_type == "goal-explosion":
        # Launch the car at a ball placed near the goal to trigger the explosion.
        game_state.cars[0].physics.location.y = -2000 * team_sign
        game_state.cars[0].physics.rotation.yaw = -0.5 * pi * team_sign
        game_state.cars[0].physics.velocity.y = -2300 * team_sign
        game_state.ball.physics.location = Vector3(0, -3500 * team_sign, 93)

    sm.game_interface.update_player_input(player_input, 0)
    sm.game_interface.set_game_state(game_state)
def set_game_state(state):
    """Apply a dict-shaped desired game state to the running match.

    Lazily creates and connects the module-level SetupManager on first use.
    """
    global sm
    if sm is None:
        sm = SetupManager()
        sm.connect_to_game()
    # Translate the GUI's plain-dict representation into RLBot's GameState.
    game_state = dict_to_game_state(state)
    sm.game_interface.set_game_state(game_state)
def fetch_game_tick_packet() -> GameTickPacket:
    """Return a freshly updated GameTickPacket from the running game.

    Lazily creates and connects the module-level SetupManager on first use.
    """
    global sm
    if sm is None:
        sm = SetupManager()
        sm.connect_to_game()
    game_tick_packet = GameTickPacket()
    # Fill the packet in place from the game's shared memory interface.
    sm.game_interface.update_live_data_packet(game_tick_packet)
    return game_tick_packet
def get_fresh_setup_manager():
    """Replace the module-level SetupManager with a brand-new instance.

    Any existing manager is shut down first; shutdown errors are only
    printed because a failed teardown should not block starting a new match.
    """
    global sm
    if sm is not None:
        try:
            sm.shut_down()
        except Exception as e:
            print(e)
    sm = SetupManager()
    return sm
def setup_match(
    setup_manager: SetupManager, match_config: MatchConfig, launcher_pref: RocketLeagueLauncherPreference
):
    """Starts the match and bots. Also detects and handles custom maps"""

    def do_setup():
        # Common launch sequence, shared by the custom-map and stock-map paths.
        setup_manager.early_start_seconds = 5
        setup_manager.connect_to_game(launcher_preference=launcher_pref)
        # Loading the setup manager's game interface just as a quick fix because story mode uses it. Ideally story mode
        # should now make its own game interface to use.
        setup_manager.game_interface.load_interface(wants_ball_predictions=False, wants_quick_chat=False, wants_game_messages=False)
        setup_manager.load_match_config(match_config)
        setup_manager.launch_early_start_bot_processes()
        setup_manager.start_match()
        setup_manager.launch_bot_processes()
        return setup_manager

    game_map = match_config.game_map
    # .upk/.udk maps are custom map files: they get swapped in over a stock
    # map for the duration of match setup (see prepare_custom_map).
    if game_map.endswith('.upk') or game_map.endswith('.udk'):
        map_file = convert_custom_map_to_path(game_map)
        rl_directory = identify_map_directory(launcher_pref)
        if not all([map_file, rl_directory]):
            print("Couldn't load custom map")
            return
        with prepare_custom_map(map_file, rl_directory) as (game_map, metadata):
            match_config.game_map = game_map
            if "config_path" in metadata:
                # The custom map ships a companion script; attach it so it
                # runs alongside the match.
                config_path = metadata["config_path"]
                match_config.script_configs.append(
                    create_script_config({'path': config_path}))
                print(f"Will load custom script for map {config_path}")
            do_setup()
    else:
        do_setup()
def start_match_helper(bot_list: List[dict], match_settings: dict, launcher_prefs: RocketLeagueLauncherPreference):
    """Translate GUI match settings into a MatchConfig and launch the match.

    On failure the error is reported back to the GUI via eel.matchStartFailed;
    on success eel.matchStarted is fired.
    """
    print(bot_list)
    print(match_settings)

    match_config = MatchConfig()
    match_config.game_mode = match_settings['game_mode']
    match_config.game_map = match_settings['map']
    match_config.skip_replays = match_settings['skip_replays']
    match_config.instant_start = match_settings['instant_start']
    match_config.enable_lockstep = match_settings['enable_lockstep']
    match_config.enable_rendering = match_settings['enable_rendering']
    match_config.enable_state_setting = match_settings['enable_state_setting']
    match_config.auto_save_replay = match_settings['auto_save_replay']
    match_config.existing_match_behavior = match_settings['match_behavior']

    # Mutator names in the GUI settings dict match MutatorConfig attribute
    # names one-to-one, so they can be copied over generically.
    match_config.mutators = MutatorConfig()
    mutators = match_settings['mutators']
    for mutator_name in (
        'match_length', 'max_score', 'overtime', 'series_length', 'game_speed',
        'ball_max_speed', 'ball_type', 'ball_weight', 'ball_size',
        'ball_bounciness', 'boost_amount', 'rumble', 'boost_strength',
        'gravity', 'demolish', 'respawn_time',
    ):
        setattr(match_config.mutators, mutator_name, mutators[mutator_name])

    human_index_tracker = IncrementingInteger(0)
    match_config.player_configs = [create_player_config(bot, human_index_tracker) for bot in bot_list]
    match_config.script_configs = [create_script_config(script) for script in match_settings['scripts']]

    sm = get_fresh_setup_manager()
    try:
        setup_match(sm, match_config, launcher_prefs)
    except Exception as e:
        print(e)
        eel.matchStartFailed(str(e))
        return

    # Note that we are not calling infinite_loop because that is not compatible with the way eel works!
    # Instead we will reproduce the important behavior from infinite_loop inside this file.
    eel.matchStarted()
def do_infinite_loop_content():
    """One tick of the would-be infinite loop: collect bot agent metadata.

    Called periodically by the GUI in place of SetupManager.infinite_loop,
    which would block eel (see the note in start_match_helper).
    """
    if sm is not None and sm.has_started:
        sm.try_recieve_agent_metadata()
def hot_reload_bots():
    """Reload every bot agent in place (used after bot code changes)."""
    if sm is not None:
        sm.reload_all_agents()
def shut_down():
    """Tear down the running match and its bot processes, if any."""
    if sm is None:
        print("There gotta be some setup manager already")
        return
    sm.shut_down(time_limit=5, kill_all_pids=True)
from contextlib import contextmanager
from datetime import datetime
from os import path
from typing import List, Optional
import glob
import shutil
import os
from rlbot.setup_manager import (
SetupManager,
RocketLeagueLauncherPreference,
try_get_steam_executable_path,
)
from rlbot.gamelaunch.epic_launch import locate_epic_games_launcher_rocket_league_binary
from rlbot.utils import logging_utils
from rlbot_gui.persistence.settings import load_settings, BOT_FOLDER_SETTINGS_KEY
CUSTOM_MAP_TARGET = {"filename": "Labs_Utopia_P.upk", "game_map": "UtopiaRetro"}
logger = logging_utils.get_logger("custom_maps")
@contextmanager
def prepare_custom_map(custom_map_file: str, rl_directory: str):
    """
    Provides a context manager. It will swap out the custom_map_file
    for an existing map in RL and it will return the `game_map`
    name that should be used in a MatchConfig.
    Once the context is left, the original map is replaced back.
    The context should be left as soon as the match has started
    """
    # Check for companion metadata: a "_<mapname>.cfg" file next to the map.
    expected_config_name = "_" + path.basename(custom_map_file)[:-4] + ".cfg"
    config_path = path.join(path.dirname(custom_map_file), expected_config_name)
    additional_info = {
        "original_path": custom_map_file,
    }
    if path.exists(config_path):
        additional_info["config_path"] = config_path

    real_map_file = path.join(rl_directory, CUSTOM_MAP_TARGET["filename"])
    # Back up the stock map under a timestamped name so it can be restored.
    timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    temp_filename = real_map_file + "." + timestamp
    shutil.copy2(real_map_file, temp_filename)
    logger.info("Copied real map to %s", temp_filename)
    shutil.copy2(custom_map_file, real_map_file)
    logger.info("Copied custom map from %s", custom_map_file)

    try:
        yield CUSTOM_MAP_TARGET["game_map"], additional_info
    finally:
        # Always restore the original map, even if the match setup failed.
        os.replace(temp_filename, real_map_file)
        logger.info("Reverted real map to %s", real_map_file)
def convert_custom_map_to_path(custom_map: str) -> Optional[str]:
    """Locate ``custom_map`` inside the user's configured bot folders.

    Returns the full path of the matching file (the last match when several
    folders contain it), or None — with a warning logged — when not found.
    """
    found: Optional[str] = None
    for folder in get_search_folders():
        pattern = path.join(glob.escape(folder), "**", custom_map)
        for candidate in glob.iglob(pattern, recursive=True):
            found = candidate
    if not found:
        logger.warning("%s - map doesn't exist", custom_map)
    return found
def find_all_custom_maps() -> List[str]:
    """Collect the basenames of every .upk/.udk map in the visible bot folders.

    Maps whose filename starts with "_" are skipped (reserved for metadata).
    """
    discovered: List[str] = []
    for folder in get_search_folders():
        pattern = path.join(glob.escape(folder), "**", "*.u[pd]k")
        for full_path in glob.iglob(pattern, recursive=True):
            map_name = path.basename(full_path)
            if not map_name.startswith("_"):
                discovered.append(map_name)
    return discovered
def get_search_folders() -> List[str]:
    """Return the bot folders that are marked visible in the GUI settings."""
    bot_folders_setting = load_settings().value(BOT_FOLDER_SETTINGS_KEY, type=dict)
    folders = bot_folders_setting.get("folders", {})
    return [folder for folder, props in folders.items() if props['visible']]
def identify_map_directory(launcher_pref: RocketLeagueLauncherPreference):
    """Find RocketLeague map directory

    Returns the CookedPCConsole directory for the configured launcher
    (Steam or Epic), or None when it cannot be located.
    """
    final_path = None
    if launcher_pref.preferred_launcher == RocketLeagueLauncherPreference.STEAM:
        steam = try_get_steam_executable_path()
        suffix = r"steamapps\common\rocketleague\TAGame\CookedPCConsole"
        if not steam:
            # Steam launcher requested but the Steam executable was not found.
            return None
        # TODO: Steam can install RL on a different disk. Need to
        # read libraryfolders.vdf to detect this situation
        # It's a human-readable but custom format so not trivial to parse
        final_path = path.join(path.dirname(steam), suffix)
    else:
        rl_executable = locate_epic_games_launcher_rocket_league_binary()
        suffix = r"TAGame\CookedPCConsole"
        if not rl_executable:
            return None
        # Binaries/Win64/ is what we want to strip off
        final_path = path.join(path.dirname(rl_executable), "..", "..", suffix)
    if not path.exists(final_path):
        logger.warning("%s - directory doesn't exist", final_path)
        return None
return final_path | /rlbot_gui-0.0.140-py3-none-any.whl/rlbot_gui/match_runner/custom_maps.py | 0.54698 | 0.201145 | custom_maps.py | pypi |
import platform
import random
import time
from datetime import datetime
from multiprocessing import Queue as MPQueue
from traceback import print_exc
from typing import Tuple
from rlbot.matchconfig.match_config import MatchConfig, MutatorConfig
from rlbot.parsing.match_settings_config_parser import (game_mode_types,
match_length_types)
from rlbot.setup_manager import RocketLeagueLauncherPreference, SetupManager
from rlbot.utils.game_state_util import CarState, GameState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from .start_match_util import start_match_wrapper
WITNESS_ID = random.randint(0, 1e5)
RENDERING_GROUP = "STORY"
DEBUG_MODE_SHORT_GAMES = False
def setup_failure_freeplay(setup_manager: SetupManager, message: str, color_key="red"):
    """Drop the player into an empty freeplay match and render an error message.

    Used when a story-mode challenge could not be launched. ``color_key`` must
    be the name of a color method on the renderer (e.g. "red").
    """
    setup_manager.shut_down()
    match_config = MatchConfig()
    match_config.game_mode = game_mode_types[0]
    match_config.game_map = "BeckwithPark"
    match_config.enable_rendering = True

    mutators = MutatorConfig()
    mutators.match_length = match_length_types[3]
    match_config.mutators = mutators

    match_config.player_configs = []
    setup_manager.load_match_config(match_config)
    setup_manager.start_match()
    # wait till num players is 0
    wait_till_cars_spawned(setup_manager, 0)

    # Look up the requested color method by name on the renderer.
    color = getattr(setup_manager.game_interface.renderer, color_key)()
    setup_manager.game_interface.renderer.begin_rendering(RENDERING_GROUP)
    # setup_manager.game_interface.renderer.draw_rect_2d(20, 20, 800, 800, True, setup_manager.game_interface.renderer.black())
    setup_manager.game_interface.renderer.draw_string_2d(20, 200, 4, 4, message, color)
    setup_manager.game_interface.renderer.end_rendering()
def packet_to_game_results(game_tick_packet: GameTickPacket):
    """Take the final game_tick_packet and
    returns the info related to the final game results

    Returns a dict with the human's team, the sorted team scores, per-player
    stats and whether the human's team won, plus a timestamp.
    """
    players = game_tick_packet.game_cars
    human_player = next(p for p in players if not p.is_bot)

    player_stats = [
        {
            "name": p.name,
            "team": p.team,
            # these are always 0, so we don't add them
            # "spawn_id": p.spawn_id,
            # "score": p.score_info.score,
            # "goals": p.score_info.goals,
            # "own_goals": p.score_info.own_goals,
            # "assists": p.score_info.assists,
            # "saves": p.score_info.saves,
            # "shots": p.score_info.shots,
            # "demolitions": p.score_info.demolitions
        }
        for p in players
        if p.name  # unnamed cars are empty slots, not real players
    ]

    if platform.system() == "Windows":
        # team_index = gamePacket.teams[i].team_index
        # new_score = gamePacket.teams[i].score
        scores_sorted = [
            {"team_index": t.team_index, "score": t.score} for t in game_tick_packet.teams
        ]
    else:
        # gotta love them bugs! juicy!!!
        # NOTE(review): on non-Windows the packet's team fields appear swapped;
        # this mirrors the workaround in ManualStatsTracker.updateStats.
        # team_index = gamePacket.teams[i].score - 1
        # new_score = gamePacket.teams[i].team_index
        scores_sorted = [
            {"team_index": t.score - 1, "score": t.team_index} for t in game_tick_packet.teams
        ]
    scores_sorted.sort(key=lambda x: x["score"], reverse=True)
    # Highest score first, so index 0 is the winning team.
    human_won = scores_sorted[0]["team_index"] == human_player.team

    return {
        "human_team": human_player.team,
        "score": scores_sorted,  # [{team_index, score}]
        "stats": player_stats,
        "human_won": human_won,
        "timestamp": datetime.now().isoformat(),
    }
def has_user_perma_failed(challenge, manual_stats):
    """Return True once the challenge is unwinnable no matter how the match ends.

    Currently the only permanent failure is receiving more demolitions than
    the challenge's selfDemoCount allows.
    """
    if "completionConditions" not in challenge:
        return False
    conditions = challenge["completionConditions"]
    if "selfDemoCount" not in conditions:
        return False
    # Exceeding the allowed number of received demos can never be undone.
    return manual_stats["recievedDemos"] > conditions["selfDemoCount"]
def end_by_mercy(challenge, manual_stats, results):
    """Return True when the match can end early out of mercy.

    Requires the human team to be ahead by at least 5 goals while all of the
    challenge's other completion conditions are already satisfied.
    """
    challenge_completed = calculate_completion(challenge, manual_stats, results)
    mercy_difference = 5
    # Team-agnostic lead: first-place score minus second-place score.
    score_gap = results["score"][0]["score"] - results["score"][1]["score"]
    return score_gap >= mercy_difference and challenge_completed
def calculate_completion(challenge, manual_stats, results):
    """Decide whether the player completed the challenge.

    Starts from "did the human win", then applies each entry of the
    challenge's completionConditions; all conditions must hold (logical AND).
    A perma-failed challenge is never completed.
    """
    completed = results["human_won"]
    if "completionConditions" not in challenge:
        return completed
    if has_user_perma_failed(challenge, manual_stats):
        return False

    conditions = challenge["completionConditions"]
    if not conditions.get("win", True):
        # Winning is explicitly not required for this challenge.
        completed = True
    if "scoreDifference" in conditions:
        # Team-agnostic goal differential between first and second place.
        difference = results["score"][0]["score"] - results["score"][1]["score"]
        completed = completed and (difference >= conditions["scoreDifference"])
    if "demoAchievedCount" in conditions:
        completed = completed and (
            manual_stats["opponentRecievedDemos"] >= conditions["demoAchievedCount"]
        )
    if "goalsScored" in conditions:
        completed = completed and (
            manual_stats["humanGoalsScored"] >= conditions["goalsScored"]
        )
    return completed
class ManualStatsTracker:
    """Accumulates per-match stats (demos, human goals) that the game
    packet does not report directly, by edge-detecting packet changes."""

    def __init__(self, challenge):
        # Stats accumulated over the course of the match.
        self.stats = {
            "recievedDemos": 0,  # how many times the human got demo'd
            "opponentRecievedDemos": 0,  # how many times the opponents were demo'd
            "humanGoalsScored": 0,
        }

        self._challenge = challenge
        self._player_count = challenge["humanTeamSize"] + len(challenge["opponentBots"])

        # helper to find discrete demo events
        self._in_demo_state = [False] * self._player_count
        # helper to find who scored!
        self._last_touch_by_team = [None, None]
        self._last_score_by_team = [0, 0]

    def updateStats(self, gamePacket: GameTickPacket):
        """
        Update and track stats based on the game packet
        """
        # keep track of demos
        for i in range(len(self._in_demo_state)):
            cur_player = gamePacket.game_cars[i]
            if self._in_demo_state[i]:  # we will toggle this if we have respawned
                self._in_demo_state[i] = cur_player.is_demolished
            elif cur_player.is_demolished:
                # Rising edge: this car just got demolished this tick.
                print("SOMEONE GOT DEMO'd")
                self._in_demo_state[i] = True
                if not gamePacket.game_cars[i].is_bot:
                    self.stats["recievedDemos"] += 1
                elif i >= self._challenge["humanTeamSize"]:
                    # its an opponent bot
                    self.stats["opponentRecievedDemos"] += 1

        # Remember the latest ball touch per team so goals can be attributed.
        touch = gamePacket.game_ball.latest_touch
        team = touch.team
        self._last_touch_by_team[team] = touch

        for i in range(2):  # iterate of [{team_index, score}]
            if platform.system() == "Windows":
                team_index = gamePacket.teams[i].team_index
                new_score = gamePacket.teams[i].score
            else:
                # gotta love them bugs! juicy!!!
                # NOTE(review): on non-Windows the packet's team fields appear
                # swapped; this mirrors the workaround in packet_to_game_results.
                team_index = gamePacket.teams[i].score - 1
                new_score = gamePacket.teams[i].team_index
            if new_score != self._last_score_by_team[team_index]:
                # Score changed: a goal was scored by team_index this tick.
                self._last_score_by_team[team_index] = new_score
                if self._last_touch_by_team[team_index] is not None:
                    # Credit the human only when a named human made the last touch.
                    last_touch_player = self._last_touch_by_team[team_index].player_index
                    last_touch_player_name = self._last_touch_by_team[team_index].player_name
                    if not gamePacket.game_cars[last_touch_player].is_bot and last_touch_player_name != "":
                        self.stats["humanGoalsScored"] += 1
                        print("humanGoalsScored")
def wait_till_cars_spawned(
    setup_manager: SetupManager, expected_player_count: int
) -> GameTickPacket:
    """Poll the game until all expected cars exist or ~5 seconds elapse.

    Returns the most recent packet either way; callers must check num_cars.
    """
    packet = GameTickPacket()
    setup_manager.game_interface.fresh_live_data_packet(packet, 1000, WITNESS_ID)
    deadline = time.monotonic() + 5
    while packet.num_cars != expected_player_count and time.monotonic() < deadline:
        print("Game started but no cars are in the packets")
        time.sleep(0.5)
        setup_manager.game_interface.fresh_live_data_packet(packet, 1000, WITNESS_ID)
    return packet
def manage_game_state(
    challenge: dict, upgrades: dict, setup_manager: SetupManager
) -> Tuple[bool, dict]:
    """
    Continuously track the game and adjust state to respect challenge rules and
    upgrades.
    At the end of the game, calculate results and the challenge completion
    and return that.

    Returns:
        (challenge_completed, game_results); (False, None) for any early exit
        (no cars spawned, user quit, permanent failure, interrupted game).
    """
    # sentinel returned whenever the run cannot be scored
    early_failure = False, None
    expected_player_count = challenge["humanTeamSize"] + len(challenge["opponentBots"])
    # Wait for everything to be initialized
    packet = wait_till_cars_spawned(setup_manager, expected_player_count)

    if packet.num_cars == 0:
        print("The game was initialized with no cars")
        return early_failure

    # NOTE(review): tick_rate is never used below -- looks like a dead variable
    tick_rate = 120
    results = None
    # boost cap granted by purchased upgrades (0 when no boost upgrade owned)
    max_boost = 0
    if "boost-100" in upgrades:
        max_boost = 100
    elif "boost-33" in upgrades:
        max_boost = 33
    half_field = challenge.get("limitations", []).count("half-field") > 0

    stats_tracker = ManualStatsTracker(challenge)
    last_boost_bump_time = time.monotonic()
    while True:
        try:
            packet = GameTickPacket()
            setup_manager.game_interface.fresh_live_data_packet(
                packet, 1000, WITNESS_ID
            )

            if packet.num_cars == 0:
                # User seems to have ended the match
                print("User ended the match")
                return early_failure

            stats_tracker.updateStats(packet)
            results = packet_to_game_results(packet)

            if has_user_perma_failed(challenge, stats_tracker.stats):
                time.sleep(1)
                setup_failure_freeplay(setup_manager, "You failed the challenge!")
                return early_failure

            if end_by_mercy(challenge, stats_tracker.stats, results):
                time.sleep(3)
                setup_failure_freeplay(setup_manager, "Challenge completed by mercy rule!", "green")
                return True, results

            # only the human car (index 0) ever has its state adjusted below
            human_info = packet.game_cars[0]
            game_state = GameState()
            human_desired_state = CarState()
            game_state.cars = {0: human_desired_state}

            changed = False
            # adjust boost
            # NOTE(review): the comment below mentions heatseeker, but the
            # condition checks half_field -- confirm which is intended.
            if human_info.boost > max_boost and not half_field:
                # Adjust boost, unless in heatseeker mode
                human_desired_state.boost_amount = max_boost
                changed = True

            if "boost-recharge" in upgrades:
                # increase boost at 10% per second (1 unit every 0.1 s)
                now = time.monotonic()
                if human_info.boost < max_boost and (now - last_boost_bump_time > 0.1):
                    changed = True
                    last_boost_bump_time = now
                    human_desired_state.boost_amount = min(human_info.boost + 1, max_boost)

            if changed:
                setup_manager.game_interface.set_game_state(game_state)

            if packet.game_info.is_match_ended:
                break
        except KeyError:
            print_exc()
            # it means that the game was interrupted by the user
            print("Looks like the game is in a bad state")
            setup_failure_freeplay(setup_manager, "The game was interrupted.")
            return early_failure

    return calculate_completion(challenge, stats_tracker.stats, results), results
def run_challenge(
    setup_manager: SetupManager, match_config: MatchConfig, challenge: dict, upgrades: dict, launcher_pref: RocketLeagueLauncherPreference, out: MPQueue
) -> Tuple[bool, dict]:
    """Launch the game and keep track of the state.

    Returns:
        (challenge_completed, game_results) from manage_game_state, or
        (False, None) if tracking the game raised an error.
    """
    start_match_wrapper(setup_manager, match_config, launcher_pref, out)
    setup_manager.game_interface.renderer.clear_screen(RENDERING_GROUP)
    try:
        return manage_game_state(challenge, upgrades, setup_manager)
    except Exception:
        # Catch Exception rather than a bare ``except`` so that
        # KeyboardInterrupt/SystemExit still propagate; any other failure must
        # not kill the story-mode session.
        print_exc()
        print("Something failed with the game. Will proceed with shutdown")
        # need to make failure apparent to user
        setup_failure_freeplay(setup_manager, "The game failed to continue")
        return False, None
def add_match_result(save_state, challenge_id: str, challenge_completed: bool, game_results):
    """Record one attempt at a challenge in the save state.

    game_results should be the output of packet_to_game_results. You have to
    call it anyway to figure out whether the player completed the challenge,
    so it is not recomputed here.

    Returns the (mutated) save_state.
    """
    # no defaultdict because we serialize the data
    attempts = save_state["challenges_attempts"].setdefault(challenge_id, [])
    attempts.append(
        {"game_results": game_results, "challenge_completed": challenge_completed}
    )
    if challenge_completed:
        # remember which attempt completed the challenge and pay the reward
        save_state["challenges_completed"][challenge_id] = len(attempts) - 1
        save_state["upgrades"]["currency"] += 2
    return save_state
from math import pi
from rlbot.gateway_util import NetworkingRole
from rlbot.matchconfig.loadout_config import LoadoutConfig
from rlbot.matchconfig.match_config import (MatchConfig, MutatorConfig,
PlayerConfig)
from rlbot.parsing.agent_config_parser import (
BOT_CONFIG_LOADOUT_HEADER, BOT_CONFIG_LOADOUT_ORANGE_HEADER,
BOT_CONFIG_LOADOUT_PAINT_BLUE_HEADER,
BOT_CONFIG_LOADOUT_PAINT_ORANGE_HEADER, create_looks_configurations,
load_bot_appearance)
from rlbot.setup_manager import RocketLeagueLauncherPreference, SetupManager
from rlbot.utils.game_state_util import (BallState, CarState, GameInfoState,
GameState, Physics, Rotator, Vector3)
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.structures.game_data_struct import Physics as PhysicsGTP
def _physics_to_dict(physics: PhysicsGTP):
    """Convert a game-data Physics struct into a plain nested dict
    (location/velocity/angular_velocity as x-y-z, rotation as pitch-yaw-roll)."""
    def xyz(vec):
        # flatten one Vector3-like struct
        return {'x': vec.x, 'y': vec.y, 'z': vec.z}

    return {
        'location': xyz(physics.location),
        'velocity': xyz(physics.velocity),
        'angular_velocity': xyz(physics.angular_velocity),
        'rotation': {
            'pitch': physics.rotation.pitch,
            'yaw': physics.rotation.yaw,
            'roll': physics.rotation.roll,
        },
    }
def fetch_game_tick_packet(sm: SetupManager) -> dict:
    """Grab the latest live game tick packet and convert it to a plain dict.

    Note: the return annotation previously claimed GameTickPacket, but this
    function has always returned a JSON-friendly dict (consumed as the "Rust
    GameTickPacket"); the annotation now reflects that.
    """
    if not sm.has_started:
        sm.connect_to_game()
    game_tick_packet = GameTickPacket()
    sm.game_interface.update_live_data_packet(game_tick_packet)
    # Make Rust GameTickPacket as dict; only the first num_cars entries of
    # game_cars are real cars.
    return {
        "game_ball": {
            "physics": _physics_to_dict(game_tick_packet.game_ball.physics),
        },
        "game_cars": [
            {
                "team": car.team,
                "physics": _physics_to_dict(car.physics),
                "boost": car.boost,
            }
            for car in game_tick_packet.game_cars[:game_tick_packet.num_cars]
        ],
        "game_info": {
            "seconds_elapsed": game_tick_packet.game_info.seconds_elapsed,
        },
    }
def dict_to_game_state(state_dict):
    """Build a GameState from a plain dict, setting only the fields present."""
    gs = GameState()
    if 'ball' in state_dict:
        gs.ball = BallState()
        ball_dict = state_dict['ball']
        if 'physics' in ball_dict:
            gs.ball.physics = dict_to_physics(ball_dict['physics'])
    if 'cars' in state_dict:
        gs.cars = {}
        for index, car_dict in state_dict['cars'].items():
            car_state = CarState()
            if 'physics' in car_dict:
                car_state.physics = dict_to_physics(car_dict['physics'])
            if 'boost_amount' in car_dict:
                car_state.boost_amount = car_dict['boost_amount']
            # keys may arrive as strings (e.g. from JSON); indices are ints
            gs.cars[int(index)] = car_state
    if 'game_info' in state_dict:
        gs.game_info = GameInfoState()
        info_dict = state_dict['game_info']
        for field in ('paused', 'world_gravity_z', 'game_speed'):
            if field in info_dict:
                setattr(gs.game_info, field, info_dict[field])
    if 'console_commands' in state_dict:
        gs.console_commands = state_dict['console_commands']
    return gs
def dict_to_physics(physics_dict):
    """Build a Physics struct from a dict, converting only the parts present."""
    phys = Physics()
    converters = (
        ('location', dict_to_vec),
        ('velocity', dict_to_vec),
        ('angular_velocity', dict_to_vec),
        ('rotation', dict_to_rot),
    )
    for field, convert in converters:
        if field in physics_dict:
            setattr(phys, field, convert(physics_dict[field]))
    return phys
def dict_to_vec(v):
    """Build a Vector3 from a dict; missing axes keep the default value."""
    vec = Vector3()
    for axis in ('x', 'y', 'z'):
        if axis in v:
            setattr(vec, axis, v[axis])
    return vec
def dict_to_rot(r):
    """Build a Rotator from a dict; missing angles keep the default value."""
    rot = Rotator()
    for angle in ('pitch', 'yaw', 'roll'):
        if angle in r:
            setattr(rot, angle, r[angle])
    return rot
def set_game_state(sm: SetupManager, state):
    """Apply a dict-described game state to the running match,
    connecting to the game first if needed."""
    if not sm.has_started:
        sm.connect_to_game()
    sm.game_interface.set_game_state(dict_to_game_state(state))
def convert_to_looks_config(looks: dict):
    """Translate a {'blue': {...}, 'orange': {...}} looks dict into an rlbot
    looks configuration object, copying only keys each header knows about."""
    looks_config = create_looks_configurations()
    header_sources = (
        ('blue', BOT_CONFIG_LOADOUT_HEADER),
        ('orange', BOT_CONFIG_LOADOUT_ORANGE_HEADER),
        ('blue', BOT_CONFIG_LOADOUT_PAINT_BLUE_HEADER),
        ('orange', BOT_CONFIG_LOADOUT_PAINT_ORANGE_HEADER),
    )
    for team_key, header_name in header_sources:
        header = looks_config.get_header(header_name)
        source = looks[team_key]
        for key in header.values.keys():
            if key in source:
                header.set_value(key, source[key])
    return looks_config
def spawn_car_in_showroom(sm: SetupManager, loadout_config: LoadoutConfig, team: int, showcase_type: str, map_name: str,
                          launcher_prefs: RocketLeagueLauncherPreference):
    """Spawn a single RLBot-controlled car into a private 'showroom' match and
    pose/animate it according to showcase_type.

    showcase_type handled here: "boost", "throttle", "back-center-kickoff",
    "goal-explosion"; any other value leaves the car parked at center.
    """
    # A solo, instantly-started match with unlimited length/boost so the car
    # can be inspected indefinitely
    match_config = MatchConfig()
    match_config.game_mode = 'Soccer'
    match_config.game_map = map_name
    match_config.instant_start = True
    match_config.existing_match_behavior = 'Continue And Spawn'
    match_config.networking_role = NetworkingRole.none
    match_config.enable_state_setting = True
    match_config.skip_replays = True

    bot_config = PlayerConfig()
    bot_config.bot = True
    bot_config.rlbot_controlled = True
    bot_config.team = team
    bot_config.name = "Showroom"
    bot_config.loadout_config = loadout_config

    match_config.player_configs = [bot_config]
    match_config.mutators = MutatorConfig()
    match_config.mutators.boost_amount = 'Unlimited'
    match_config.mutators.match_length = 'Unlimited'

    if not sm.has_started:
        sm.connect_to_game(launcher_preference=launcher_prefs)
    sm.load_match_config(match_config)
    sm.start_match()

    # Default pose: car at center, ball hidden below the floor (z = -100)
    game_state = GameState(
        cars={0: CarState(physics=Physics(
            location=Vector3(0, 0, 20),
            velocity=Vector3(0, 0, 0),
            angular_velocity=Vector3(0, 0, 0),
            rotation=Rotator(0, 0, 0)
        ))},
        ball=BallState(physics=Physics(
            location=Vector3(0, 0, -100),
            velocity=Vector3(0, 0, 0),
            angular_velocity=Vector3(0, 0, 0)
        ))
    )
    player_input = PlayerInput()
    # mirror positions/orientations for the orange team
    team_sign = -1 if team == 0 else 1
    if showcase_type == "boost":
        # full boost + full steer: drive in a tight boosting circle
        player_input.boost = True
        player_input.steer = 1
        game_state.cars[0].physics.location.y = -1140
        game_state.cars[0].physics.velocity.x = 2300
        game_state.cars[0].physics.angular_velocity.z = 3.5
    elif showcase_type == "throttle":
        # throttle + partial steer: drive in a wider circle without boost
        player_input.throttle = 1
        player_input.steer = 0.56
        game_state.cars[0].physics.location.y = -1140
        game_state.cars[0].physics.velocity.x = 1410
        game_state.cars[0].physics.angular_velocity.z = 1.5
    elif showcase_type == "back-center-kickoff":
        # place on the back-center kickoff spot, yawed to face midfield
        game_state.cars[0].physics.location.y = 4608 * team_sign
        game_state.cars[0].physics.rotation.yaw = -0.5 * pi * team_sign
    elif showcase_type == "goal-explosion":
        # launch the car toward a ball placed near the goal line
        # (presumably to trigger the goal explosion -- values look tuned)
        game_state.cars[0].physics.location.y = -2000 * team_sign
        game_state.cars[0].physics.rotation.yaw = -0.5 * pi * team_sign
        game_state.cars[0].physics.velocity.y = -2300 * team_sign
        game_state.ball.physics.location = Vector3(0, -3500 * team_sign, 93)
    sm.game_interface.update_player_input(player_input, 0)
    sm.game_interface.set_game_state(game_state)
def spawn_car_for_viewing(sm: SetupManager, looks: dict, team: int, showcase_type: str, map_name: str, launcher_prefs: RocketLeagueLauncherPreference):
    """Convert the given looks dict into a loadout and spawn it in the showroom."""
    appearance = load_bot_appearance(convert_to_looks_config(looks), team)
    spawn_car_in_showroom(sm, appearance, team, showcase_type, map_name, launcher_prefs)
    print("Opened showroom!")
import datetime
from typing import TypeVar
import six
from rlbot_action_server import typing_utils
def _deserialize(data, klass):
    """Deserializes dict, list, str into an object.

    Dispatches on ``klass``: primitives, plain object, dates/datetimes,
    typing generics (List/Dict), TypeVars, and finally swagger model classes.

    :param data: dict, list or str.
    :param klass: class literal, or string of class name.
    :return: object.
    """
    if data is None:
        return None

    if klass in six.integer_types or klass in (float, str, bool):
        return _deserialize_primitive(data, klass)
    elif klass == object:
        return _deserialize_object(data)
    elif klass == datetime.date:
        return deserialize_date(data)
    elif klass == datetime.datetime:
        return deserialize_datetime(data)
    elif typing_utils.is_generic(klass):  # Modified this according to https://github.com/OpenAPITools/openapi-generator/issues/1866
        if typing_utils.is_list(klass):
            return _deserialize_list(data, klass.__args__[0])
        if typing_utils.is_dict(klass):
            return _deserialize_dict(data, klass.__args__[1])
        # NOTE(review): a generic that is neither a list nor a dict falls
        # through this branch and implicitly returns None -- confirm intended.
    elif isinstance(klass, TypeVar):  # Invented this myself.
        # TypeVars carry no runtime structure; pass the data through unchanged
        return data
    else:
        return deserialize_model(data, klass)
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
def _deserialize_object(value):
"""Return a original value.
:return: object.
"""
return value
def deserialize_date(string):
    """Deserializes string to date.

    :param string: str.
    :type string: str
    :return: date, or the original string when dateutil is unavailable.
    :rtype: date
    """
    try:
        from dateutil.parser import parse
    except ImportError:
        # dateutil is an optional dependency; degrade to the raw string
        return string
    return parse(string).date()
def deserialize_datetime(string):
    """Deserializes string to datetime.

    The string should be in iso8601 datetime format.

    :param string: str.
    :type string: str
    :return: datetime, or the original string when dateutil is unavailable.
    :rtype: datetime
    """
    try:
        from dateutil.parser import parse
    except ImportError:
        # dateutil is an optional dependency; degrade to the raw string
        return string
    return parse(string)
def deserialize_model(data, klass):
    """Deserializes list or dict to model.

    :param data: dict, list.
    :type data: dict | list
    :param klass: class literal.
    :return: model object.
    """
    instance = klass()
    if not instance.swagger_types:
        # model declares no typed attributes; hand the raw data back
        return data
    if data is None or not isinstance(data, (list, dict)):
        # nothing usable to populate from; return the empty instance
        return instance
    for attr, attr_type in six.iteritems(instance.swagger_types):
        wire_name = instance.attribute_map[attr]
        if wire_name in data:
            setattr(instance, attr, _deserialize(data[wire_name], attr_type))
    return instance
def _deserialize_list(data, boxed_type):
    """Deserializes a list and its elements.

    :param data: list to deserialize.
    :type data: list
    :param boxed_type: class literal.

    :return: deserialized list.
    :rtype: list
    """
    return [_deserialize(element, boxed_type) for element in data]
def _deserialize_dict(data, boxed_type):
    """Deserializes a dict and its elements.

    :param data: dict to deserialize.
    :type data: dict
    :param boxed_type: class literal.

    :return: deserialized dict.
    :rtype: dict
    """
    return {key: _deserialize(value, boxed_type)
            for key, value in six.iteritems(data)}
# RLCard: A Toolkit for Reinforcement Learning in Card Games
<img width="500" src="https://dczha.com/files/rlcard/logo.jpg" alt="Logo" />
[](https://github.com/datamllab/rlcard/actions/workflows/python-package.yml)
[](https://badge.fury.io/py/rlcard)
[](https://coveralls.io/github/datamllab/rlcard?branch=master)
[](https://pepy.tech/project/rlcard)
[](https://pepy.tech/project/rlcard)
[](https://opensource.org/licenses/MIT)
[中文文档](README.zh-CN.md)
RLCard is a toolkit for Reinforcement Learning (RL) in card games. It supports multiple card environments with easy-to-use interfaces for implementing various reinforcement learning and searching algorithms. The goal of RLCard is to bridge reinforcement learning and imperfect information games. RLCard is developed by [DATA Lab](http://faculty.cs.tamu.edu/xiahu/) at Rice and Texas A&M University, and community contributors.
* Official Website: [https://www.rlcard.org](https://www.rlcard.org)
* Tutorial in Jupyter Notebook: [https://github.com/datamllab/rlcard-tutorial](https://github.com/datamllab/rlcard-tutorial)
* Paper: [https://arxiv.org/abs/1910.04376](https://arxiv.org/abs/1910.04376)
* GUI: [RLCard-Showdown](https://github.com/datamllab/rlcard-showdown)
* Dou Dizhu Demo: [Demo](https://douzero.org/)
* Resources: [Awesome-Game-AI](https://github.com/datamllab/awesome-game-ai)
* Related Project: [DouZero Project](https://github.com/kwai/DouZero)
* Zhihu: https://zhuanlan.zhihu.com/p/526723604
**Community:**
* **Slack**: Discuss in our [#rlcard-project](https://join.slack.com/t/rlcard/shared_invite/zt-rkvktsaq-xkMwz8BfKupCM6zGhO01xg) slack channel.
* **QQ Group**: Join our QQ group to discuss. Password: rlcardqqgroup
* Group 1: 665647450
* Group 2: 117349516
**News:**
* We have updated the tutorials in Jupyter Notebook to help you walk through RLCard! Please check [RLCard Tutorial](https://github.com/datamllab/rlcard-tutorial).
* All the algorithms can support [PettingZoo](https://github.com/PettingZoo-Team/PettingZoo) now. Please check [here](examples/pettingzoo). Thanks to the contribution from [Yifei Cheng](https://github.com/ycheng517).
* Please follow [DouZero](https://github.com/kwai/DouZero), a strong Dou Dizhu AI and the [ICML 2021 paper](https://arxiv.org/abs/2106.06135). An online demo is available [here](https://douzero.org/). The algorithm is also integrated in RLCard. See [Training DMC on Dou Dizhu](docs/toy-examples.md#training-dmc-on-dou-dizhu).
* Our package is used in [PettingZoo](https://github.com/PettingZoo-Team/PettingZoo). Please check it out!
* We have released RLCard-Showdown, GUI demo for RLCard. Please check out [here](https://github.com/datamllab/rlcard-showdown)!
* Jupyter Notebook tutorial available! We add some examples in R to call Python interfaces of RLCard with reticulate. See [here](docs/toy-examples-r.md)
* Thanks for the contribution of [@Clarit7](https://github.com/Clarit7) for supporting different number of players in Blackjack. We call for contributions for gradually making the games more configurable. See [here](CONTRIBUTING.md#making-configurable-environments) for more details.
* Thanks for the contribution of [@Clarit7](https://github.com/Clarit7) for the Blackjack and Limit Hold'em human interface.
* Now RLCard supports environment local seeding and multiprocessing. Thanks for the testing scripts provided by [@weepingwillowben](https://github.com/weepingwillowben).
* Human interface of NoLimit Holdem available. The action space of NoLimit Holdem has been abstracted. Thanks for the contribution of [@AdrianP-](https://github.com/AdrianP-).
* New game Gin Rummy and human GUI available. Thanks for the contribution of [@billh0420](https://github.com/billh0420).
* PyTorch implementation available. Thanks for the contribution of [@mjudell](https://github.com/mjudell).
## Cite this work
If you find this repo useful, you may cite:
Zha, Daochen, et al. "RLCard: A Platform for Reinforcement Learning in Card Games." IJCAI. 2020.
```bibtex
@inproceedings{zha2020rlcard,
title={RLCard: A Platform for Reinforcement Learning in Card Games},
author={Zha, Daochen and Lai, Kwei-Herng and Huang, Songyi and Cao, Yuanpu and Reddy, Keerthana and Vargas, Juan and Nguyen, Alex and Wei, Ruzhe and Guo, Junyu and Hu, Xia},
booktitle={IJCAI},
year={2020}
}
```
## Installation
Make sure that you have **Python 3.6+** and **pip** installed. We recommend installing the stable version of `rlcard` with `pip`:
```
pip3 install rlcard
```
The default installation will only include the card environments. To use PyTorch implementation of the training algorithms, run
```
pip3 install rlcard[torch]
```
If you are in China and the above command is too slow, you can use the mirror provided by Tsinghua University:
```
pip3 install rlcard -i https://pypi.tuna.tsinghua.edu.cn/simple
```
Alternatively, you can clone the latest version with (if you are in China and Github is slow, you can use the mirror in [Gitee](https://gitee.com/daochenzha/rlcard)):
```
git clone https://github.com/datamllab/rlcard.git
```
or only clone one branch to make it faster:
```
git clone -b master --single-branch --depth=1 https://github.com/datamllab/rlcard.git
```
Then install with
```
cd rlcard
pip3 install -e .
pip3 install -e .[torch]
```
We also provide [**conda** installation method](https://anaconda.org/toubun/rlcard):
```
conda install -c toubun rlcard
```
Conda installation only provides the card environments, you need to manually install Pytorch on your demands.
## Examples
A **short example** is as below.
```python
import rlcard
from rlcard.agents import RandomAgent
env = rlcard.make('blackjack')
env.set_agents([RandomAgent(num_actions=env.num_actions)])
print(env.num_actions) # 2
print(env.num_players) # 1
print(env.state_shape) # [[2]]
print(env.action_shape) # [None]
trajectories, payoffs = env.run()
```
RLCard can be flexibly connected to various algorithms. See the following examples:
* [Playing with random agents](docs/toy-examples.md#playing-with-random-agents)
* [Deep-Q learning on Blackjack](docs/toy-examples.md#deep-q-learning-on-blackjack)
* [Training CFR (chance sampling) on Leduc Hold'em](docs/toy-examples.md#training-cfr-on-leduc-holdem)
* [Having fun with pretrained Leduc model](docs/toy-examples.md#having-fun-with-pretrained-leduc-model)
* [Training DMC on Dou Dizhu](docs/toy-examples.md#training-dmc-on-dou-dizhu)
* [Evaluating Agents](docs/toy-examples.md#evaluating-agents)
* [Training Agents on PettingZoo](examples/pettingzoo)
## Demo
Run `examples/human/leduc_holdem_human.py` to play with the pre-trained Leduc Hold'em model. Leduc Hold'em is a simplified version of Texas Hold'em. Rules can be found [here](docs/games.md#leduc-holdem).
```
>> Leduc Hold'em pre-trained model
>> Start a new game!
>> Agent 1 chooses raise
=============== Community Card ===============
┌─────────┐
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
└─────────┘
=============== Your Hand ===============
┌─────────┐
│J │
│ │
│ │
│ ♥ │
│ │
│ │
│ J│
└─────────┘
=============== Chips ===============
Yours: +
Agent 1: +++
=========== Actions You Can Choose ===========
0: call, 1: raise, 2: fold
>> You choose action (integer):
```
We also provide a GUI for easy debugging. Please check [here](https://github.com/datamllab/rlcard-showdown/). Some demos:


## Available Environments
We provide a complexity estimation for the games on several aspects. **InfoSet Number:** the number of information sets; **InfoSet Size:** the average number of states in a single information set; **Action Size:** the size of the action space. **Name:** the name that should be passed to `rlcard.make` to create the game environment. We also provide the link to the documentation and the random example.
| Game | InfoSet Number | InfoSet Size | Action Size | Name | Usage |
| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------: | :---------------: | :---------: | :-------------: | :-----------------------------------------------------------------------------------------: |
| Blackjack ([wiki](https://en.wikipedia.org/wiki/Blackjack), [baike](https://baike.baidu.com/item/21%E7%82%B9/5481683?fr=aladdin)) | 10^3 | 10^1 | 10^0 | blackjack | [doc](docs/games.md#blackjack), [example](examples/run_random.py) |
| Leduc Hold’em ([paper](http://poker.cs.ualberta.ca/publications/UAI05.pdf)) | 10^2 | 10^2 | 10^0 | leduc-holdem | [doc](docs/games.md#leduc-holdem), [example](examples/run_random.py) |
| Limit Texas Hold'em ([wiki](https://en.wikipedia.org/wiki/Texas_hold_%27em), [baike](https://baike.baidu.com/item/%E5%BE%B7%E5%85%8B%E8%90%A8%E6%96%AF%E6%89%91%E5%85%8B/83440?fr=aladdin)) | 10^14 | 10^3 | 10^0 | limit-holdem | [doc](docs/games.md#limit-texas-holdem), [example](examples/run_random.py) |
| Dou Dizhu ([wiki](https://en.wikipedia.org/wiki/Dou_dizhu), [baike](https://baike.baidu.com/item/%E6%96%97%E5%9C%B0%E4%B8%BB/177997?fr=aladdin)) | 10^53 ~ 10^83 | 10^23 | 10^4 | doudizhu | [doc](docs/games.md#dou-dizhu), [example](examples/run_random.py) |
| Mahjong ([wiki](https://en.wikipedia.org/wiki/Competition_Mahjong_scoring_rules), [baike](https://baike.baidu.com/item/%E9%BA%BB%E5%B0%86/215)) | 10^121 | 10^48 | 10^2 | mahjong | [doc](docs/games.md#mahjong), [example](examples/run_random.py) |
| No-limit Texas Hold'em ([wiki](https://en.wikipedia.org/wiki/Texas_hold_%27em), [baike](https://baike.baidu.com/item/%E5%BE%B7%E5%85%8B%E8%90%A8%E6%96%AF%E6%89%91%E5%85%8B/83440?fr=aladdin)) | 10^162 | 10^3 | 10^4 | no-limit-holdem | [doc](docs/games.md#no-limit-texas-holdem), [example](examples/run_random.py) |
| UNO ([wiki](https://en.wikipedia.org/wiki/Uno_\(card_game\)), [baike](https://baike.baidu.com/item/UNO%E7%89%8C/2249587)) | 10^163 | 10^10 | 10^1 | uno | [doc](docs/games.md#uno), [example](examples/run_random.py) |
| Gin Rummy ([wiki](https://en.wikipedia.org/wiki/Gin_rummy), [baike](https://baike.baidu.com/item/%E9%87%91%E6%8B%89%E7%B1%B3/3471710)) | 10^52 | - | - | gin-rummy | [doc](docs/games.md#gin-rummy), [example](examples/run_random.py) |
| Bridge ([wiki](https://en.wikipedia.org/wiki/Bridge), [baike](https://baike.baidu.com/item/%E6%A1%A5%E7%89%8C/332030)) | | - | - | bridge | [doc](docs/games.md#bridge), [example](examples/run_random.py) |
## Supported Algorithms
| Algorithm | example | reference |
| :--------------------------------------: | :-----------------------------------------: | :------------------------------------------------------------------------------------------------------: |
| Deep Monte-Carlo (DMC) | [examples/run\_dmc.py](examples/run_dmc.py) | [[paper]](https://arxiv.org/abs/2106.06135) |
| Deep Q-Learning (DQN) | [examples/run\_rl.py](examples/run_rl.py) | [[paper]](https://arxiv.org/abs/1312.5602) |
| Neural Fictitious Self-Play (NFSP) | [examples/run\_rl.py](examples/run_rl.py) | [[paper]](https://arxiv.org/abs/1603.01121) |
| Counterfactual Regret Minimization (CFR) | [examples/run\_cfr.py](examples/run_cfr.py) | [[paper]](http://papers.nips.cc/paper/3306-regret-minimization-in-games-with-incomplete-information.pdf) |
## Pre-trained and Rule-based Models
We provide a [model zoo](rlcard/models) to serve as the baselines.
| Model | Explanation |
| :--------------------------------------: | :------------------------------------------------------: |
| leduc-holdem-cfr | Pre-trained CFR (chance sampling) model on Leduc Hold'em |
| leduc-holdem-rule-v1 | Rule-based model for Leduc Hold'em, v1 |
| leduc-holdem-rule-v2 | Rule-based model for Leduc Hold'em, v2 |
| uno-rule-v1 | Rule-based model for UNO, v1 |
| limit-holdem-rule-v1 | Rule-based model for Limit Texas Hold'em, v1 |
| doudizhu-rule-v1 | Rule-based model for Dou Dizhu, v1 |
| gin-rummy-novice-rule | Gin Rummy novice rule model |
## API Cheat Sheet
### How to create an environment
You can use the following interface to make an environment. You may optionally specify some configurations with a dictionary.
* **env = rlcard.make(env_id, config={})**: Make an environment. `env_id` is the string name of an environment; `config` is a dictionary that specifies some environment configurations, which are as follows.
* `seed`: Default `None`. Set an environment-local random seed for reproducing the results.
* `allow_step_back`: Default `False`. `True` if allowing `step_back` function to traverse backward in the tree.
* Game specific configurations: These fields start with `game_`. Currently, we only support `game_num_players` in Blackjack.
Once the environment is made, we can access some information about the game.
* **env.num_actions**: The number of actions.
* **env.num_players**: The number of players.
* **env.state_shape**: The shape of the state space of the observations.
* **env.action_shape**: The shape of the action features (Dou Dizhu's actions can be encoded as features)
### What is state in RLCard
State is a Python dictionary. It consists of observation `state['obs']`, legal actions `state['legal_actions']`, raw observation `state['raw_obs']` and raw legal actions `state['raw_legal_actions']`.
### Basic interfaces
The following interfaces provide a basic usage. They are easy to use, but they make assumptions about the agent. The agent must follow the [agent template](docs/developping-algorithms.md).
* **env.set_agents(agents)**: `agents` is a list of `Agent` object. The length of the list should be equal to the number of the players in the game.
* **env.run(is_training=False)**: Run a complete game and return trajectories and payoffs. The function can be used after the `set_agents` is called. If `is_training` is `True`, it will use `step` function in the agent to play the game. If `is_training` is `False`, `eval_step` will be called instead.
### Advanced interfaces
For advanced usage, the following interfaces allow flexible operations on the game tree. These interfaces do not make any assumptions about the agent.
* **env.reset()**: Initialize a game. Return the state and the first player ID.
* **env.step(action, raw_action=False)**: Take one step in the environment. `action` can be raw action or integer; `raw_action` should be `True` if the action is raw action (string).
* **env.step_back()**: Available only when `allow_step_back` is `True`. Take one step backward. This can be used for algorithms that operate on the game tree, such as CFR (chance sampling).
* **env.is_over()**: Return `True` if the current game is over. Otherwise, return `False`.
* **env.get_player_id()**: Return the Player ID of the current player.
* **env.get_state(player_id)**: Return the state that corresponds to `player_id`.
* **env.get_payoffs()**: In the end of the game, return a list of payoffs for all the players.
* **env.get_perfect_information()**: (Currently only support some of the games) Obtain the perfect information at the current state.
## Library Structure
The purposes of the main modules are listed as below:
* [/examples](examples): Examples of using RLCard.
* [/docs](docs): Documentation of RLCard.
* [/tests](tests): Testing scripts for RLCard.
* [/rlcard/agents](rlcard/agents): Reinforcement learning algorithms and human agents.
* [/rlcard/envs](rlcard/envs): Environment wrappers (state representation, action encoding etc.)
* [/rlcard/games](rlcard/games): Various game engines.
* [/rlcard/models](rlcard/models): Model zoo including pre-trained models and rule models.
## More Documents
For more documentation, please refer to the [Documents](docs/README.md) for general introductions. API documents are available at our [website](http://www.rlcard.org).
## Contributing
Contribution to this project is greatly appreciated! Please create an issue for feedbacks/bugs. If you want to contribute codes, please refer to [Contributing Guide](./CONTRIBUTING.md). If you have any questions, please contact [Daochen Zha](https://github.com/daochenzha) with [daochen.zha@rice.edu](mailto:daochen.zha@rice.edu).
## Acknowledgements
We would like to thank JJ World Network Technology Co.,LTD for the generous support and all the contributions from the community contributors.
| /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/README.md | 0.530966 | 0.953449 | README.md | pypi |
import importlib
class ModelSpec(object):
    """Specification for a particular Model.

    The ``entry_point`` has the form ``"package.module:ClassName"``; the
    class is resolved eagerly at construction time.
    """

    def __init__(self, model_id, entry_point=None):
        """Resolve the model class named by ``entry_point``.

        Args:
            model_id (string): the name of the model
            entry_point (string): ``"module.path:ClassName"`` locating the model class
        """
        self.model_id = model_id
        module_name, attr_name = entry_point.split(':')
        self._entry_point = getattr(importlib.import_module(module_name), attr_name)

    def load(self):
        """Instantiate an instance of the model.

        Returns:
            Model (Model): a fresh instance of the resolved model class
        """
        return self._entry_point()
class ModelRegistry(object):
    """Registry mapping model IDs to their ModelSpec entries."""

    def __init__(self):
        """Create an empty registry."""
        # model_id (str) -> ModelSpec
        self.model_specs = {}

    def register(self, model_id, entry_point):
        """Register a model.

        Args:
            model_id (string): the name of the model
            entry_point (string): a string that indicates the location of the model class

        Raises:
            ValueError: if ``model_id`` is already registered.
        """
        if model_id in self.model_specs:
            raise ValueError(f'Cannot re-register model_id: {model_id}')
        self.model_specs[model_id] = ModelSpec(model_id, entry_point)

    def load(self, model_id):
        """Create a model instance.

        Args:
            model_id (string): the name of the model

        Raises:
            ValueError: if ``model_id`` was never registered.
        """
        if model_id not in self.model_specs:
            raise ValueError(f'Cannot find model_id: {model_id}')
        return self.model_specs[model_id].load()
# Module-level singleton registry used by the helper functions below.
model_registry = ModelRegistry()


def register(model_id, entry_point):
    """Register a model with the global registry.

    Args:
        model_id (string): the name of the model
        entry_point (string): a string that indicates the location of the model class
    """
    return model_registry.register(model_id, entry_point)
def load(model_id):
    """Create a model instance from the global registry.

    Args:
        model_id (string): the name of the model

    Returns:
        Model: an instance of the registered model
    """
    return model_registry.load(model_id)
import numpy as np
import rlcard
from rlcard.models.model import Model
class UNORuleAgentV1(object):
    """UNO rule-based agent, version 1.

    Strategy: draw when forced; play a wild-draw-4 immediately, recolored to
    the color we hold most of; otherwise play a random non-wild legal card,
    keeping wild cards in hand as long as possible.
    """

    def __init__(self):
        # This agent consumes raw (human-readable) states and actions.
        self.use_raw = True

    def step(self, state):
        """Predict the action given a raw state.

        Args:
            state (dict): raw state from the game

        Returns:
            action (str): the chosen action
        """
        legal_actions = state['raw_legal_actions']
        raw_obs = state['raw_obs']

        if 'draw' in legal_actions:
            return 'draw'

        hand = raw_obs['hand']

        # Dump a wild-draw-4 as soon as it is legal, choosing the color that
        # appears most often among the non-wild cards in hand.
        for candidate in legal_actions:
            if candidate.split('-')[1] == 'wild_draw_4':
                counts = self.count_colors(self.filter_wild(hand))
                best_color = max(counts, key=counts.get)
                return best_color + '-wild_draw_4'

        # Otherwise pick a random non-wild legal action (wilds are hoarded).
        return np.random.choice(self.filter_wild(legal_actions))

    def eval_step(self, state):
        """Evaluation step; identical to ``step`` (no probabilities reported)."""
        return self.step(state), []

    @staticmethod
    def filter_wild(hand):
        """Drop wild cards from ``hand``, unless the hand is all wilds.

        Args:
            hand (list): a list of UNO card strings

        Returns:
            filtered_hand (list): the non-wild cards, or the original hand
            when every card is wild
        """
        non_wild = [card for card in hand if card[2:6] != 'wild']
        return non_wild if non_wild else hand

    @staticmethod
    def count_colors(hand):
        """Count the number of cards of each color in hand.

        Args:
            hand (list): a list of UNO card strings

        Returns:
            color_nums (dict): color character -> number of cards
        """
        color_nums = {}
        for card in hand:
            color_nums[card[0]] = color_nums.get(card[0], 0) + 1
        return color_nums
class UNORuleModelV1(Model):
    """UNO rule model, version 1: one shared rule agent per seat."""

    def __init__(self):
        """Build one rule agent and assign it to every position."""
        env = rlcard.make('uno')
        shared_agent = UNORuleAgentV1()
        self.rule_agents = [shared_agent] * env.num_players

    @property
    def agents(self):
        """Get a list of agents, one per position in the game.

        Returns:
            agents (list): one agent per position; each exposes ``step``
            and ``eval_step`` like an RL agent.
        """
        return self.rule_agents

    @property
    def use_raw(self):
        """Indicate whether raw states and actions are used.

        Returns:
            use_raw (boolean): True, this model consumes raw states/actions
        """
        return True
from typing import TYPE_CHECKING
from collections import OrderedDict
if TYPE_CHECKING:
from rlcard.core import Card
from typing import List
import numpy as np
import rlcard
from rlcard.models.model import Model
from rlcard.games.gin_rummy.utils.action_event import *
import rlcard.games.gin_rummy.utils.melding as melding
import rlcard.games.gin_rummy.utils.utils as utils
class GinRummyNoviceRuleAgent(object):
    '''
    Agent always discards highest deadwood value card
    '''

    def __init__(self):
        # This agent consumes encoded (non-raw) states.
        self.use_raw = False  # FIXME: should this be True ?

    @staticmethod
    def step(state):
        ''' Predict the action given the current state.
            Novice strategy:
                Case where can gin:
                    Choose one of the gin actions.
                Case where can knock:
                    Choose one of the knock actions.
                Case where can discard:
                    Gin if can. Knock if can.
                    Otherwise, put aside cards in some best meld cluster.
                    Choose one of the remaining cards with highest deadwood value.
                    Discard that card.
                Case otherwise:
                    Choose a random action.

        Args:
            state (dict): holds 'obs' (numpy.array) and 'legal_actions' for the current state

        Returns:
            action (int): the action predicted
        '''
        legal_actions = state['legal_actions']
        actions = legal_actions.copy()
        # Decode action ids into event objects so we can branch on the action kind.
        legal_action_events = [ActionEvent.decode_action(x) for x in legal_actions]
        gin_action_events = [x for x in legal_action_events if isinstance(x, GinAction)]
        knock_action_events = [x for x in legal_action_events if isinstance(x, KnockAction)]
        discard_action_events = [x for x in legal_action_events if isinstance(x, DiscardAction)]
        # Preference order: gin > knock > best discard > any legal action.
        if gin_action_events:
            actions = [x.action_id for x in gin_action_events]
        elif knock_action_events:
            actions = [x.action_id for x in knock_action_events]
        elif discard_action_events:
            best_discards = GinRummyNoviceRuleAgent._get_best_discards(discard_action_events=discard_action_events,
                                                                       state=state)
            if best_discards:
                actions = [DiscardAction(card=card).action_id for card in best_discards]
        # legal_actions may be an OrderedDict (action_id -> info); in that case
        # sample uniformly over its keys.
        if type(actions) == OrderedDict:
            actions = list(actions.keys())
        return np.random.choice(actions)

    def eval_step(self, state):
        ''' Predict the action given the current state for evaluation.
            Since the agents is not trained, this function is equivalent to step function.

        Args:
            state (numpy.array): an numpy array that represents the current state

        Returns:
            action (int): the action predicted by the agent
            probabilities (list): The list of action probabilities
        '''
        # No trained policy, so there are no meaningful probabilities to report.
        probabilities = []
        return self.step(state), probabilities

    @staticmethod
    def _get_best_discards(discard_action_events, state) -> List[Card]:
        # For each candidate discard, compute the minimum deadwood count of the
        # remaining hand over all meld clusters, and keep every discard that
        # achieves the overall minimum.
        best_discards = []  # type: List[Card]
        final_deadwood_count = 999  # sentinel, larger than any real deadwood count
        env_hand = state['obs'][0]  # assumes plane 0 of the encoded obs is the hand -- TODO confirm
        hand = utils.decode_cards(env_cards=env_hand)
        for discard_action_event in discard_action_events:
            discard_card = discard_action_event.card
            next_hand = [card for card in hand if card != discard_card]
            meld_clusters = melding.get_meld_clusters(hand=next_hand)
            deadwood_counts = []
            for meld_cluster in meld_clusters:
                deadwood_count = utils.get_deadwood_count(hand=next_hand, meld_cluster=meld_cluster)
                deadwood_counts.append(deadwood_count)
            # No melds at all: deadwood is counted over the bare hand.
            best_deadwood_count = min(deadwood_counts,
                                      default=utils.get_deadwood_count(hand=next_hand, meld_cluster=[]))
            if best_deadwood_count < final_deadwood_count:
                final_deadwood_count = best_deadwood_count
                best_discards = [discard_card]
            elif best_deadwood_count == final_deadwood_count:
                best_discards.append(discard_card)
        return best_discards
class GinRummyNoviceRuleModel(Model):
    """Gin Rummy rule model: one shared novice rule agent per seat."""

    def __init__(self):
        """Build one novice rule agent and assign it to every position."""
        super().__init__()
        env = rlcard.make('gin-rummy')
        shared_agent = GinRummyNoviceRuleAgent()
        self.rule_agents = [shared_agent] * env.num_players

    @property
    def agents(self):
        """Get a list of agents, one per position in the game.

        Returns:
            agents (list): one agent per position; each exposes ``step``
            and ``eval_step`` like an RL agent.
        """
        return self.rule_agents
import numpy as np
import rlcard
from rlcard.games.doudizhu.utils import CARD_TYPE, INDEX
from rlcard.models.model import Model
class DouDizhuRuleAgentV1(object):
    ''' Dou Dizhu Rule agent version 1
    '''

    def __init__(self):
        # This agent consumes raw (human-readable) states and actions.
        self.use_raw = True

    def step(self, state):
        ''' Predict the action given raw state. A naive rule.

        Args:
            state (dict): Raw state from the game

        Returns:
            action (str): Predicted action
        '''
        state = state['raw_obs']
        trace = state['trace']
        # Leading round: nothing played yet, or both other players passed
        # on our last play. Lead with the combination holding our weakest card.
        if len(trace) == 0 or (len(trace) >= 3 and trace[-1][1] == 'pass' and trace[-2][1] == 'pass'):
            comb = self.combine_cards(state['current_hand'])
            min_card = state['current_hand'][0]
            for _, actions in comb.items():
                for action in actions:
                    if min_card in action:
                        return action
        # Following round: try to beat the cards on the table.
        else:
            target = state['trace'][-1][-1]
            target_player = state['trace'][-1][0]
            if target == 'pass':
                target = state['trace'][-2][-1]
                # BUG FIX: when the last action is a pass, the cards to beat
                # come from trace[-2], so the player who played them is
                # trace[-2][0]. The original code reused trace[-1][0] (the
                # player who just passed), which broke the teammate check below.
                target_player = state['trace'][-2][0]
            the_type = CARD_TYPE[0][target][0][0]
            chosen_action = ''
            rank = 1000  # sentinel, larger than any real rank
            # Beat the target with the lowest-ranked legal action of the same type.
            for action in state['actions']:
                if action != 'pass' and the_type == CARD_TYPE[0][action][0][0]:
                    if int(CARD_TYPE[0][action][0][1]) < rank:
                        rank = int(CARD_TYPE[0][action][0][1])
                        chosen_action = action
            if chosen_action != '':
                return chosen_action
            # Cannot beat it: pass when a teammate (non-landlord) holds the table.
            landlord = state['landlord']
            if target_player != landlord and state['self'] != landlord:
                return 'pass'
        # Fallback: act randomly over the legal actions.
        return np.random.choice(state['actions'])

    def eval_step(self, state):
        ''' Step for evaluation. The same to step
        '''
        return self.step(state), []

    def combine_cards(self, hand):
        '''Get optimal combinations of cards in hand

        Args:
            hand (str): the current hand, one character per card
                (assumed sorted by rank -- see min_card usage in step)

        Returns:
            comb (dict): combination type -> list of card-string combinations
        '''
        comb = {'rocket': [], 'bomb': [], 'trio': [], 'trio_chain': [],
                'solo_chain': [], 'pair_chain': [], 'pair': [], 'solo': []}
        # 1. pick rocket (both jokers, encoded as the trailing 'BR')
        if hand[-2:] == 'BR':
            comb['rocket'].append('BR')
            hand = hand[:-2]
        # 2. pick bombs (four of a kind)
        hand_cp = hand
        for index in range(len(hand_cp) - 3):
            if hand_cp[index] == hand_cp[index+3]:
                bomb = hand_cp[index: index+4]
                comb['bomb'].append(bomb)
                hand = hand.replace(bomb, '')
        # 3. pick trios and trio chains
        hand_cp = hand
        for index in range(len(hand_cp) - 2):
            if hand_cp[index] == hand_cp[index+2]:
                trio = hand_cp[index: index+3]
                # Merge consecutive trios into a chain; ranks >= 12 (2s and
                # jokers) can never be part of a chain.
                if len(comb['trio']) > 0 and INDEX[trio[-1]] < 12 and (INDEX[trio[-1]]-1) == INDEX[comb['trio'][-1][-1]]:
                    comb['trio'][-1] += trio
                else:
                    comb['trio'].append(trio)
                hand = hand.replace(trio, '')
        # Split single trios from chains by length.
        only_trio = []
        only_trio_chain = []
        for trio in comb['trio']:
            if len(trio) == 3:
                only_trio.append(trio)
            else:
                only_trio_chain.append(trio)
        comb['trio'] = only_trio
        comb['trio_chain'] = only_trio_chain
        # 4. pick solo chains
        hand_list = self.card_str2list(hand)
        chains, hand_list = self.pick_chain(hand_list, 1)
        comb['solo_chain'] = chains
        # 5. pick pair chains
        chains, hand_list = self.pick_chain(hand_list, 2)
        comb['pair_chain'] = chains
        hand = self.list2card_str(hand_list)
        # 6. the remainder becomes pairs and solos
        index = 0
        while index < len(hand) - 1:
            if hand[index] == hand[index+1]:
                comb['pair'].append(hand[index] + hand[index+1])
                index += 2
            else:
                comb['solo'].append(hand[index])
                index += 1
        if index == (len(hand) - 1):
            comb['solo'].append(hand[index])
        return comb

    @staticmethod
    def card_str2list(hand):
        '''Convert a hand string into per-rank counts.

        Args:
            hand (str): one character per card

        Returns:
            (list): 15 counts, indexed by the INDEX rank order
        '''
        hand_list = [0 for _ in range(15)]
        for card in hand:
            hand_list[INDEX[card]] += 1
        return hand_list

    @staticmethod
    def list2card_str(hand_list):
        '''Inverse of card_str2list: rebuild the sorted hand string.'''
        card_str = ''
        cards = [card for card in INDEX]
        for index, count in enumerate(hand_list):
            card_str += cards[index] * count
        return card_str

    @staticmethod
    def pick_chain(hand_list, count):
        '''Extract chains (runs of length >= 5) of multiplicity ``count``.

        Args:
            hand_list (list): per-rank counts; a reduced copy is returned
            count (int): 1 for solo chains, 2 for pair chains

        Returns:
            (tuple): (list of chain strings, remaining per-rank counts)
        '''
        chains = []
        str_card = [card for card in INDEX]
        # NOTE: the string trick below assumes every per-rank count is a
        # single digit (< 10), which holds for a legal Dou Dizhu hand.
        hand_list = [str(card) for card in hand_list]
        # Only ranks 3..A (indices 0..11) can appear in a chain.
        hand = ''.join(hand_list[:12])
        chain_list = hand.split('0')
        add = 0  # offset of the current segment within the original ranks
        for index, chain in enumerate(chain_list):
            if len(chain) > 0:
                if len(chain) >= 5:
                    start = index + add
                    min_count = int(min(chain)) // count
                    if min_count != 0:
                        str_chain = ''
                        for num in range(len(chain)):
                            str_chain += str_card[start+num]
                            hand_list[start+num] = int(hand_list[start+num]) - int(min(chain))
                        for _ in range(min_count):
                            chains.append(str_chain)
                add += len(chain)
        hand_list = [int(card) for card in hand_list]
        return (chains, hand_list)
class DouDizhuRuleModelV1(Model):
    """Dou Dizhu rule model, version 1: one shared rule agent per seat."""

    def __init__(self):
        """Build one rule agent and assign it to every position."""
        env = rlcard.make('doudizhu')
        shared_agent = DouDizhuRuleAgentV1()
        self.rule_agents = [shared_agent] * env.num_players

    @property
    def agents(self):
        """Get a list of agents, one per position in the game.

        Returns:
            agents (list): one agent per position; each exposes ``step``
            and ``eval_step`` like an RL agent.
        """
        return self.rule_agents
import hashlib
import numpy as np
import os
import struct
# ANSI foreground color codes used by `colorize`.
# BUG FIX: this table was missing from the file, so `colorize` crashed with
# NameError on every call. Values follow the standard SGR color codes.
color2num = dict(gray=30, red=31, green=32, yellow=33, blue=34,
                 magenta=35, cyan=36, white=37, crimson=38)


def colorize(string, color, bold=False, highlight=False):
    """Return string surrounded by appropriate terminal color codes to
    print colorized text. Valid colors: gray, red, green, yellow,
    blue, magenta, cyan, white, crimson

    Args:
        string (str): text to colorize.
        color (str): one of the keys of ``color2num``.
        bold (bool): add the bold attribute.
        highlight (bool): use the background (highlight) variant (+10).

    Returns:
        str: the text wrapped in ANSI escape sequences.
    """
    attr = []
    num = color2num[color]
    if highlight:
        num += 10
    attr.append(str(num))
    if bold:
        attr.append('1')
    attrs = ';'.join(attr)
    return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def error(msg, *args):
    """Print a red 'ERROR: ...' message; ``args`` are %-interpolated into ``msg``."""
    print(colorize('ERROR: ' + (msg % args), 'red'))
def np_random(seed=None):
    """Build a seeded ``numpy.random.RandomState``.

    Args:
        seed (Optional[int]): non-negative integer seed, or None for a
            fresh OS-derived seed.

    Returns:
        (tuple): the RandomState and the (possibly generated) seed.

    Raises:
        ValueError: if ``seed`` is neither None nor a non-negative int.
    """
    if seed is not None and not (isinstance(seed, int) and 0 <= seed):
        # BUG FIX: the original raised `error.Error`, but no `error` module is
        # imported in this file (the name resolves to the local `error`
        # function), which crashed with AttributeError instead of reporting
        # the problem cleanly.
        raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))
    seed = create_seed(seed)
    rng = np.random.RandomState()
    rng.seed(_int_list_from_bigint(hash_seed(seed)))
    return rng, seed
def hash_seed(seed=None, max_bytes=8):
    """Hash a seed to decorrelate it from other, linearly related seeds.

    Many PRNGs may be active at once (commonly one per worker process), and
    literature indicates that linearly correlated seeds can correlate the
    output streams:

        http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
        http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
        http://dl.acm.org/citation.cfm?id=1276928

    So seeds are passed through SHA-512 before use. Not crypto-strength, but
    good enough to kill simple correlations.

    Args:
        seed (Optional[int]): None draws a fresh seed from an OS-specific
            randomness source.
        max_bytes: Maximum number of bytes of the digest to keep.

    Returns:
        int: the hashed seed.
    """
    if seed is None:
        seed = create_seed(max_bytes=max_bytes)
    digest = hashlib.sha512(str(seed).encode('utf8')).digest()
    return _bigint_from_bytes(digest[:max_bytes])
def create_seed(a=None, max_bytes=8):
    """Create a strong random seed.

    Avoids Python 2's time-based default seeding, which is non-robust,
    especially in the presence of concurrency.

    Args:
        a (Optional[int, str]): None draws entropy from an OS-specific
            randomness source; a string is hashed; an int is truncated to
            ``max_bytes``.
        max_bytes: Maximum number of bytes to use in the seed.

    Returns:
        int: the seed value.

    Raises:
        ValueError: for unsupported seed types.
    """
    # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
    if a is None:
        a = _bigint_from_bytes(os.urandom(max_bytes))
    elif isinstance(a, str):
        a = a.encode('utf8')
        a += hashlib.sha512(a).digest()
        a = _bigint_from_bytes(a[:max_bytes])
    elif isinstance(a, int):
        a = a % 2**(8 * max_bytes)
    else:
        # BUG FIX: `error.Error` does not exist in this module (see np_random);
        # raise a standard ValueError instead of crashing with AttributeError.
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))
    return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(_bytes):
sizeof_int = 4
padding = sizeof_int - len(_bytes) % sizeof_int
_bytes += b'\0' * padding
int_count = int(len(_bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), _bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
raise error.Error('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/utils/seeding.py | 0.643329 | 0.306203 | seeding.py | pypi |
import numpy as np
from rlcard.games.base import Card
def set_seed(seed):
    """Seed every RNG the toolkit may use (torch if installed, numpy, random).

    Args:
        seed (int or None): seed value; ``None`` leaves all RNGs untouched.
    """
    if seed is not None:
        # Detect torch via importlib instead of shelling out to `pip freeze`
        # (the original approach), which was slow and fragile in frozen or
        # non-pip environments.
        import importlib.util
        if importlib.util.find_spec('torch') is not None:
            import torch
            torch.backends.cudnn.deterministic = True
            torch.manual_seed(seed)
        np.random.seed(seed)
        import random
        random.seed(seed)
def get_device():
    """Return the torch device to run on, preferring the first GPU.

    Returns:
        torch.device: ``cuda:0`` when a GPU is available, else ``cpu``.
    """
    import torch
    use_gpu = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_gpu else "cpu")
    print("--> Running on the GPU" if use_gpu else "--> Running on the CPU")
    return device
def init_standard_deck():
    """Initialize a standard deck of 52 cards.

    Returns:
        (list): 52 ``Card`` objects, ordered suit-major (S, H, D, C).
    """
    ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
    return [Card(suit, rank) for suit in 'SHDC' for rank in ranks]
def init_54_deck():
    """Initialize a deck of the standard 52 cards plus black and red jokers.

    Returns:
        (list): 54 ``Card`` objects; the jokers are Card('BJ', '') and
        Card('RJ', '').
    """
    ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
    deck = [Card(suit, rank) for suit in 'SHDC' for rank in ranks]
    deck.extend([Card('BJ', ''), Card('RJ', '')])
    return deck
def rank2int(rank):
    """Get the corresponding number of a rank.

    Args:
        rank (str): rank stored in a Card object

    Returns:
        (int): the number corresponding to the rank

    Note:
        1. An empty string maps to -1.
        2. An unrecognized rank maps to None.
    """
    if rank == '':
        return -1
    face_values = {'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}
    if rank in face_values:
        return face_values[rank]
    if rank.isdigit():
        value = int(rank)
        return value if 2 <= value <= 10 else None
    return None
def elegent_form(card):
    """Convert a two-character card string into a pretty suit-symbol form.

    Args:
        card (string): a card string such as 'ST' (ten of spades)

    Returns:
        elegent_card (string): the suit symbol followed by the rank
    """
    symbols = {'S': '♠', 'H': '♥', 'D': '♦', 'C': '♣',
               's': '♠', 'h': '♥', 'd': '♦', 'c': '♣'}
    # 'T' is displayed as the two-character rank '10'.
    rank = '10' if card[1] == 'T' else card[1]
    return symbols[card[0]] + rank
def print_card(cards):
    ''' Nicely print a card or list of cards
    Args:
        card (string or list): The card(s) to be printed
    '''
    # Normalize input: None prints a single face-down card; a single card
    # string is promoted to a one-element list.
    if cards is None:
        cards = [None]
    if isinstance(cards, str):
        cards = [cards]
    # Each card is drawn as a 9-row ASCII-art box; rows are accumulated
    # across all cards so they print side by side.
    lines = [[] for _ in range(9)]
    for card in cards:
        if card is None:
            # Face-down card: a box filled with shade characters.
            lines[0].append('┌─────────┐')
            lines[1].append('│░░░░░░░░░│')
            lines[2].append('│░░░░░░░░░│')
            lines[3].append('│░░░░░░░░░│')
            lines[4].append('│░░░░░░░░░│')
            lines[5].append('│░░░░░░░░░│')
            lines[6].append('│░░░░░░░░░│')
            lines[7].append('│░░░░░░░░░│')
            lines[8].append('└─────────┘')
        else:
            # Accept either a Card object or a raw 'SR' suit+rank string.
            if isinstance(card, Card):
                elegent_card = elegent_form(card.suit + card.rank)
            else:
                elegent_card = elegent_form(card)
            suit = elegent_card[0]
            rank = elegent_card[1]
            # '10' is the only two-character rank: its second digit is used as
            # the padding slot so the box width stays constant.
            if len(elegent_card) == 3:
                space = elegent_card[2]
            else:
                space = ' '
            lines[0].append('┌─────────┐')
            lines[1].append('│{}{}       │'.format(rank, space))
            lines[2].append('│         │')
            lines[3].append('│         │')
            lines[4].append('│    {}    │'.format(suit))
            lines[5].append('│         │')
            lines[6].append('│         │')
            # NOTE(review): for '10' the bottom-right corner renders as '01';
            # presumably emulating the rotated corner index -- confirm intent.
            lines[7].append('│       {}{}│'.format(space, rank))
            lines[8].append('└─────────┘')
    # Emit the accumulated rows, cards separated by a single space.
    for line in lines:
        print (' '.join(line))
def reorganize(trajectories, payoffs):
    """Convert raw per-player trajectories into RL-friendly transitions.

    Args:
        trajectories (list): one raw trajectory per player, alternating
            states and actions: [s0, a0, s1, a1, ..., sT]
        payoffs (list): final payoff per player; delivered as the reward of
            the terminal transition (all earlier rewards are 0)

    Returns:
        (list): per-player lists of [state, action, reward, next_state, done]
    """
    num_players = len(trajectories)
    new_trajectories = [[] for _ in range(num_players)]
    for player, trajectory in enumerate(trajectories):
        last_start = len(trajectory) - 3
        for i in range(0, len(trajectory) - 2, 2):
            terminal = i == last_start
            reward = payoffs[player] if terminal else 0
            state, action, next_state = trajectory[i:i + 3]
            new_trajectories[player].append(
                [state, action, reward, next_state, terminal])
    return new_trajectories
def remove_illegal(action_probs, legal_actions):
    """Zero out illegal actions and renormalize the probability vector.

    Args:
        action_probs (numpy.array): a 1-dimensional numpy array.
        legal_actions (list): indices of the legal actions.

    Returns:
        probs (numpy.array): normalized probabilities over legal actions only;
        uniform over legal actions when all of their probabilities were zero.
    """
    probs = np.zeros(action_probs.shape[0])
    probs[legal_actions] = action_probs[legal_actions]
    total = np.sum(probs)
    if total == 0:
        # Degenerate case: no mass on legal actions, fall back to uniform.
        probs[legal_actions] = 1 / len(legal_actions)
    else:
        probs /= total
    return probs
def tournament(env, num):
    """Evaluate the performance of the agents in an environment.

    Args:
        env (Env): the environment to be evaluated.
        num (int): the number of games to play.

    Returns:
        (list): the average payoff of each player over ``num`` games.
    """
    payoffs = [0 for _ in range(env.num_players)]
    counter = 0
    while counter < num:
        _, game_payoffs = env.run(is_training=False)
        if isinstance(game_payoffs, list):
            # Some envs return several payoff vectors per run (one per
            # finished game); count each one individually.
            for single_payoffs in game_payoffs:
                for i in range(len(payoffs)):
                    payoffs[i] += single_payoffs[i]
                counter += 1
        else:
            for i in range(len(payoffs)):
                payoffs[i] += game_payoffs[i]
            counter += 1
    return [total / counter for total in payoffs]
def plot_curve(csv_path, save_path, algorithm):
    """Read (timestep, reward) rows from a csv file and plot the curve.

    Args:
        csv_path (str): path of the csv file written during training;
            must have 'timestep' and 'reward' columns.
        save_path (str): where to save the figure (parent dirs are created).
        algorithm (str): label used in the plot legend.
    """
    import os
    import csv
    import matplotlib.pyplot as plt
    timesteps, rewards = [], []
    with open(csv_path) as csvfile:
        for row in csv.DictReader(csvfile):
            timesteps.append(int(row['timestep']))
            rewards.append(float(row['reward']))
    fig, ax = plt.subplots()
    ax.plot(timesteps, rewards, label=algorithm)
    ax.set(xlabel='timestep', ylabel='reward')
    ax.legend()
    ax.grid()
    save_dir = os.path.dirname(save_path)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    fig.savefig(save_path)
from collections import defaultdict
import numpy as np
def wrap_state(state):
    """Adapt a PettingZoo observation dict to the rlcard state format.

    Args:
        state (dict): either an already-wrapped rlcard state, or a PettingZoo
            dict with 'observation' and 'action_mask' keys.

    Returns:
        (dict): a state with 'obs', 'legal_actions' and 'raw_legal_actions'.
    """
    # Already in rlcard format: pass through untouched.
    if "obs" in state and "legal_actions" in state and "raw_legal_actions" in state:
        return state
    legal = np.flatnonzero(state["action_mask"])
    # The values of legal actions are unknown here, so every legal action id
    # maps to None; raw_legal_actions mirrors the legal action ids.
    return {
        "obs": state["observation"],
        "legal_actions": {action: None for action in legal},
        "raw_legal_actions": list(legal),
    }
def run_game_pettingzoo(env, agents, is_training=False):
    # Play one full game in a PettingZoo AEC environment, recording for each
    # agent an alternating sequence of (obs, reward, done) tuples and actions.
    # NOTE(review): assumes the 4-tuple `env.last()` API of older PettingZoo
    # releases (obs, reward, done, info) -- confirm against the pinned version.
    env.reset()
    trajectories = defaultdict(list)
    for agent_name in env.agent_iter():
        obs, reward, done, _ = env.last()
        trajectories[agent_name].append((obs, reward, done))
        if done:
            # A finished agent must step with None, per the AEC protocol.
            action = None
        else:
            if is_training:
                action = agents[agent_name].step(obs)
            else:
                action, _ = agents[agent_name].eval_step(obs)
        trajectories[agent_name].append(action)
        env.step(action)
    return trajectories
def reorganize_pettingzoo(trajectories):
    """Reorganize the trajectory to make it RL friendly.

    Args:
        trajectories (dict): per-agent lists alternating (obs, reward, done)
            tuples and actions, as produced by ``run_game_pettingzoo``.

    Returns:
        (dict): per-agent lists of [obs, action, reward, next_obs, done]
        transitions that can be fed into RL algorithms.
    """
    new_trajectories = defaultdict(list)
    for agent_name, trajectory in trajectories.items():
        for i in range(0, len(trajectory) - 2, 2):
            # Reward and done come from the *next* (obs, reward, done) tuple.
            (obs, _, _), action, (next_obs, reward, done) = trajectory[i:i + 3]
            new_trajectories[agent_name].append(
                [obs, action, reward, next_obs, done])
    return new_trajectories
def tournament_pettingzoo(env, agents, num_episodes):
    """Play ``num_episodes`` games and average each agent's episode reward.

    Args:
        env: a PettingZoo AEC environment.
        agents (dict): agent name -> agent.
        num_episodes (int): number of episodes to play.

    Returns:
        (dict): agent name -> average total reward per episode.
    """
    total_rewards = defaultdict(float)
    for _ in range(num_episodes):
        episode = reorganize_pettingzoo(run_game_pettingzoo(env, agents))
        for agent_name, transitions in episode.items():
            # Index 2 of each transition holds the reward.
            total_rewards[agent_name] += sum(t[2] for t in transitions)
    return {name: total / num_episodes for name, total in total_rewards.items()}
import numpy as np
from copy import deepcopy
from rlcard.games.mahjong import Dealer
from rlcard.games.mahjong import Player
from rlcard.games.mahjong import Round
from rlcard.games.mahjong import Judger
class MahjongGame:
    """Top-level Mahjong game flow: wires the dealer, players, judger and
    round together and exposes the step/step_back/state API."""

    def __init__(self, allow_step_back=False):
        '''Initialize the class MahjongGame

        Args:
            allow_step_back (bool): when True, a snapshot is taken before
                each step so `step_back` can restore it.
        '''
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()
        self.num_players = 4

    def init_game(self):
        ''' Initialize the game of Mahjong

        This version supports two-player Mahjong

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        '''
        # Initialize a dealer that can deal cards
        self.dealer = Dealer(self.np_random)
        # Initialize four players to play the game
        self.players = [Player(i, self.np_random) for i in range(self.num_players)]
        self.judger = Judger(self.np_random)
        self.round = Round(self.judger, self.dealer, self.num_players, self.np_random)
        # Deal 13 cards to each player to prepare for the game
        for player in self.players:
            self.dealer.deal_cards(player, 13)
        # Save the history for stepping back to the last state.
        self.history = []
        # The current (first) player draws a 14th card before acting.
        self.dealer.deal_cards(self.players[self.round.current_player], 1)
        state = self.get_state(self.round.current_player)
        self.cur_state = state
        return state, self.round.current_player

    def step(self, action):
        ''' Get the next state

        Args:
            action (str): a specific action of Mahjong (e.g. a card to play,
                or 'pong'/'gong'/'chow'/'stand')

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player's id
        '''
        # First snapshot the current state so step_back can restore it.
        if self.allow_step_back:
            hist_dealer = deepcopy(self.dealer)
            hist_round = deepcopy(self.round)
            hist_players = deepcopy(self.players)
            self.history.append((hist_dealer, hist_players, hist_round))
        self.round.proceed_round(self.players, action)
        state = self.get_state(self.round.current_player)
        self.cur_state = state
        return state, self.round.current_player

    def step_back(self):
        ''' Return to the previous state of the game

        Returns:
            (bool): True if the game steps back successfully
        '''
        # No snapshot available (e.g. allow_step_back is False or at start).
        if not self.history:
            return False
        self.dealer, self.players, self.round = self.history.pop()
        return True

    def get_state(self, player_id):
        ''' Return player's state

        Args:
            player_id (int): player id

        Returns:
            (dict): The state of the player
        '''
        state = self.round.get_state(self.players, player_id)
        return state

    @staticmethod
    def get_legal_actions(state):
        ''' Return the legal actions for current player

        Returns:
            (list): A list of legal actions
        '''
        # In a regular play phase the legal actions are the playable cards;
        # otherwise they are the special actions (pong/gong/chow/stand).
        if state['valid_act'] == ['play']:
            state['valid_act'] = state['action_cards']
            return state['action_cards']
        else:
            return state['valid_act']

    @staticmethod
    def get_num_actions():
        ''' Return the number of applicable actions

        Returns:
            (int): The number of actions: 34 tiles plus the special
            actions (pong, chow, gong, stand)
        '''
        return 38

    def get_num_players(self):
        ''' return the number of players in Mahjong

        returns:
            (int): the number of players in the game
        '''
        return self.num_players

    def get_player_id(self):
        ''' return the id of current player in Mahjong

        returns:
            (int): the id of the current player
        '''
        return self.round.current_player

    def is_over(self):
        ''' Check if the game is over

        Returns:
            (boolean): True if the game is over
        '''
        win, player, _ = self.judger.judge_game(self)
        #pile =[sorted([c.get_str() for c in s ]) for s in self.players[player].pile if self.players[player].pile != None]
        #cards = sorted([c.get_str() for c in self.players[player].hand])
        #count = len(cards) + sum([len(p) for p in pile])
        # Side effect: remember the winner id reported by the judger
        # (judger's convention for "no winner" applies -- confirm).
        self.winner = player
        #print(win, player, players_val)
        #print(win, self.round.current_player, player, cards, pile, count)
        return win
class MahjongRound:
    """One round of Mahjong: tracks whose turn it is, the pending special
    action (pong/gong/chow), and advances the game on each action."""

    def __init__(self, judger, dealer, num_players, np_random):
        ''' Initialize the round class

        Args:
            judger (object): the object of MahjongJudger
            dealer (object): the object of MahjongDealer
            num_players (int): the number of players in game
        '''
        self.np_random = np_random
        self.judger = judger
        self.dealer = dealer
        self.target = None
        self.current_player = 0
        self.last_player = None
        self.num_players = num_players
        self.direction = 1
        self.played_cards = []
        self.is_over = False
        # Player who played the last regular (non pong/gong/chow) card.
        self.player_before_act = 0
        self.prev_status = None
        # Pending special action name ('pong'/'gong'/'chow'), or False if none.
        self.valid_act = False
        # Cards involved in the pending special action.
        self.last_cards = []

    def proceed_round(self, players, action):
        ''' Call other Classes's functions to keep one round running

        Args:
            players (list): list of MahjongPlayer objects
            action (str): string of legal action
        '''
        #hand_len = [len(p.hand) for p in players]
        #pile_len = [sum([len([c for c in p]) for p in pp.pile]) for pp in players]
        #total_len = [i + j for i, j in zip(hand_len, pile_len)]
        if action == 'stand':
            # The player declined the special action; check whether another
            # player may chow before play resumes.
            (valid_act, player, cards) = self.judger.judge_chow(self.dealer, players, self.last_player)
            if valid_act:
                self.valid_act = valid_act
                self.last_cards = cards
                self.last_player = self.current_player
                self.current_player = player.player_id
            else:
                # Nobody interrupts: the next player after the last regular
                # play draws a card and acts.
                self.last_player = self.current_player
                self.current_player = (self.player_before_act + 1) % 4
                self.dealer.deal_cards(players[self.current_player], 1)
                self.valid_act = False
        elif action == 'gong':
            players[self.current_player].gong(self.dealer, self.last_cards)
            self.last_player = self.current_player
            self.valid_act = False
        elif action == 'pong':
            players[self.current_player].pong(self.dealer, self.last_cards)
            self.last_player = self.current_player
            self.valid_act = False
        elif action == 'chow':
            players[self.current_player].chow(self.dealer, self.last_cards)
            self.last_player = self.current_player
            self.valid_act = False
        else: # Play game: Proceed to next player
            players[self.current_player].play_card(self.dealer, action)
            self.player_before_act = self.current_player
            self.last_player = self.current_player
            # A played card may be claimed with pong/gong by another player.
            (valid_act, player, cards) = self.judger.judge_pong_gong(self.dealer, players, self.last_player)
            if valid_act:
                self.valid_act = valid_act
                self.last_cards = cards
                self.last_player = self.current_player
                self.current_player = player.player_id
            else:
                self.last_player = self.current_player
                self.current_player = (self.current_player + 1) % 4
                self.dealer.deal_cards(players[self.current_player], 1)
        #hand_len = [len(p.hand) for p in players]
        #pile_len = [sum([len([c for c in p]) for p in pp.pile]) for pp in players]
        #total_len = [i + j for i, j in zip(hand_len, pile_len)]

    def get_state(self, players, player_id):
        ''' Get player's state

        Args:
            players (list): The list of MahjongPlayer
            player_id (int): The id of the player

        Return:
            state (dict): The information of the state
        '''
        state = {}
        #(valid_act, player, cards) = self.judger.judge_pong_gong(self.dealer, players, self.last_player)
        if self.valid_act: # PONG/GONG/CHOW
            # A special action is pending: the player may take it or 'stand'.
            state['valid_act'] = [self.valid_act, 'stand']
            state['table'] = self.dealer.table
            state['player'] = self.current_player
            state['current_hand'] = players[self.current_player].hand
            state['players_pile'] = {p.player_id: p.pile for p in players}
            state['action_cards'] = self.last_cards # For doing action (pong, chow, gong)
        else: # Regular Play
            state['valid_act'] = ['play']
            state['table'] = self.dealer.table
            state['player'] = self.current_player
            state['current_hand'] = players[player_id].hand
            state['players_pile'] = {p.player_id: p.pile for p in players}
            state['action_cards'] = players[player_id].hand # For doing action (pong, chow, gong)
        return state
from termcolor import colored
class UnoCard:
    """A single UNO card, identified by type, color, and trait."""

    # Legal values for each card attribute.
    info = {'type': ['number', 'action', 'wild'],
            'color': ['r', 'g', 'b', 'y'],
            'trait': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                      'skip', 'reverse', 'draw_2', 'wild', 'wild_draw_4']
            }

    def __init__(self, card_type, color, trait):
        """Initialize the class of UnoCard.

        Args:
            card_type (str): The type of card
            color (str): The color of card
            trait (str): The trait of card
        """
        self.type = card_type
        self.color = color
        self.trait = trait
        # Cached string form, e.g. 'r-skip'.
        self.str = self.get_str()

    def get_str(self):
        """Return the 'color-trait' string representation of the card."""
        return '-'.join((self.color, self.trait))

    @staticmethod
    def print_cards(cards, wild_color=False):
        """Print one card or a list of cards in a nice form.

        Args:
            cards (str or list): card string(s) to print
            wild_color (boolean): True to colorize wild cards by their color
        """
        if isinstance(cards, str):
            cards = [cards]
        display_names = {'skip': 'Skip', 'reverse': 'Reverse',
                         'draw_2': 'Draw-2', 'wild': 'Wild',
                         'wild_draw_4': 'Wild-Draw-4'}
        color_names = {'r': 'red', 'g': 'green', 'b': 'blue', 'y': 'yellow'}
        for i, card in enumerate(cards):
            if card == 'draw':
                color, trait = None, 'Draw'
            else:
                color, trait = card.split('-')
                trait = display_names.get(trait, trait)
            # 'Draw' and (by default) wild cards are printed uncolored.
            if trait == 'Draw' or (trait[:4] == 'Wild' and not wild_color):
                print(trait, end='')
            elif color in color_names:
                print(colored(trait, color_names[color]), end='')
            if i < len(cards) - 1:
                print(', ', end='')
from copy import deepcopy
import numpy as np
from rlcard.games.uno import Dealer
from rlcard.games.uno import Player
from rlcard.games.uno import Round
class UnoGame:
    ''' UNO game engine: wires the dealer, players and round together and
    exposes the step/state API used by the environment.

    Note: configure() must be called before init_game() so that
    self.num_players is set.
    '''

    def __init__(self, allow_step_back=False):
        ''' Initialize the game

        Args:
            allow_step_back (boolean): True if snapshots should be kept so
                the game can be rolled back with step_back()
        '''
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()

    def configure(self, game_config):
        ''' Specifiy some game specific parameters, such as number of players
        '''
        self.num_players = game_config['game_num_players']

    def init_game(self):
        ''' Initialize players and state

        Returns:
            (tuple): Tuple containing:

                (dict): The first state in one game
                (int): Current player's id
        '''
        # Initialize payoffs
        self.payoffs = [0 for _ in range(self.num_players)]

        # Initialize a dealer that can deal cards
        self.dealer = Dealer(self.np_random)

        # Initialize the players of the game
        self.players = [Player(i, self.np_random) for i in range(self.num_players)]

        # Deal 7 cards to each player to prepare for the game
        for player in self.players:
            self.dealer.deal_cards(player, 7)

        # Initialize a Round
        self.round = Round(self.dealer, self.num_players, self.np_random)

        # Flip and perform the top card to start play
        top_card = self.round.flip_top_card()
        self.round.perform_top_card(self.players, top_card)

        # Save the history for stepping back to the last state.
        self.history = []

        player_id = self.round.current_player
        state = self.get_state(player_id)
        return state, player_id

    def step(self, action):
        ''' Get the next state

        Args:
            action (str): A specific action

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player's id
        '''
        if self.allow_step_back:
            # First snapshot the current state
            his_dealer = deepcopy(self.dealer)
            his_round = deepcopy(self.round)
            his_players = deepcopy(self.players)
            self.history.append((his_dealer, his_players, his_round))

        self.round.proceed_round(self.players, action)
        player_id = self.round.current_player
        state = self.get_state(player_id)
        return state, player_id

    def step_back(self):
        ''' Return to the previous state of the game

        Returns:
            (bool): True if the game steps back successfully
        '''
        if not self.history:
            return False
        self.dealer, self.players, self.round = self.history.pop()
        return True

    def get_state(self, player_id):
        ''' Return player's state

        Args:
            player_id (int): player id

        Returns:
            (dict): The state of the player
        '''
        state = self.round.get_state(self.players, player_id)
        state['num_players'] = self.get_num_players()
        state['current_player'] = self.round.current_player
        return state

    def get_payoffs(self):
        ''' Return the payoffs of the game

        Returns:
            (list): Each entry corresponds to the payoff of one player
        '''
        winner = self.round.winner
        if winner is not None and len(winner) == 1:
            # The single winner scores +1 and every other player scores -1.
            # (The previous version wrote to index `1 - winner[0]`, which is
            # only correct for exactly two players and corrupts the payoffs
            # for 3+ players, e.g. winner 2 overwrote its own +1 via index -1.)
            for player_id in range(self.num_players):
                self.payoffs[player_id] = 1 if player_id == winner[0] else -1
        return self.payoffs

    def get_legal_actions(self):
        ''' Return the legal actions for current player

        Returns:
            (list): A list of legal actions
        '''
        return self.round.get_legal_actions(self.players, self.round.current_player)

    def get_num_players(self):
        ''' Return the number of players in the UNO game

        Returns:
            (int): The number of players in the game
        '''
        return self.num_players

    @staticmethod
    def get_num_actions():
        ''' Return the number of applicable actions

        Returns:
            (int): The number of actions. There are 61 actions
        '''
        return 61

    def get_player_id(self):
        ''' Return the current player's id

        Returns:
            (int): current player's id
        '''
        return self.round.current_player

    def is_over(self):
        ''' Check if the game is over

        Returns:
            (boolean): True if the game is over
        '''
        # NOTE(review): Round.is_over appears to be a boolean attribute
        # maintained by the Round object, not a method — confirm before changing.
        return self.round.is_over
import os
import json
import numpy as np
from collections import OrderedDict
import rlcard
from rlcard.games.uno.card import UnoCard as Card
# Read required docs from the installed rlcard package root
ROOT_PATH = rlcard.__path__[0]

# a map of abstract action string to its index, and the ordered list of actions
with open(os.path.join(ROOT_PATH, 'games/uno/jsondata/action_space.json'), 'r') as file:
    ACTION_SPACE = json.load(file, object_pairs_hook=OrderedDict)
    ACTION_LIST = list(ACTION_SPACE.keys())

# a map of color letter to its row index in the encoding planes
COLOR_MAP = {'r': 0, 'g': 1, 'b': 2, 'y': 3}

# a map of trait to its column index in the encoding planes
TRAIT_MAP = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
             '8': 8, '9': 9, 'skip': 10, 'reverse': 11, 'draw_2': 12,
             'wild': 13, 'wild_draw_4': 14}

# every concrete wild / wild-draw-4 card string (one per color)
WILD = ['r-wild', 'g-wild', 'b-wild', 'y-wild']
WILD_DRAW_4 = ['r-wild_draw_4', 'g-wild_draw_4', 'b-wild_draw_4', 'y-wild_draw_4']
def init_deck():
    ''' Generate uno deck of 108 cards

    Per color: one '0', two of each '1'-'9', two of each action card
    (skip/reverse/draw_2), plus one wild and one wild_draw_4.
    '''
    deck = []
    traits = Card.info['trait']
    numbers, actions, wilds = traits[:10], traits[10:13], traits[-2:]
    for color in Card.info['color']:
        # number cards: a single '0', every other number twice
        for num in numbers:
            copies = 1 if num == '0' else 2
            deck.extend(Card('number', color, num) for _ in range(copies))
        # action cards: two of each per color
        for action in actions:
            deck.extend(Card('action', color, action) for _ in range(2))
        # wild cards: one of each per color
        for wild in wilds:
            deck.append(Card('wild', color, wild))
    return deck
def cards2list(cards):
    ''' Get the corresponding string representation of cards

    Args:
        cards (list): list of UnoCard objects

    Returns:
        (list): list of the cards' string representations
    '''
    return [card.get_str() for card in cards]
def hand2dict(hand):
    ''' Get the corresponding dict representation of hand

    Args:
        hand (list): list of string of hand's card

    Returns:
        (dict): mapping from card string to its count in the hand
    '''
    hand_dict = {}
    for card in hand:
        hand_dict[card] = hand_dict.get(card, 0) + 1
    return hand_dict
def encode_hand(plane, hand):
    ''' Encode hand and preserve it into plane

    Plane semantics: plane[c][color][trait] == 1 means the hand holds exactly
    c copies of that card (c in {0, 1, 2}; an UNO deck has at most 2 copies).

    Args:
        plane (array): 3*4*15 numpy array
        hand (list): list of string of hand's card

    Returns:
        (array): 3*4*15 numpy array
    '''
    # plane = np.zeros((3, 4, 15), dtype=int)
    # Start by flagging every (color, trait) cell as "zero copies".
    plane[0] = np.ones((4, 15), dtype=int)
    hand = hand2dict(hand)
    for card, count in hand.items():
        card_info = card.split('-')
        color = COLOR_MAP[card_info[0]]
        trait = TRAIT_MAP[card_info[1]]
        if trait >= 13:
            # Wild cards (trait index 13/14) are color-less: mark the whole
            # trait column across all four color rows in the "one copy" plane.
            # NOTE(review): holding two copies of the same wild is encoded the
            # same as holding one (the second copy is skipped) — confirm intended.
            if plane[1][0][trait] == 0:
                for index in range(4):
                    plane[0][index][trait] = 0
                    plane[1][index][trait] = 1
        else:
            plane[0][color][trait] = 0
            # count is 1 or 2 here, indexing the matching copy-count plane
            plane[count][color][trait] = 1
    return plane
def encode_target(plane, target):
    ''' Encode target and preserve it into plane

    Args:
        plane (array): 1*4*15 numpy array
        target (str): string of target card

    Returns:
        (array): 1*4*15 numpy array with the target's cell set to 1
    '''
    color_name, trait_name = target.split('-')
    plane[COLOR_MAP[color_name]][TRAIT_MAP[trait_name]] = 1
    return plane
from copy import deepcopy, copy
import numpy as np
from rlcard.games.limitholdem import Dealer
from rlcard.games.limitholdem import Player, PlayerStatus
from rlcard.games.limitholdem import Judger
from rlcard.games.limitholdem import Round
class LimitHoldemGame:
    """Limit texas hold'em game engine.

    Drives dealer, players, judger and the four betting rounds, and exposes
    the step/state API used by the environment.
    """

    def __init__(self, allow_step_back=False, num_players=2):
        """Initialize the class limit holdem game

        Args:
            allow_step_back (bool): True if snapshots should be kept so the
                game can be rolled back with step_back()
            num_players (int): number of players in the game
        """
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()

        # Some configurations of the game
        # These arguments can be specified for creating new games

        # Small blind and big blind
        self.small_blind = 1
        self.big_blind = 2 * self.small_blind

        # Raise amount and allowed times
        self.raise_amount = self.big_blind
        self.allowed_raise_num = 4

        self.num_players = num_players

        # Betting history: number of raises in each of the 4 rounds
        self.history_raise_nums = [0 for _ in range(4)]

        self.dealer = None
        self.players = None
        self.judger = None
        self.public_cards = None
        self.game_pointer = None
        self.round = None
        self.round_counter = None
        self.history = None

    def configure(self, game_config):
        """Specify some game specific parameters, such as number of players"""
        self.num_players = game_config['game_num_players']

    def init_game(self):
        """
        Initialize the game of limit texas holdem

        This version supports two-player limit texas holdem

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        """
        # Initialize a dealer that can deal cards
        self.dealer = Dealer(self.np_random)

        # Initialize the players of the game
        self.players = [Player(i, self.np_random) for i in range(self.num_players)]

        # Initialize a judger class which will decide who wins in the end
        self.judger = Judger(self.np_random)

        # Deal 2 hole cards to each player, one at a time
        for i in range(2 * self.num_players):
            self.players[i % self.num_players].hand.append(self.dealer.deal_card())

        # Initialize public cards
        self.public_cards = []

        # Randomly choose a small blind and a big blind
        s = self.np_random.randint(0, self.num_players)
        b = (s + 1) % self.num_players
        self.players[b].in_chips = self.big_blind
        self.players[s].in_chips = self.small_blind

        # The player next to the big blind plays the first
        self.game_pointer = (b + 1) % self.num_players

        # Initialize a bidding round; in the first round the big blind and the
        # small blind need to be passed to the round for processing.
        self.round = Round(raise_amount=self.raise_amount,
                           allowed_raise_num=self.allowed_raise_num,
                           num_players=self.num_players,
                           np_random=self.np_random)

        self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])

        # Count the round. There are 4 rounds in each game.
        self.round_counter = 0

        # Save the history for stepping back to the last state.
        self.history = []

        state = self.get_state(self.game_pointer)

        # Reset the betting history for the new game
        self.history_raise_nums = [0 for _ in range(4)]

        return state, self.game_pointer

    def step(self, action):
        """
        Get the next state

        Args:
            action (str): a specific action. (call, raise, fold, or check)

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player id
        """
        if self.allow_step_back:
            # First snapshot the current state
            r = deepcopy(self.round)
            b = self.game_pointer
            r_c = self.round_counter
            d = deepcopy(self.dealer)
            p = deepcopy(self.public_cards)
            ps = deepcopy(self.players)
            rn = copy(self.history_raise_nums)
            self.history.append((r, b, r_c, d, p, ps, rn))

        # Then we proceed to the next round
        self.game_pointer = self.round.proceed_round(self.players, action)

        # Save the current raise num to history
        self.history_raise_nums[self.round_counter] = self.round.have_raised

        # If a round is over, we deal more public cards
        if self.round.is_over():
            # For the first round, we deal 3 cards (the flop)
            if self.round_counter == 0:
                self.public_cards.append(self.dealer.deal_card())
                self.public_cards.append(self.dealer.deal_card())
                self.public_cards.append(self.dealer.deal_card())

            # For the following rounds, we deal only 1 card (turn / river)
            elif self.round_counter <= 2:
                self.public_cards.append(self.dealer.deal_card())

            # Double the raise amount for the last two rounds
            if self.round_counter == 1:
                self.round.raise_amount = 2 * self.raise_amount

            self.round_counter += 1
            self.round.start_new_round(self.game_pointer)

        state = self.get_state(self.game_pointer)

        return state, self.game_pointer

    def step_back(self):
        """
        Return to the previous state of the game

        Returns:
            (bool): True if the game steps back successfully
        """
        if len(self.history) > 0:
            # Bug fix: the raise history was previously restored into the
            # misspelled attribute `history_raises_nums`, leaving the real
            # `history_raise_nums` un-restored after a step back.
            self.round, self.game_pointer, self.round_counter, self.dealer, self.public_cards, \
                self.players, self.history_raise_nums = self.history.pop()
            return True
        return False

    def get_num_players(self):
        """
        Return the number of players in limit texas holdem

        Returns:
            (int): The number of players in the game
        """
        return self.num_players

    @staticmethod
    def get_num_actions():
        """
        Return the number of applicable actions

        Returns:
            (int): The number of actions. There are 4 actions (call, raise, check and fold)
        """
        return 4

    def get_player_id(self):
        """
        Return the current player's id

        Returns:
            (int): current player's id
        """
        return self.game_pointer

    def get_state(self, player):
        """
        Return player's state

        Args:
            player (int): player id

        Returns:
            (dict): The state of the player
        """
        chips = [self.players[i].in_chips for i in range(self.num_players)]
        legal_actions = self.get_legal_actions()
        state = self.players[player].get_state(self.public_cards, chips, legal_actions)
        state['raise_nums'] = self.history_raise_nums

        return state

    def is_over(self):
        """
        Check if the game is over

        Returns:
            (boolean): True if the game is over
        """
        alive_players = [1 if p.status in (PlayerStatus.ALIVE, PlayerStatus.ALLIN) else 0 for p in self.players]
        # If only one player is alive, the game is over.
        if sum(alive_players) == 1:
            return True

        # If all rounds are finished
        if self.round_counter >= 4:
            return True
        return False

    def get_payoffs(self):
        """
        Return the payoffs of the game

        Returns:
            (list): Each entry corresponds to the payoff of one player
        """
        hands = [p.hand + self.public_cards if p.status == PlayerStatus.ALIVE else None for p in self.players]
        chips_payoffs = self.judger.judge_game(self.players, hands)
        # Normalize payoffs by the big blind
        payoffs = np.array(chips_payoffs) / self.big_blind
        return payoffs

    def get_legal_actions(self):
        """
        Return the legal actions for current player

        Returns:
            (list): A list of legal actions
        """
        return self.round.get_legal_actions()
class LimitHoldemRound:
    """Round can call other Classes' functions to keep the game running"""

    def __init__(self, raise_amount, allowed_raise_num, num_players, np_random):
        """
        Initialize the round class

        Args:
            raise_amount (int): the raise amount for each raise
            allowed_raise_num (int): The number of allowed raise num
            num_players (int): The number of players
        """
        self.np_random = np_random
        self.game_pointer = None
        self.raise_amount = raise_amount
        self.allowed_raise_num = allowed_raise_num
        self.num_players = num_players

        # Number of raises made so far in this round
        self.have_raised = 0

        # Number of consecutive non-raising moves; once every player has
        # agreed not to raise, the round is over
        self.not_raise_num = 0

        # Chips committed in this round, per player
        self.raised = [0] * num_players
        self.player_folded = None

    def start_new_round(self, game_pointer, raised=None):
        """
        Start a new bidding round

        Args:
            game_pointer (int): The game_pointer that indicates the next player
            raised (list): Initialize the chips for each player

        Note: For the first round of the game, we need to setup the big/small blind
        """
        self.game_pointer = game_pointer
        self.have_raised = 0
        self.not_raise_num = 0
        self.raised = raised if raised else [0] * self.num_players

    def proceed_round(self, players, action):
        """
        Call other classes functions to keep one round running

        Args:
            players (list): The list of players that play the game
            action (str): An legal action taken by the player

        Returns:
            (int): The game_pointer that indicates the next player
        """
        if action not in self.get_legal_actions():
            raise Exception('{} is not legal action. Legal actions: {}'.format(action, self.get_legal_actions()))

        highest = max(self.raised)
        if action == 'call':
            # Match the highest bet of the round
            diff = highest - self.raised[self.game_pointer]
            self.raised[self.game_pointer] = highest
            players[self.game_pointer].in_chips += diff
            self.not_raise_num += 1
        elif action == 'raise':
            # Match the highest bet and add the fixed raise amount on top
            diff = highest - self.raised[self.game_pointer] + self.raise_amount
            self.raised[self.game_pointer] = highest + self.raise_amount
            players[self.game_pointer].in_chips += diff
            self.have_raised += 1
            # A raise restarts the agreement count (only the raiser has agreed)
            self.not_raise_num = 1
        elif action == 'fold':
            players[self.game_pointer].status = 'folded'
            self.player_folded = True
        elif action == 'check':
            self.not_raise_num += 1

        # Advance to the next player, skipping anyone who has folded
        self.game_pointer = (self.game_pointer + 1) % self.num_players
        while players[self.game_pointer].status == 'folded':
            self.game_pointer = (self.game_pointer + 1) % self.num_players

        return self.game_pointer

    def get_legal_actions(self):
        """
        Obtain the legal actions for the current player

        Returns:
            (list): A list of legal actions
        """
        highest = max(self.raised)
        committed = self.raised[self.game_pointer]
        actions = []
        # Calling only makes sense when someone has committed more chips
        if committed < highest:
            actions.append('call')
        # Raising is allowed until the per-round raise cap is reached
        if self.have_raised < self.allowed_raise_num:
            actions.append('raise')
        actions.append('fold')
        # Checking requires having already matched the highest bet
        if committed == highest:
            actions.append('check')
        return actions

    def is_over(self):
        """
        Check whether the round is over

        Returns:
            (boolean): True if the current round is over
        """
        return self.not_raise_num >= self.num_players
from enum import Enum
import numpy as np
from copy import deepcopy
from rlcard.games.limitholdem import Game
from rlcard.games.limitholdem import PlayerStatus
from rlcard.games.nolimitholdem import Dealer
from rlcard.games.nolimitholdem import Player
from rlcard.games.nolimitholdem import Judger
from rlcard.games.nolimitholdem import Round, Action
class Stage(Enum):
    ''' Betting stages of a no-limit hold'em hand, in play order.

    The integer values mirror the game's round counter so that
    `Stage(round_counter)` recovers the stage on step_back.
    '''
    PREFLOP = 0
    FLOP = 1
    TURN = 2
    RIVER = 3
    END_HIDDEN = 4
    SHOWDOWN = 5
class NolimitholdemGame(Game):
    ''' No-limit texas hold'em game engine, extending the limit hold'em Game
    with chip stacks, a dealer button and pot-relative raise actions. '''

    def __init__(self, allow_step_back=False, num_players=2):
        """Initialize the class no limit holdem Game"""
        super().__init__(allow_step_back, num_players)

        self.np_random = np.random.RandomState()

        # small blind and big blind
        self.small_blind = 1
        self.big_blind = 2 * self.small_blind

        # config players: starting stack for each player
        self.init_chips = [100] * num_players

        # If None, the dealer will be randomly chosen
        self.dealer_id = None

    def configure(self, game_config):
        """
        Specify some game specific parameters, such as number of players, initial chips, and dealer id.
        If dealer_id is None, he will be randomly chosen
        """
        self.num_players = game_config['game_num_players']
        # must have num_players length
        self.init_chips = [game_config['chips_for_each']] * game_config["game_num_players"]
        self.dealer_id = game_config['dealer_id']

    def init_game(self):
        """
        Initialize the game of not limit holdem

        This version supports two-player no limit texas holdem

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        """
        if self.dealer_id is None:
            # NOTE(review): once randomly chosen, dealer_id stays set, so
            # subsequent init_game calls reuse the same dealer — confirm intended.
            self.dealer_id = self.np_random.randint(0, self.num_players)

        # Initialize a dealer that can deal cards
        self.dealer = Dealer(self.np_random)

        # Initialize players to play the game
        self.players = [Player(i, self.init_chips[i], self.np_random) for i in range(self.num_players)]

        # Initialize a judger class which will decide who wins in the end
        self.judger = Judger(self.np_random)

        # Deal cards to each player to prepare for the first round
        for i in range(2 * self.num_players):
            self.players[i % self.num_players].hand.append(self.dealer.deal_card())

        # Initialize public cards
        self.public_cards = []
        self.stage = Stage.PREFLOP

        # Big blind and small blind, seated after the dealer button
        s = (self.dealer_id + 1) % self.num_players
        b = (self.dealer_id + 2) % self.num_players
        self.players[b].bet(chips=self.big_blind)
        self.players[s].bet(chips=self.small_blind)

        # The player next to the big blind plays the first
        self.game_pointer = (b + 1) % self.num_players

        # Initialize a bidding round, in the first round, the big blind and the small blind needs to
        # be passed to the round for processing.
        self.round = Round(self.num_players, self.big_blind, dealer=self.dealer, np_random=self.np_random)

        self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])

        # Count the round. There are 4 rounds in each game.
        self.round_counter = 0

        # Save the history for stepping back to the last state.
        self.history = []

        state = self.get_state(self.game_pointer)

        return state, self.game_pointer

    def get_legal_actions(self):
        """
        Return the legal actions for current player

        Returns:
            (list): A list of legal actions
        """
        return self.round.get_nolimit_legal_actions(players=self.players)

    def step(self, action):
        """
        Get the next state

        Args:
            action (str): a specific action. (call, raise, fold, or check)

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player id
        """
        if action not in self.get_legal_actions():
            print(action, self.get_legal_actions())
            print(self.get_state(self.game_pointer))
            raise Exception('Action not allowed')

        if self.allow_step_back:
            # First snapshot the current state
            r = deepcopy(self.round)
            b = self.game_pointer
            r_c = self.round_counter
            d = deepcopy(self.dealer)
            p = deepcopy(self.public_cards)
            ps = deepcopy(self.players)
            self.history.append((r, b, r_c, d, p, ps))

        # Then we proceed to the next round
        self.game_pointer = self.round.proceed_round(self.players, action)

        # Players who have folded or gone all-in no longer act
        players_in_bypass = [1 if player.status in (PlayerStatus.FOLDED, PlayerStatus.ALLIN) else 0 for player in self.players]
        if self.num_players - sum(players_in_bypass) == 1:
            last_player = players_in_bypass.index(0)
            if self.round.raised[last_player] >= max(self.round.raised):
                # If the last player has put enough chips, he is also bypassed
                players_in_bypass[last_player] = 1

        # If a round is over, we deal more public cards
        if self.round.is_over():
            # Game pointer goes to the first player not in bypass after the dealer, if there is one
            self.game_pointer = (self.dealer_id + 1) % self.num_players
            if sum(players_in_bypass) < self.num_players:
                while players_in_bypass[self.game_pointer]:
                    self.game_pointer = (self.game_pointer + 1) % self.num_players

            # For the first round, we deal 3 cards
            if self.round_counter == 0:
                self.stage = Stage.FLOP
                self.public_cards.append(self.dealer.deal_card())
                self.public_cards.append(self.dealer.deal_card())
                self.public_cards.append(self.dealer.deal_card())
                # When nobody can act any more there is no further betting, so
                # fall through and deal the next street within this same step
                if len(self.players) == np.sum(players_in_bypass):
                    self.round_counter += 1
            # For the following rounds, we deal only 1 card
            if self.round_counter == 1:
                self.stage = Stage.TURN
                self.public_cards.append(self.dealer.deal_card())
                if len(self.players) == np.sum(players_in_bypass):
                    self.round_counter += 1
            if self.round_counter == 2:
                self.stage = Stage.RIVER
                self.public_cards.append(self.dealer.deal_card())
                if len(self.players) == np.sum(players_in_bypass):
                    self.round_counter += 1

            self.round_counter += 1
            self.round.start_new_round(self.game_pointer)

        state = self.get_state(self.game_pointer)

        return state, self.game_pointer

    def get_state(self, player_id):
        """
        Return player's state

        Args:
            player_id (int): player id

        Returns:
            (dict): The state of the player
        """
        # The pot is recomputed from every player's committed chips
        self.dealer.pot = np.sum([player.in_chips for player in self.players])

        chips = [self.players[i].in_chips for i in range(self.num_players)]
        legal_actions = self.get_legal_actions()
        state = self.players[player_id].get_state(self.public_cards, chips, legal_actions)
        state['stakes'] = [self.players[i].remained_chips for i in range(self.num_players)]
        state['current_player'] = self.game_pointer
        state['pot'] = self.dealer.pot
        state['stage'] = self.stage
        return state

    def step_back(self):
        """
        Return to the previous state of the game

        Returns:
            (bool): True if the game steps back successfully
        """
        if len(self.history) > 0:
            self.round, self.game_pointer, self.round_counter, self.dealer, self.public_cards, self.players = self.history.pop()
            # The stage enum mirrors the round counter (see Stage)
            self.stage = Stage(self.round_counter)
            return True
        return False

    def get_num_players(self):
        """
        Return the number of players in no limit texas holdem

        Returns:
            (int): The number of players in the game
        """
        return self.num_players

    def get_payoffs(self):
        """
        Return the payoffs of the game

        Returns:
            (list): Each entry corresponds to the payoff of one player
        """
        hands = [p.hand + self.public_cards if p.status in (PlayerStatus.ALIVE, PlayerStatus.ALLIN) else None for p in self.players]
        chips_payoffs = self.judger.judge_game(self.players, hands)
        return chips_payoffs

    @staticmethod
    def get_num_actions():
        """
        Return the number of applicable actions

        Returns:
            (int): The number of actions. There are 6 actions (call, raise_half_pot, raise_pot, all_in, check and fold)
        """
        return len(Action)
"""Implement no limit texas holdem Round class"""
from enum import Enum
from rlcard.games.limitholdem import PlayerStatus
class Action(Enum):
    ''' Abstract betting actions available in no-limit hold'em.

    The commented-out members document actions that were considered but are
    not part of the current action space; the live values must stay contiguous
    because they index the policy's action space.
    '''
    FOLD = 0
    CHECK_CALL = 1
    #CALL = 2
    # RAISE_3BB = 3
    RAISE_HALF_POT = 2
    RAISE_POT = 3
    # RAISE_2POT = 5
    ALL_IN = 4
    # SMALL_BLIND = 7
    # BIG_BLIND = 8
class NolimitholdemRound:
    """Round can call functions from other classes to keep the game running"""

    def __init__(self, num_players, init_raise_amount, dealer, np_random):
        """
        Initialize the round class

        Args:
            num_players (int): The number of players
            init_raise_amount (int): The min raise amount when every round starts
        """
        self.np_random = np_random
        self.game_pointer = None
        self.num_players = num_players
        self.init_raise_amount = init_raise_amount
        self.dealer = dealer

        # Count the number without raise
        # If every player agree to not raise, the round is over
        self.not_raise_num = 0

        # Count players that are not playing anymore (folded or all-in)
        self.not_playing_num = 0

        # Raised amount for each player
        self.raised = [0 for _ in range(self.num_players)]

    def start_new_round(self, game_pointer, raised=None):
        """
        Start a new bidding round

        Args:
            game_pointer (int): The game_pointer that indicates the next player
            raised (list): Initialize the chips for each player

        Note: For the first round of the game, we need to setup the big/small blind
        """
        self.game_pointer = game_pointer
        self.not_raise_num = 0
        if raised:
            self.raised = raised
        else:
            self.raised = [0 for _ in range(self.num_players)]

    def proceed_round(self, players, action):
        """
        Call functions from other classes to keep one round running

        Args:
            players (list): The list of players that play the game
            action (str/int): An legal action taken by the player

        Returns:
            (int): The game_pointer that indicates the next player
        """
        player = players[self.game_pointer]

        if action == Action.CHECK_CALL:
            # Match the highest committed amount of the round
            diff = max(self.raised) - self.raised[self.game_pointer]
            self.raised[self.game_pointer] = max(self.raised)
            player.bet(chips=diff)
            self.not_raise_num += 1

        elif action == Action.ALL_IN:
            all_in_quantity = player.remained_chips
            self.raised[self.game_pointer] = all_in_quantity + self.raised[self.game_pointer]
            player.bet(chips=all_in_quantity)
            # NOTE(review): an all-in always resets the agreement count like a
            # raise, even when it does not exceed the current highest bet —
            # confirm intended.
            self.not_raise_num = 1

        elif action == Action.RAISE_POT:
            self.raised[self.game_pointer] += self.dealer.pot
            player.bet(chips=self.dealer.pot)
            self.not_raise_num = 1

        elif action == Action.RAISE_HALF_POT:
            quantity = int(self.dealer.pot / 2)
            self.raised[self.game_pointer] += quantity
            player.bet(chips=quantity)
            self.not_raise_num = 1

        elif action == Action.FOLD:
            player.status = PlayerStatus.FOLDED

        # Betting more chips than the player owns is a programming error
        if player.remained_chips < 0:
            raise Exception("Player in negative stake")

        # A player who committed their whole stack (without folding) is all-in
        if player.remained_chips == 0 and player.status != PlayerStatus.FOLDED:
            player.status = PlayerStatus.ALLIN

        self.game_pointer = (self.game_pointer + 1) % self.num_players

        if player.status == PlayerStatus.ALLIN:
            self.not_playing_num += 1
            self.not_raise_num -= 1  # Because already counted in not_playing_num
        if player.status == PlayerStatus.FOLDED:
            self.not_playing_num += 1

        # Skip the folded players
        while players[self.game_pointer].status == PlayerStatus.FOLDED:
            self.game_pointer = (self.game_pointer + 1) % self.num_players

        return self.game_pointer

    def get_nolimit_legal_actions(self, players):
        """
        Obtain the legal actions for the current player

        Args:
            players (list): The players in the game

        Returns:
            (list): A list of legal actions
        """
        full_actions = list(Action)

        # The player can always check or call
        player = players[self.game_pointer]

        diff = max(self.raised) - self.raised[self.game_pointer]
        # If the current player has no more chips after call, we cannot raise
        if diff > 0 and diff >= player.remained_chips:
            full_actions.remove(Action.RAISE_HALF_POT)
            full_actions.remove(Action.RAISE_POT)
            full_actions.remove(Action.ALL_IN)
        # Even if we can raise, we have to check remained chips
        else:
            if self.dealer.pot > player.remained_chips:
                full_actions.remove(Action.RAISE_POT)

            if int(self.dealer.pot / 2) > player.remained_chips:
                full_actions.remove(Action.RAISE_HALF_POT)

        # Can't raise if the total raise amount is leq than the max raise amount of this round
        # If raise by pot, there is no such concern
        if Action.RAISE_HALF_POT in full_actions and \
                int(self.dealer.pot / 2) + self.raised[self.game_pointer] <= max(self.raised):
            full_actions.remove(Action.RAISE_HALF_POT)

        return full_actions

    def is_over(self):
        """
        Check whether the round is over

        Returns:
            (boolean): True if the current round is over
        """
        if self.not_raise_num + self.not_playing_num >= self.num_players:
            return True
        return False
''' Implement Doudizhu Game class
'''
import functools
from heapq import merge
import numpy as np
from rlcard.games.doudizhu.utils import cards2str, doudizhu_sort_card, CARD_RANK_STR
from rlcard.games.doudizhu import Player
from rlcard.games.doudizhu import Round
from rlcard.games.doudizhu import Judger
class DoudizhuGame:
''' Provide game APIs for env to run doudizhu and get corresponding state
information.
'''
def __init__(self, allow_step_back=False):
    ''' Initialize the Doudizhu game

    Args:
        allow_step_back (boolean): True if snapshots should be kept so the
            game can be rolled back with step_back()
    '''
    self.allow_step_back = allow_step_back
    self.np_random = np.random.RandomState()
    # Doudizhu is always a three-player game (one landlord, two peasants)
    self.num_players = 3
def init_game(self):
    ''' Initialize players and state.

    Returns:
        dict: first state in one game
        int: current player's id
    '''
    # initialize public variables
    self.winner_id = None
    self.history = []

    # initialize players
    self.players = [Player(num, self.np_random)
                    for num in range(self.num_players)]

    # initialize round to deal cards and determine landlord
    self.played_cards = [np.zeros((len(CARD_RANK_STR), ), dtype=np.int32)
                        for _ in range(self.num_players)]
    self.round = Round(self.np_random, self.played_cards)
    self.round.initiate(self.players)

    # initialize judger — created after Round.initiate() since it presumably
    # derives each player's playable cards from the dealt hands (see the
    # calc_playable_cards calls in step()); confirm before reordering
    self.judger = Judger(self.players, self.np_random)

    # get state of first player
    player_id = self.round.current_player
    self.state = self.get_state(player_id)

    return self.state, player_id
def step(self, action):
    ''' Perform one draw of the game

    Args:
        action (str): specific action of doudizhu. Eg: '33344'

    Returns:
        dict: next player's state
        int: next player's id
    '''
    if self.allow_step_back:
        # TODO: don't record game.round, game.players, game.judger if allow_step_back not set
        pass

    # perform action
    player = self.players[self.round.current_player]
    self.round.proceed_round(player, action)
    if (action != 'pass'):
        # playing real cards changed the hand, so the cached playable-card
        # set for this player must be refreshed
        self.judger.calc_playable_cards(player)
    if self.judger.judge_game(self.players, self.round.current_player):
        # the acting player emptied their hand and wins the game
        self.winner_id = self.round.current_player
    next_id = (player.player_id+1) % len(self.players)
    self.round.current_player = next_id

    # get next state
    state = self.get_state(next_id)
    self.state = state

    return state, next_id
def step_back(self):
''' Return to the previous state of the game
Returns:
(bool): True if the game steps back successfully
'''
if not self.round.trace:
return False
#winner_id will be always None no matter step_back from any case
self.winner_id = None
#reverse round
player_id, cards = self.round.step_back(self.players)
#reverse player
if (cards != 'pass'):
self.players[player_id].played_cards = self.round.find_last_played_cards_in_trace(player_id)
self.players[player_id].play_back()
#reverse judger.played_cards if needed
if (cards != 'pass'):
self.judger.restore_playable_cards(player_id)
self.state = self.get_state(self.round.current_player)
return True
def get_state(self, player_id):
''' Return player's state
Args:
player_id (int): player id
Returns:
(dict): The state of the player
'''
player = self.players[player_id]
others_hands = self._get_others_current_hand(player)
num_cards_left = [len(self.players[i].current_hand) for i in range(self.num_players)]
if self.is_over():
actions = []
else:
actions = list(player.available_actions(self.round.greater_player, self.judger))
state = player.get_state(self.round.public, others_hands, num_cards_left, actions)
return state
@staticmethod
def get_num_actions():
''' Return the total number of abstract acitons
Returns:
int: the total number of abstract actions of doudizhu
'''
return 27472
def get_player_id(self):
''' Return current player's id
Returns:
int: current player's id
'''
return self.round.current_player
def get_num_players(self):
''' Return the number of players in doudizhu
Returns:
int: the number of players in doudizhu
'''
return self.num_players
def is_over(self):
''' Judge whether a game is over
Returns:
Bool: True(over) / False(not over)
'''
if self.winner_id is None:
return False
return True
def _get_others_current_hand(self, player):
player_up = self.players[(player.player_id+1) % len(self.players)]
player_down = self.players[(player.player_id-1) % len(self.players)]
others_hand = merge(player_up.current_hand, player_down.current_hand, key=functools.cmp_to_key(doudizhu_sort_card))
return cards2str(others_hand) | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/games/doudizhu/game.py | 0.49048 | 0.171651 | game.py | pypi |
''' Implement Doudizhu Player class
'''
import functools
from rlcard.games.doudizhu.utils import get_gt_cards
from rlcard.games.doudizhu.utils import cards2str, doudizhu_sort_card
class DoudizhuPlayer:
    ''' A Dou Dizhu player.

    Stores the cards in the player's hand and the player's role, determines
    the legal actions according to the rules, and performs chosen actions.
    '''
    def __init__(self, player_id, np_random):
        ''' Give the player an id in one game

        Args:
            player_id (int): the player_id of a player

        Notes:
            1. role: A player's temporary role in one game(landlord or peasant)
            2. played_cards: The cards played in one round
            3. hand: Initial cards
            4. _current_hand: The rest of the cards after playing some of them
        '''
        self.np_random = np_random
        self.player_id = player_id
        self.initial_hand = None
        self._current_hand = []
        self.role = ''
        self.played_cards = None
        self.singles = '3456789TJQKA2BR'
        # every play() appends the cards it removed here so that play_back()
        # can restore them to self._current_hand
        self._recorded_played_cards = []

    @property
    def current_hand(self):
        return self._current_hand

    def set_current_hand(self, value):
        self._current_hand = value

    def get_state(self, public, others_hands, num_cards_left, actions):
        ''' Assemble the observation dict for this player. '''
        return {
            'seen_cards': public['seen_cards'],
            'landlord': public['landlord'],
            'trace': public['trace'].copy(),
            'played_cards': public['played_cards'],
            'self': self.player_id,
            'current_hand': cards2str(self._current_hand),
            'others_hand': others_hands,
            'num_cards_left': num_cards_left,
            'actions': actions,
        }

    def available_actions(self, greater_player=None, judger=None):
        ''' Get the actions can be made based on the rules

        Args:
            greater_player (DoudizhuPlayer object): player who played
                current biggest cards.
            judger (DoudizhuJudger object): object of DoudizhuJudger

        Returns:
            list: list of string of actions. Eg: ['pass', '8', '9', 'T', 'J']
        '''
        if greater_player is None or greater_player.player_id == self.player_id:
            # leading the trick: any playable combination is allowed
            return judger.get_playable_cards(self)
        # otherwise only combinations beating greater_player's cards
        return get_gt_cards(self, greater_player)

    def play(self, action, greater_player=None):
        ''' Perform an action

        Args:
            action (string): specific action
            greater_player (DoudizhuPlayer object): The player who played current biggest cards.

        Returns:
            object of DoudizhuPlayer: the new greater_player (self) on a real
            play, or the unchanged greater_player when passing
        '''
        if action == 'pass':
            self._recorded_played_cards.append([])
            return greater_player
        joker_labels = {'B': 'BJ', 'R': 'RJ'}
        removed_cards = []
        self.played_cards = action
        for played_char in action:
            target = joker_labels.get(played_char, played_char)
            for index, hand_card in enumerate(self._current_hand):
                # jokers carry their label in .suit, ordinary cards in .rank
                label = hand_card.rank if hand_card.rank != '' else hand_card.suit
                if target == label:
                    removed_cards.append(self.current_hand[index])
                    self._current_hand.remove(self._current_hand[index])
                    break
        self._recorded_played_cards.append(removed_cards)
        return self

    def play_back(self):
        ''' Restore the most recently played cards back to self._current_hand
        '''
        restored_cards = self._recorded_played_cards.pop()
        self._current_hand.extend(restored_cards)
        self._current_hand.sort(key=functools.cmp_to_key(doudizhu_sort_card))
import numpy as np
from .player import GinRummyPlayer
from .round import GinRummyRound
from .judge import GinRummyJudge
from .utils.settings import Settings, DealerForRound
from .utils.action_event import *
class GinRummyGame:
    ''' Game class. This class will interact with outer environment.
    '''
    def __init__(self, allow_step_back=False):
        '''Initialize the class GinRummyGame

        Note: allow_step_back is stored, but step_back() is not implemented.
        '''
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()
        self.judge = GinRummyJudge(game=self)
        self.settings = Settings()
        self.actions = None # type: List[ActionEvent] or None # must reset in init_game
        self.round = None # round: GinRummyRound or None, must reset in init_game
        self.num_players = 2
    def init_game(self):
        ''' Initialize all characters in the game and start round 1
        '''
        # the dealer seat is random unless the settings pin it to one player
        dealer_id = self.np_random.choice([0, 1])
        if self.settings.dealer_for_round == DealerForRound.North:
            dealer_id = 0
        elif self.settings.dealer_for_round == DealerForRound.South:
            dealer_id = 1
        self.actions = []
        self.round = GinRummyRound(dealer_id=dealer_id, np_random=self.np_random)
        # the non-dealer (who has the first move) is dealt 11 cards, the dealer 10
        for i in range(2):
            num = 11 if i == 0 else 10
            player = self.round.players[(dealer_id + 1 + i) % 2]
            self.round.dealer.deal_cards(player=player, num=num)
        current_player_id = self.round.current_player_id
        state = self.get_state(player_id=current_player_id)
        return state, current_player_id
    def step(self, action: ActionEvent):
        ''' Perform game action and return next player number, and the state for next player
        '''
        # dispatch on the concrete action type; the round mutates its own state
        if isinstance(action, ScoreNorthPlayerAction):
            self.round.score_player_0(action)
        elif isinstance(action, ScoreSouthPlayerAction):
            self.round.score_player_1(action)
        elif isinstance(action, DrawCardAction):
            self.round.draw_card(action)
        elif isinstance(action, PickUpDiscardAction):
            self.round.pick_up_discard(action)
        elif isinstance(action, DeclareDeadHandAction):
            self.round.declare_dead_hand(action)
        elif isinstance(action, GinAction):
            self.round.gin(action, going_out_deadwood_count=self.settings.going_out_deadwood_count)
        elif isinstance(action, DiscardAction):
            self.round.discard(action)
        elif isinstance(action, KnockAction):
            self.round.knock(action)
        else:
            raise Exception('Unknown step action={}'.format(action))
        self.actions.append(action)
        next_player_id = self.round.current_player_id
        next_state = self.get_state(player_id=next_player_id)
        return next_state, next_player_id
    def step_back(self):
        ''' Takes one step backward and restore to the last state
        '''
        raise NotImplementedError
    def get_num_players(self):
        ''' Return the number of players in the game
        '''
        return 2
    def get_num_actions(self):
        ''' Return the number of possible actions in the game
        '''
        return ActionEvent.get_num_actions()
    def get_player_id(self):
        ''' Return the current player that will take actions soon
        '''
        return self.round.current_player_id
    def is_over(self):
        ''' Return whether the current game is over
        '''
        return self.round.is_over
    def get_current_player(self) -> GinRummyPlayer or None:
        # NOTE(review): the `GinRummyPlayer or None` annotation evaluates to
        # just GinRummyPlayer at runtime; Optional[GinRummyPlayer] is meant.
        return self.round.get_current_player()
    def get_last_action(self) -> ActionEvent or None:
        # the most recently applied action, or None before the first step
        return None if len(self.actions) == 0 else self.actions[-1]
    def get_state(self, player_id: int):
        ''' Get player's state

        Return:
            state (dict): The information of the state (empty once the game is over)
        '''
        state = {}
        if not self.is_over():
            discard_pile = self.round.dealer.discard_pile
            top_discard = [] if not discard_pile else [discard_pile[-1]]
            dead_cards = discard_pile[:-1]
            last_action = self.get_last_action()
            opponent_id = (player_id + 1) % 2
            opponent = self.round.players[opponent_id]
            known_cards = opponent.known_cards
            # once scoring has started, the opponent's full hand becomes visible
            if isinstance(last_action, ScoreNorthPlayerAction) or isinstance(last_action, ScoreSouthPlayerAction):
                known_cards = opponent.hand
            unknown_cards = self.round.dealer.stock_pile + [card for card in opponent.hand if card not in known_cards]
            state['player_id'] = self.round.current_player_id
            state['hand'] = [x.get_index() for x in self.round.players[self.round.current_player_id].hand]
            state['top_discard'] = [x.get_index() for x in top_discard]
            state['dead_cards'] = [x.get_index() for x in dead_cards]
            state['opponent_known_cards'] = [x.get_index() for x in known_cards]
            state['unknown_cards'] = [x.get_index() for x in unknown_cards]
        return state
    @staticmethod
    def decode_action(action_id) -> ActionEvent: # FIXME 200213 should return str
        ''' Action id -> the action_event in the game.

        Args:
            action_id (int): the id of the action

        Returns:
            action (ActionEvent): the action that will be passed to the game engine.
        '''
        return ActionEvent.decode_action(action_id=action_id)
from typing import List
from rlcard.games.base import Card
from .utils import utils
from .utils import melding
class GinRummyPlayer:
    ''' A gin rummy player: holds a hand plus memoized melds within it. '''
    def __init__(self, player_id: int, np_random):
        ''' Initialize a GinRummy player class

        Args:
            player_id (int): id for the player
        '''
        self.np_random = np_random
        self.player_id = player_id
        self.hand = []  # type: List[Card]
        self.known_cards = []  # type: List[Card] # opponent knows cards picked up by player and not yet discarded
        # memoization for speed:
        # meld_kinds_by_rank_id[rank_id] caches the set melds (3+ of a kind) of that rank;
        # meld_run_by_suit_id[suit_id] caches the run melds (3+ in sequence) of that suit.
        self.meld_kinds_by_rank_id = [[] for _ in range(13)]  # type: List[List[List[Card]]]
        self.meld_run_by_suit_id = [[] for _ in range(4)]  # type: List[List[List[Card]]]

    def get_player_id(self) -> int:
        ''' Return player's id
        '''
        return self.player_id

    def get_meld_clusters(self) -> List[List[List[Card]]]:
        ''' Return all clusters of 1 to 3 mutually disjoint melds in the hand.

        With 10-card hands and melds of at least 3 cards, a cluster can never
        hold more than 3 melds, so the triple nesting below is exhaustive.
        '''
        result = []  # type: List[List[List[Card]]]
        # Fix: the two comprehensions previously had their names swapped
        # (set melds were labeled run melds and vice versa). The concatenation
        # order (set melds first, then run melds) is preserved exactly.
        all_set_melds = [frozenset(meld_kind) for meld_kinds in self.meld_kinds_by_rank_id for meld_kind in meld_kinds]
        all_run_melds = [frozenset(meld_run) for meld_runs in self.meld_run_by_suit_id for meld_run in meld_runs]
        all_melds = all_set_melds + all_run_melds
        all_melds_count = len(all_melds)
        for i in range(0, all_melds_count):
            first_meld = all_melds[i]
            first_meld_list = list(first_meld)
            meld_cluster_1 = [first_meld_list]
            result.append(meld_cluster_1)
            for j in range(i + 1, all_melds_count):
                second_meld = all_melds[j]
                second_meld_list = list(second_meld)
                if not second_meld.isdisjoint(first_meld):
                    continue
                meld_cluster_2 = [first_meld_list, second_meld_list]
                result.append(meld_cluster_2)
                for k in range(j + 1, all_melds_count):
                    third_meld = all_melds[k]
                    third_meld_list = list(third_meld)
                    if not third_meld.isdisjoint(first_meld) or not third_meld.isdisjoint(second_meld):
                        continue
                    meld_cluster_3 = [first_meld_list, second_meld_list, third_meld_list]
                    result.append(meld_cluster_3)
        return result

    def did_populate_hand(self):
        ''' Rebuild both meld caches from scratch after the hand was dealt. '''
        self.meld_kinds_by_rank_id = [[] for _ in range(13)]
        self.meld_run_by_suit_id = [[] for _ in range(4)]
        all_set_melds = melding.get_all_set_melds(hand=self.hand)
        for set_meld in all_set_melds:
            rank_id = utils.get_rank_id(set_meld[0])
            self.meld_kinds_by_rank_id[rank_id].append(set_meld)
        all_run_melds = melding.get_all_run_melds(hand=self.hand)
        for run_meld in all_run_melds:
            suit_id = utils.get_suit_id(run_meld[0])
            self.meld_run_by_suit_id[suit_id].append(run_meld)

    def add_card_to_hand(self, card: Card):
        ''' Add card to the hand and update both meld caches incrementally. '''
        self.hand.append(card)
        self._increase_meld_kinds_by_rank_id(card=card)
        self._increase_run_kinds_by_suit_id(card=card)

    def remove_card_from_hand(self, card: Card):
        ''' Remove card from the hand and update both meld caches incrementally. '''
        self.hand.remove(card)
        self._reduce_meld_kinds_by_rank_id(card=card)
        self._reduce_run_kinds_by_suit_id(card=card)

    def __str__(self):
        return "N" if self.player_id == 0 else "S"

    @staticmethod
    def short_name_of(player_id: int) -> str:
        ''' Return "N" for player 0 and "S" for player 1. '''
        return "N" if player_id == 0 else "S"

    @staticmethod
    def opponent_id_of(player_id: int) -> int:
        ''' Return the id of the other player. '''
        return (player_id + 1) % 2

    # private methods

    def _increase_meld_kinds_by_rank_id(self, card: Card):
        ''' Update the set-meld cache for card's rank after card was added. '''
        rank_id = utils.get_rank_id(card)
        meld_kinds = self.meld_kinds_by_rank_id[rank_id]
        if len(meld_kinds) == 0:
            card_rank = card.rank
            # fix: comprehension variable renamed to stop shadowing the parameter
            meld_kind = [held_card for held_card in self.hand if held_card.rank == card_rank]
            if len(meld_kind) >= 3:
                self.meld_kinds_by_rank_id[rank_id].append(meld_kind)
        else:  # the hand must now hold all four cards of the given rank
            suits = ['S', 'H', 'D', 'C']
            max_kind_meld = [Card(suit, card.rank) for suit in suits]
            self.meld_kinds_by_rank_id[rank_id] = [max_kind_meld]
            for meld_card in max_kind_meld:
                self.meld_kinds_by_rank_id[rank_id].append([kept for kept in max_kind_meld if kept != meld_card])

    def _reduce_meld_kinds_by_rank_id(self, card: Card):
        ''' Update the set-meld cache for card's rank after card was removed. '''
        rank_id = utils.get_rank_id(card)
        meld_kinds = self.meld_kinds_by_rank_id[rank_id]
        if len(meld_kinds) > 1:
            # all four suits were held; the remaining three form the only set meld
            suits = ['S', 'H', 'D', 'C']
            self.meld_kinds_by_rank_id[rank_id] = [[Card(suit, card.rank) for suit in suits if suit != card.suit]]
        else:
            self.meld_kinds_by_rank_id[rank_id] = []

    def _increase_run_kinds_by_suit_id(self, card: Card):
        ''' Recompute the run-meld cache for card's suit after card was added. '''
        suit_id = utils.get_suit_id(card=card)
        self.meld_run_by_suit_id[suit_id] = melding.get_all_run_melds_for_suit(cards=self.hand, suit=card.suit)

    def _reduce_run_kinds_by_suit_id(self, card: Card):
        ''' Drop every cached run meld of card's suit that contained card. '''
        suit_id = utils.get_suit_id(card=card)
        meld_runs = self.meld_run_by_suit_id[suit_id]
        self.meld_run_by_suit_id[suit_id] = [meld_run for meld_run in meld_runs if card not in meld_run]
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .utils.move import GinRummyMove
from typing import List
from rlcard.games.gin_rummy.dealer import GinRummyDealer
from .utils.action_event import DrawCardAction, PickUpDiscardAction, DeclareDeadHandAction
from .utils.action_event import DiscardAction, KnockAction, GinAction
from .utils.action_event import ScoreNorthPlayerAction, ScoreSouthPlayerAction
from .utils.move import DealHandMove
from .utils.move import DrawCardMove, PickupDiscardMove, DeclareDeadHandMove
from .utils.move import DiscardMove, KnockMove, GinMove
from .utils.move import ScoreNorthMove, ScoreSouthMove
from .utils.gin_rummy_error import GinRummyProgramError
from .player import GinRummyPlayer
from . import judge
from rlcard.games.gin_rummy.utils import melding
from rlcard.games.gin_rummy.utils import utils
class GinRummyRound:
    ''' One hand of gin rummy: owns the dealer, both players and the move history. '''
    def __init__(self, dealer_id: int, np_random):
        ''' Initialize the round class

        The round class maintains the following instances:
            1) dealer: the dealer of the round; dealer has stock_pile and discard_pile
            2) players: the players in the round; each player has his own hand_pile
            3) current_player_id: the id of the current player who has the move
            4) is_over: true if the round is over
            5) going_out_action: knock or gin or None
            6) going_out_player_id: id of player who went out or None
            7) move_sheet: history of the moves of the player (including the deal_hand_move)

        The round class maintains a list of moves made by the players in self.move_sheet.
        move_sheet is similar to a chess score sheet.
        I didn't want to call it a score_sheet since it is not keeping score.
        I could have called move_sheet just moves, but that might conflict with the name moves used elsewhere.
        I settled on the longer name "move_sheet" to indicate that it is the official list of moves being made.

        Args:
            dealer_id: int
        '''
        self.np_random = np_random
        self.dealer_id = dealer_id
        self.dealer = GinRummyDealer(self.np_random)
        self.players = [GinRummyPlayer(player_id=0, np_random=self.np_random), GinRummyPlayer(player_id=1, np_random=self.np_random)]
        # the non-dealer has the first move
        self.current_player_id = (dealer_id + 1) % 2
        self.is_over = False
        self.going_out_action = None # going_out_action: int or None
        self.going_out_player_id = None # going_out_player_id: int or None
        self.move_sheet = [] # type: List[GinRummyMove]
        # a fresh player object serves purely as the dealer record in the move sheet
        player_dealing = GinRummyPlayer(player_id=dealer_id, np_random=self.np_random)
        shuffled_deck = self.dealer.shuffled_deck
        self.move_sheet.append(DealHandMove(player_dealing=player_dealing, shuffled_deck=shuffled_deck))
    def get_current_player(self) -> GinRummyPlayer or None:
        # returns the player object for current_player_id, or None if unset
        current_player_id = self.current_player_id
        return None if current_player_id is None else self.players[current_player_id]
    def draw_card(self, action: DrawCardAction):
        # when current_player takes DrawCardAction step, the move is recorded and executed
        # current_player keeps turn
        current_player = self.players[self.current_player_id]
        if not len(current_player.hand) == 10:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 10.".format(len(current_player.hand)))
        card = self.dealer.stock_pile.pop()
        self.move_sheet.append(DrawCardMove(current_player, action=action, card=card))
        current_player.add_card_to_hand(card=card)
    def pick_up_discard(self, action: PickUpDiscardAction):
        # when current_player takes PickUpDiscardAction step, the move is recorded and executed
        # opponent knows that the card is in current_player hand
        # current_player keeps turn
        current_player = self.players[self.current_player_id]
        if not len(current_player.hand) == 10:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 10.".format(len(current_player.hand)))
        card = self.dealer.discard_pile.pop()
        self.move_sheet.append(PickupDiscardMove(current_player, action, card=card))
        current_player.add_card_to_hand(card=card)
        current_player.known_cards.append(card)
    def declare_dead_hand(self, action: DeclareDeadHandAction):
        # when current_player takes DeclareDeadHandAction step, the move is recorded and executed
        # north becomes current_player to score his hand
        current_player = self.players[self.current_player_id]
        self.move_sheet.append(DeclareDeadHandMove(current_player, action))
        self.going_out_action = action
        self.going_out_player_id = self.current_player_id
        if not len(current_player.hand) == 10:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 10.".format(len(current_player.hand)))
        self.current_player_id = 0
    def discard(self, action: DiscardAction):
        # when current_player takes DiscardAction step, the move is recorded and executed
        # opponent knows that the card is no longer in current_player hand
        # current_player loses his turn and the opponent becomes the current player
        current_player = self.players[self.current_player_id]
        if not len(current_player.hand) == 11:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 11.".format(len(current_player.hand)))
        self.move_sheet.append(DiscardMove(current_player, action))
        card = action.card
        current_player.remove_card_from_hand(card=card)
        if card in current_player.known_cards:
            current_player.known_cards.remove(card)
        self.dealer.discard_pile.append(card)
        self.current_player_id = (self.current_player_id + 1) % 2
    def knock(self, action: KnockAction):
        # when current_player takes KnockAction step, the move is recorded and executed
        # opponent knows that the card is no longer in current_player hand
        # north becomes current_player to score his hand
        current_player = self.players[self.current_player_id]
        self.move_sheet.append(KnockMove(current_player, action))
        self.going_out_action = action
        self.going_out_player_id = self.current_player_id
        if not len(current_player.hand) == 11:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 11.".format(len(current_player.hand)))
        card = action.card
        current_player.remove_card_from_hand(card=card)
        if card in current_player.known_cards:
            current_player.known_cards.remove(card)
        self.current_player_id = 0
    def gin(self, action: GinAction, going_out_deadwood_count: int):
        # when current_player takes GinAction step, the move is recorded and executed
        # opponent knows that the card is no longer in current_player hand
        # north becomes current_player to score his hand
        current_player = self.players[self.current_player_id]
        self.move_sheet.append(GinMove(current_player, action))
        self.going_out_action = action
        self.going_out_player_id = self.current_player_id
        if not len(current_player.hand) == 11:
            raise GinRummyProgramError("len(current_player.hand) is {}: should be 11.".format(len(current_player.hand)))
        # NOTE: the card to discard for gin is recomputed from the hand by the
        # judge rather than carried on the action itself
        _, gin_cards = judge.get_going_out_cards(current_player.hand, going_out_deadwood_count)
        card = gin_cards[0]
        current_player.remove_card_from_hand(card=card)
        if card in current_player.known_cards:
            current_player.known_cards.remove(card)
        self.current_player_id = 0
    def score_player_0(self, action: ScoreNorthPlayerAction):
        # when current_player takes ScoreNorthPlayerAction step, the move is recorded and executed
        # south becomes current player
        if not self.current_player_id == 0:
            raise GinRummyProgramError("current_player_id is {}: should be 0.".format(self.current_player_id))
        current_player = self.get_current_player()
        best_meld_clusters = melding.get_best_meld_clusters(hand=current_player.hand)
        best_meld_cluster = [] if not best_meld_clusters else best_meld_clusters[0]
        deadwood_count = utils.get_deadwood_count(hand=current_player.hand, meld_cluster=best_meld_cluster)
        self.move_sheet.append(ScoreNorthMove(player=current_player,
                                              action=action,
                                              best_meld_cluster=best_meld_cluster,
                                              deadwood_count=deadwood_count))
        self.current_player_id = 1
    def score_player_1(self, action: ScoreSouthPlayerAction):
        # when current_player takes ScoreSouthPlayerAction step, the move is recorded and executed
        # south remains current player
        # the round is over
        if not self.current_player_id == 1:
            raise GinRummyProgramError("current_player_id is {}: should be 1.".format(self.current_player_id))
        current_player = self.get_current_player()
        best_meld_clusters = melding.get_best_meld_clusters(hand=current_player.hand)
        best_meld_cluster = [] if not best_meld_clusters else best_meld_clusters[0]
        deadwood_count = utils.get_deadwood_count(hand=current_player.hand, meld_cluster=best_meld_cluster)
        self.move_sheet.append(ScoreSouthMove(player=current_player,
                                              action=action,
                                              best_meld_cluster=best_meld_cluster,
                                              deadwood_count=deadwood_count))
        self.is_over = True
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..game import GinRummyGame
from typing import Callable
from .action_event import *
from ..player import GinRummyPlayer
from .move import ScoreNorthMove, ScoreSouthMove
from .gin_rummy_error import GinRummyProgramError
from rlcard.games.gin_rummy.utils import melding
from rlcard.games.gin_rummy.utils import utils
class GinRummyScorer:
    ''' Turn a finished GinRummyGame into a payoff for each of the two players. '''

    def __init__(self, name: str = None,
                 get_payoff: 'Callable[[GinRummyPlayer, GinRummyGame], float]' = None):
        ''' Create a scorer.

        Args:
            name (str): scorer name; defaults to "GinRummyScorer"
            get_payoff: function mapping (player, game) to that player's payoff;
                defaults to get_payoff_gin_rummy_v1.
                (Fix: the annotation previously read ``int or float``, which
                evaluates to just ``int``; it is now a string annotation and
                is never evaluated at runtime.)
        '''
        self.name = name if name is not None else "GinRummyScorer"
        self.get_payoff = get_payoff if get_payoff else get_payoff_gin_rummy_v1

    def get_payoffs(self, game: 'GinRummyGame'):
        ''' Return the payoffs [north_payoff, south_payoff] for the game. '''
        return [self.get_payoff(player=game.round.players[i], game=game)
                for i in range(2)]
def get_payoff_gin_rummy_v0(player: GinRummyPlayer, game: 'GinRummyGame') -> int:
    ''' Payoff of player under scheme v0: the player's final deadwood count.

    Returns:
        payoff (int or float): payoff for player (lower is better)
    '''
    moves = game.round.move_sheet
    # north's scoring move is second-to-last on the sheet, south's is last
    if player.player_id == 0:
        score_player_move = moves[-2]
        expected_move_type = ScoreNorthMove
        expected_name = "ScoreNorthMove"
    else:
        score_player_move = moves[-1]
        expected_move_type = ScoreSouthMove
        expected_name = "ScoreSouthMove"
    if not isinstance(score_player_move, expected_move_type):
        raise GinRummyProgramError("score_player_move must be {}.".format(expected_name))
    return score_player_move.deadwood_count
def get_payoff_gin_rummy_v1(player: GinRummyPlayer, game: 'GinRummyGame') -> float:
    ''' Payoff of player under scheme v1 (higher is better):

    a) 1.0 if the player gins
    b) 0.2 if the player knocks
    c) -deadwood_count / 100 otherwise

    The goal is to have the agent learn how to knock and gin; the negative
    payoff when it does neither encourages forming melds. All payoffs are
    scaled to lie between -1 and 1.
    '''
    going_out_action = game.round.going_out_action
    player_went_out = game.round.going_out_player_id == player.player_id
    if player_went_out and isinstance(going_out_action, KnockAction):
        return 0.2
    if player_went_out and isinstance(going_out_action, GinAction):
        return 1
    hand = player.hand
    best_meld_clusters = melding.get_best_meld_clusters(hand=hand)
    best_meld_cluster = [] if not best_meld_clusters else best_meld_clusters[0]
    deadwood_count = utils.get_deadwood_count(hand, best_meld_cluster)
    return -deadwood_count / 100
from typing import List, Iterable
import numpy as np
from rlcard.games.base import Card
from .gin_rummy_error import GinRummyProgramError
# Orderings of ranks and suits used across the gin rummy utilities.
# NOTE(review): the id helpers below read Card.valid_rank / Card.valid_suit,
# not these module-level lists; these appear to be kept for external callers
# -- confirm before removing.
valid_rank = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
valid_suit = ['S', 'H', 'D', 'C']
# Deadwood points per rank: ace counts 1, face cards count 10, others pip value.
rank_to_deadwood_value = {"A": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9,
                          "T": 10, "J": 10, "Q": 10, "K": 10}
def card_from_card_id(card_id: int) -> Card:
    ''' Build the Card corresponding to card_id.

    Args:
        card_id: int in range(0, 52)
    '''
    if not (0 <= card_id < 52):
        raise GinRummyProgramError("card_id is {}: should be 0 <= card_id < 52.".format(card_id))
    # ids are suit-major: 13 consecutive ids per suit, rank within
    suit_id, rank_id = divmod(card_id, 13)
    return Card(rank=Card.valid_rank[rank_id], suit=Card.valid_suit[suit_id])
# deck is always in order from AS, 2S, ..., AH, 2H, ..., AD, 2D, ..., AC, 2C, ... QC, KC
# NOTE: the index of each card in _deck equals its card_id, so lookups stay O(1)
_deck = [card_from_card_id(card_id) for card_id in range(52)] # want this to be read-only
def card_from_text(text: str) -> Card:
    ''' Parse a two-character label such as "AS" (rank then suit) into a Card. '''
    if len(text) != 2:
        raise GinRummyProgramError("len(text) is {}: should be 2.".format(len(text)))
    rank, suit = text
    return Card(rank=rank, suit=suit)
def get_deck() -> List[Card]:
    ''' Return a fresh shallow copy of the ordered 52-card deck. '''
    return list(_deck)
def get_card(card_id: int):
    ''' Return the shared Card instance for card_id (no copy; treat as read-only). '''
    return _deck[card_id]
def get_card_id(card: Card) -> int:
    ''' Return the 0..51 id of card: suit-major, rank-minor ordering. '''
    return get_rank_id(card) + 13 * get_suit_id(card)
def get_rank_id(card: Card) -> int:
    ''' Return the index of card.rank within Card.valid_rank (0 for 'A'). '''
    return Card.valid_rank.index(card.rank)
def get_suit_id(card: Card) -> int:
    ''' Return the index of card.suit within Card.valid_suit (0 for 'S'). '''
    return Card.valid_suit.index(card.suit)
def get_deadwood_value(card: Card) -> int:
    ''' Deadwood points for a single card (ace 1, faces 10, others pip value). '''
    # unknown ranks default to 10
    return rank_to_deadwood_value.get(card.rank, 10)
def get_deadwood(hand: 'Iterable[Card]', meld_cluster: 'List[Iterable[Card]]') -> 'List[Card]':
    ''' Return the cards of hand that are not covered by meld_cluster.

    Args:
        hand: the 10 held cards; any iterable (it is materialized exactly once,
            so a generator argument is safe -- previously ``list(hand)`` was
            called twice and the hand was then iterated a third time, which
            silently returned an empty deadwood list for generator inputs)
        meld_cluster: melds formed from hand; their cards are excluded

    Raises:
        GinRummyProgramError: if hand does not contain exactly 10 cards
    '''
    cards = list(hand)  # materialize once; never re-consume the argument
    if len(cards) != 10:
        raise GinRummyProgramError("Hand contain {} cards: should be 10 cards.".format(len(cards)))
    meld_cards = [card for meld_pile in meld_cluster for card in meld_pile]
    return [card for card in cards if card not in meld_cards]
def get_deadwood_count(hand: List[Card], meld_cluster: List[Iterable[Card]]) -> int:
    ''' Sum the deadwood point values of the unmelded cards in hand. '''
    if len(hand) != 10:
        raise GinRummyProgramError("Hand contain {} cards: should be 10 cards.".format(len(hand)))
    unmelded = get_deadwood(hand=hand, meld_cluster=meld_cluster)
    return sum(get_deadwood_value(card) for card in unmelded)
def decode_cards(env_cards: np.ndarray) -> List[Card]:
    ''' Turn a 52-dim one-hot vector into the list of flagged cards (deck order). '''
    if len(env_cards) != 52:
        raise GinRummyProgramError("len(env_cards) is {}: should be 52.".format(len(env_cards)))
    return [_deck[card_id] for card_id in range(52) if env_cards[card_id] == 1]
def encode_cards(cards: List[Card]) -> np.ndarray:
    ''' One-hot encode cards into a 52-dim int vector indexed by card id. '''
    plane = np.zeros(52, dtype=int)
    card_ids = [get_card_id(current) for current in cards]
    plane[card_ids] = 1
    return plane
from typing import List
from rlcard.games.base import Card
from rlcard.games.gin_rummy.utils import utils
from rlcard.games.gin_rummy.utils.gin_rummy_error import GinRummyProgramError
# ===============================================================
# Terminology:
# run_meld - three or more cards of same suit in sequence
# set_meld - three or more cards of same rank
# meld_pile - a run_meld or a set_meld
# meld_piles - a list of meld_pile
# meld_cluster - same as meld_piles, but usually with the piles being mutually disjoint
# meld_clusters - a list of meld_cluster
# ===============================================================
def get_meld_clusters(hand: List[Card]) -> List[List[List[Card]]]:
    ''' Return all clusters of 1 to 3 mutually disjoint melds found in hand.

    Only clusters of at most 3 melds are generated; for the 10-card hands used
    by get_best_meld_clusters this is exhaustive, because each meld contains at
    least 3 cards.
    '''
    result = [] # type: List[List[List[Card]]]
    # frozensets give cheap pairwise disjointness tests below
    all_run_melds = [frozenset(x) for x in get_all_run_melds(hand)]
    all_set_melds = [frozenset(x) for x in get_all_set_melds(hand)]
    all_melds = all_run_melds + all_set_melds
    all_melds_count = len(all_melds)
    # enumerate singletons, then disjoint pairs, then disjoint triples
    for i in range(0, all_melds_count):
        first_meld = all_melds[i]
        first_meld_list = list(first_meld)
        meld_cluster_1 = [first_meld_list]
        result.append(meld_cluster_1)
        for j in range(i + 1, all_melds_count):
            second_meld = all_melds[j]
            second_meld_list = list(second_meld)
            if not second_meld.isdisjoint(first_meld):
                continue
            meld_cluster_2 = [first_meld_list, second_meld_list]
            result.append(meld_cluster_2)
            for k in range(j + 1, all_melds_count):
                third_meld = all_melds[k]
                third_meld_list = list(third_meld)
                if not third_meld.isdisjoint(first_meld) or not third_meld.isdisjoint(second_meld):
                    continue
                meld_cluster_3 = [first_meld_list, second_meld_list, third_meld_list]
                result.append(meld_cluster_3)
    return result
def get_best_meld_clusters(hand: List[Card]) -> List[List[List[Card]]]:
    ''' Return every meld cluster of hand achieving the minimum deadwood count. '''
    if len(hand) != 10:
        raise GinRummyProgramError("Hand contain {} cards: should be 10 cards.".format(len(hand)))
    meld_clusters = get_meld_clusters(hand=hand) # type: List[List[List[Card]]]
    if not meld_clusters:
        return []
    deadwood_counts = [utils.get_deadwood_count(hand=hand, meld_cluster=meld_cluster)
                       for meld_cluster in meld_clusters]
    best_deadwood_count = min(deadwood_counts)
    return [cluster for cluster, count in zip(meld_clusters, deadwood_counts)
            if count == best_deadwood_count]
def get_all_run_melds(hand: List[Card]) -> List[List[Card]]:
    ''' Return every run meld (3+ consecutive ranks of one suit) in hand. '''
    card_count = len(hand)
    # sorting by card_id groups the cards by suit and orders ranks within a suit
    hand_by_suit = sorted(hand, key=utils.get_card_id)
    max_run_melds = []
    # first pass: collect the maximal runs
    i = 0
    while i < card_count - 2:
        card_i = hand_by_suit[i]
        j = i + 1
        card_j = hand_by_suit[j]
        # extend j while the cards stay consecutive in rank and in the same suit
        while utils.get_rank_id(card_j) == utils.get_rank_id(card_i) + j - i and card_j.suit == card_i.suit:
            j += 1
            if j < card_count:
                card_j = hand_by_suit[j]
            else:
                break
        max_run_meld = hand_by_suit[i:j]
        if len(max_run_meld) >= 3:
            max_run_melds.append(max_run_meld)
        i = j
    # second pass: emit every sub-run of length >= 3 of each maximal run
    result = []
    for max_run_meld in max_run_melds:
        max_run_meld_count = len(max_run_meld)
        for i in range(max_run_meld_count - 2):
            for j in range(i + 3, max_run_meld_count + 1):
                result.append(max_run_meld[i:j])
    return result
def get_all_set_melds(hand: List[Card]) -> List[List[Card]]:
    ''' Return every set meld (3 or 4 cards of equal rank) in the hand.

    Maximal same-rank groups are collected from the rank-sorted hand; a
    4-card group additionally yields its four 3-card subsets.
    '''
    maximal_sets = []
    same_rank_group = []
    previous_rank = None
    for card in sorted(hand, key=lambda c: c.rank):
        if previous_rank is None or previous_rank == card.rank:
            same_rank_group.append(card)
        else:
            # Rank changed: flush the finished group if it is a valid set.
            if len(same_rank_group) >= 3:
                maximal_sets.append(same_rank_group)
            same_rank_group = [card]
        previous_rank = card.rank
    if len(same_rank_group) >= 3:
        maximal_sets.append(same_rank_group)
    melds = []
    for full_set in maximal_sets:
        melds.append(full_set)
        if len(full_set) == 4:
            # Each card omitted in turn gives a distinct 3-card set.
            for omitted in full_set:
                melds.append([kept for kept in full_set if kept != omitted])
    return melds
def get_all_run_melds_for_suit(cards: List[Card], suit: str) -> List[List[Card]]:
    ''' Return every run meld of length >= 3 formed by the cards of the given suit.

    Same expansion strategy as get_all_run_melds, but restricted to one suit,
    so the suit check inside the run scan is unnecessary.
    '''
    suited = sorted([card for card in cards if card.suit == suit], key=utils.get_card_id)
    total = len(suited)
    maximal_runs = []
    start = 0
    while start < total - 2:
        base = suited[start]
        stop = start + 1
        # Extend while the rank sequence stays consecutive.
        while stop < total:
            if utils.get_rank_id(suited[stop]) == utils.get_rank_id(base) + (stop - start):
                stop += 1
            else:
                break
        run = suited[start:stop]
        if len(run) >= 3:
            maximal_runs.append(run)
        start = stop
    # Expand each maximal run into all sub-runs of length >= 3.
    melds = []
    for run in maximal_runs:
        size = len(run)
        for lo in range(size - 2):
            for hi in range(lo + 3, size + 1):
                melds.append(run[lo:hi])
    return melds
from typing import List
import numpy as np
from .judger import BridgeJudger
from .round import BridgeRound
from .utils.action_event import ActionEvent, CallActionEvent, PlayCardAction
class BridgeGame:
    ''' Game class. This class will interact with outer environment.
    '''

    def __init__(self, allow_step_back=False):
        ''' Initialize the class BridgeGame

        Args:
            allow_step_back (bool): kept for interface compatibility with the
                other rlcard games (bridge does not implement step_back)
        '''
        self.allow_step_back: bool = allow_step_back
        self.np_random = np.random.RandomState()
        self.judger: BridgeJudger = BridgeJudger(game=self)
        # Both of the following are re-created in init_game.
        self.actions: List[ActionEvent] = []
        self.round: 'BridgeRound | None' = None
        self.num_players: int = 4

    def init_game(self):
        ''' Initialize all characters in the game and start round 1

        Picks a random board (which fixes dealer seat and vulnerability),
        deals 13 cards to each of the four players, and returns the first
        state together with the id of the player to act.

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        '''
        board_id = self.np_random.choice([1, 2, 3, 4])
        self.actions = []
        self.round = BridgeRound(num_players=self.num_players, board_id=board_id, np_random=self.np_random)
        for player_id in range(4):
            player = self.round.players[player_id]
            self.round.dealer.deal_cards(player=player, num=13)
        current_player_id = self.round.current_player_id
        state = self.get_state(player_id=current_player_id)
        return state, current_player_id

    def step(self, action: ActionEvent):
        ''' Perform game action and return next player number, and the state for next player

        Args:
            action (ActionEvent): a call (pass/bid/dbl/rdbl) or a play-card action

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player's id

        Raises:
            Exception: if the action is neither a call nor a play-card action.
        '''
        if isinstance(action, CallActionEvent):
            self.round.make_call(action=action)
        elif isinstance(action, PlayCardAction):
            self.round.play_card(action=action)
        else:
            raise Exception(f'Unknown step action={action}')
        self.actions.append(action)
        next_player_id = self.round.current_player_id
        next_state = self.get_state(player_id=next_player_id)
        return next_state, next_player_id

    def get_num_players(self) -> int:
        ''' Return the number of players in the game (always 4 for bridge)
        '''
        return self.num_players

    @staticmethod
    def get_num_actions() -> int:
        ''' Return the number of possible actions in the game
        '''
        return ActionEvent.get_num_actions()

    def get_player_id(self):
        ''' Return the current player that will take actions soon
        '''
        return self.round.current_player_id

    def is_over(self) -> bool:
        ''' Return whether the current game is over
        '''
        return self.round.is_over()

    def get_state(self, player_id: int):  # wch: not really used
        ''' Get player's state

        Args:
            player_id (int): the id of the player whose state is requested

        Returns:
            state (dict): The information of the state
        '''
        # NOTE(review): the original implementation branched on is_over() but
        # both branches built the identical dict, so the branch was removed.
        state = {
            'player_id': player_id,
            'current_player_id': self.round.current_player_id,
            'hand': self.round.players[player_id].hand,
        }
        return state
from typing import List
from .dealer import BridgeDealer
from .player import BridgePlayer
from .utils.action_event import CallActionEvent, PassAction, DblAction, RdblAction, BidAction, PlayCardAction
from .utils.move import BridgeMove, DealHandMove, PlayCardMove, MakeBidMove, MakePassMove, MakeDblMove, MakeRdblMove, CallMove
from .utils.tray import Tray
class BridgeRound:
    ''' One deal of bridge: tracks the auction, the card play, and the full
    move history in self.move_sheet.
    '''

    @property
    def dealer_id(self) -> int:
        # Seat of the dealer for this board (fixed by the tray).
        return self.tray.dealer_id

    @property
    def vul(self):
        # Vulnerability for the two sides (fixed by the tray).
        return self.tray.vul

    @property
    def board_id(self) -> int:
        return self.tray.board_id

    @property
    def round_phase(self):
        # Human-readable phase: 'make bid' -> 'play card' -> 'game over'.
        if self.is_over():
            result = 'game over'
        elif self.is_bidding_over():
            result = 'play card'
        else:
            result = 'make bid'
        return result

    def __init__(self, num_players: int, board_id: int, np_random):
        ''' Initialize the round class

        The round class maintains the following instances:
            1) dealer: the dealer of the round; dealer has trick_pile
            2) players: the players in the round; each player has his own hand_pile
            3) current_player_id: the id of the current player who has the move
            4) doubling_cube: 2 if contract is doubled; 4 if contract is redoubled; else 1
            5) play_card_count: count of PlayCardMoves
            6) move_sheet: history of the moves of the players (including the deal_hand_move)

        The round class maintains a list of moves made by the players in self.move_sheet.
        move_sheet is similar to a chess score sheet.
        I didn't want to call it a score_sheet since it is not keeping score.
        I could have called move_sheet just moves, but that might conflict with the name moves used elsewhere.
        I settled on the longer name "move_sheet" to indicate that it is the official list of moves being made.

        Args:
            num_players: int
            board_id: int
            np_random
        '''
        tray = Tray(board_id=board_id)
        dealer_id = tray.dealer_id
        self.tray = tray
        self.np_random = np_random
        self.dealer: BridgeDealer = BridgeDealer(self.np_random)
        self.players: List[BridgePlayer] = []
        for player_id in range(num_players):
            self.players.append(BridgePlayer(player_id=player_id, np_random=self.np_random))
        # The dealer makes the first call of the auction.
        self.current_player_id: int = dealer_id
        self.doubling_cube: int = 1
        self.play_card_count: int = 0
        self.contract_bid_move: MakeBidMove or None = None
        self.won_trick_counts = [0, 0]  # count of won tricks by side, indexed by player_id % 2
        self.move_sheet: List[BridgeMove] = []
        self.move_sheet.append(DealHandMove(dealer=self.players[dealer_id], shuffled_deck=self.dealer.shuffled_deck))

    def is_bidding_over(self) -> bool:
        ''' Return whether the current bidding is over

        The auction ends once the three most recent calls are all passes
        (the history is scanned backwards; any other call before three
        passes means the auction is still open).
        '''
        is_bidding_over = True
        if len(self.move_sheet) < 5:
            # Deal move plus at least four calls are needed before the auction can end.
            is_bidding_over = False
        else:
            last_make_pass_moves: List[MakePassMove] = []
            for move in reversed(self.move_sheet):
                if isinstance(move, MakePassMove):
                    last_make_pass_moves.append(move)
                    if len(last_make_pass_moves) == 3:
                        break
                elif isinstance(move, CallMove):
                    # A non-pass call interrupts the run of trailing passes.
                    is_bidding_over = False
                    break
                else:
                    break
        return is_bidding_over

    def is_over(self) -> bool:
        ''' Return whether the current game is over

        A passed-out auction (no contract) ends the game immediately; with a
        contract, play continues until every hand is empty.
        '''
        is_over = True
        if not self.is_bidding_over():
            is_over = False
        elif self.contract_bid_move:
            for player in self.players:
                if player.hand:
                    is_over = False
                    break
        return is_over

    def get_current_player(self) -> BridgePlayer or None:
        ''' Return the player whose turn it is (None if there is no current player).
        '''
        current_player_id = self.current_player_id
        return None if current_player_id is None else self.players[current_player_id]

    def get_trick_moves(self) -> List[PlayCardMove]:
        ''' Return the PlayCardMoves of the trick in progress — or of the trick
        just completed when play_card_count is a multiple of 4.
        '''
        trick_moves: List[PlayCardMove] = []
        if self.is_bidding_over():
            if self.play_card_count > 0:
                trick_pile_count = self.play_card_count % 4
                if trick_pile_count == 0:
                    trick_pile_count = 4  # wch: note this
                # The trick's moves are the last trick_pile_count entries of the move sheet.
                for move in self.move_sheet[-trick_pile_count:]:
                    if isinstance(move, PlayCardMove):
                        trick_moves.append(move)
                if len(trick_moves) != trick_pile_count:
                    raise Exception(f'get_trick_moves: count of trick_moves={[str(move.card) for move in trick_moves]} does not equal {trick_pile_count}')
        return trick_moves

    def get_trump_suit(self) -> str or None:
        ''' Return the trump suit of the contract (None while there is no
        contract, or when the contract is no-trump).
        '''
        trump_suit = None
        if self.contract_bid_move:
            trump_suit = self.contract_bid_move.action.bid_suit
        return trump_suit

    def make_call(self, action: CallActionEvent):
        ''' Record and execute a call (pass, bid, double, or redouble) by the
        current player, then advance the turn.
        '''
        # when current_player takes CallActionEvent step, the move is recorded and executed
        current_player = self.players[self.current_player_id]
        if isinstance(action, PassAction):
            self.move_sheet.append(MakePassMove(current_player))
        elif isinstance(action, BidAction):
            # A new bid cancels any double/redouble on the previous bid.
            self.doubling_cube = 1
            make_bid_move = MakeBidMove(current_player, action)
            self.contract_bid_move = make_bid_move
            self.move_sheet.append(make_bid_move)
        elif isinstance(action, DblAction):
            self.doubling_cube = 2
            self.move_sheet.append(MakeDblMove(current_player))
        elif isinstance(action, RdblAction):
            self.doubling_cube = 4
            self.move_sheet.append(MakeRdblMove(current_player))
        if self.is_bidding_over():
            if not self.is_over():
                # The defender to the declarer's left makes the opening lead.
                self.current_player_id = self.get_left_defender().player_id
        else:
            self.current_player_id = (self.current_player_id + 1) % 4

    def play_card(self, action: PlayCardAction):
        ''' Record and execute a PlayCardAction: remove the card from the
        player's hand and, when the trick completes, determine its winner.
        '''
        # when current_player takes PlayCardAction step, the move is recorded and executed
        current_player = self.players[self.current_player_id]
        self.move_sheet.append(PlayCardMove(current_player, action))
        card = action.card
        current_player.remove_card_from_hand(card=card)
        self.play_card_count += 1
        # update current_player_id
        trick_moves = self.get_trick_moves()
        if len(trick_moves) == 4:
            # Trick complete: the highest card of the led suit wins unless trumped.
            trump_suit = self.get_trump_suit()
            winning_card = trick_moves[0].card
            trick_winner = trick_moves[0].player
            for move in trick_moves[1:]:
                trick_card = move.card
                trick_player = move.player
                if trick_card.suit == winning_card.suit:
                    if trick_card.card_id > winning_card.card_id:
                        winning_card = trick_card
                        trick_winner = trick_player
                elif trick_card.suit == trump_suit:
                    # A trump beats the current (non-trump) winning card.
                    winning_card = trick_card
                    trick_winner = trick_player
            # The trick winner leads the next trick.
            self.current_player_id = trick_winner.player_id
            self.won_trick_counts[trick_winner.player_id % 2] += 1
        else:
            self.current_player_id = (self.current_player_id + 1) % 4

    def get_declarer(self) -> BridgePlayer or None:
        ''' Return the declarer: the first player on the contract side to have
        bid the contract's suit (None while there is no contract).
        '''
        declarer = None
        if self.contract_bid_move:
            trump_suit = self.contract_bid_move.action.bid_suit
            side = self.contract_bid_move.player.player_id % 2
            for move in self.move_sheet:
                if isinstance(move, MakeBidMove) and move.action.bid_suit == trump_suit and move.player.player_id % 2 == side:
                    declarer = move.player
                    break
        return declarer

    def get_dummy(self) -> BridgePlayer or None:
        # The dummy sits opposite the declarer.
        dummy = None
        declarer = self.get_declarer()
        if declarer:
            dummy = self.players[(declarer.player_id + 2) % 4]
        return dummy

    def get_left_defender(self) -> BridgePlayer or None:
        # The left defender is the next seat after the declarer.
        left_defender = None
        declarer = self.get_declarer()
        if declarer:
            left_defender = self.players[(declarer.player_id + 1) % 4]
        return left_defender

    def get_right_defender(self) -> BridgePlayer or None:
        # The right defender is the seat before the declarer.
        right_defender = None
        declarer = self.get_declarer()
        if declarer:
            right_defender = self.players[(declarer.player_id + 3) % 4]
        return right_defender

    def get_perfect_information(self):
        ''' Return a dict with the complete (hidden-information included) state:
        move count, tray, current player, phase, last call, doubling cube,
        contract, all hands, and the cards of the current trick.
        '''
        state = {}
        last_call_move = None
        if not self.is_bidding_over() or self.play_card_count == 0:
            last_move = self.move_sheet[-1]
            if isinstance(last_move, CallMove):
                last_call_move = last_move
        trick_moves = [None, None, None, None]
        if self.is_bidding_over():
            for trick_move in self.get_trick_moves():
                trick_moves[trick_move.player.player_id] = trick_move.card
        state['move_count'] = len(self.move_sheet)
        state['tray'] = self.tray
        state['current_player_id'] = self.current_player_id
        state['round_phase'] = self.round_phase
        state['last_call_move'] = last_call_move
        state['doubling_cube'] = self.doubling_cube
        # NOTE(review): key 'contact' (sic) kept as-is for compatibility with
        # existing consumers; it was presumably meant to read 'contract'.
        state['contact'] = self.contract_bid_move if self.is_bidding_over() and self.contract_bid_move else None
        state['hands'] = [player.hand for player in self.players]
        state['trick_moves'] = trick_moves
        return state

    def print_scene(self):
        ''' Print a human-readable summary of the current round to stdout.
        '''
        print(f'===== Board: {self.tray.board_id} move: {len(self.move_sheet)} player: {self.players[self.current_player_id]} phase: {self.round_phase} =====')
        print(f'dealer={self.players[self.tray.dealer_id]}')
        print(f'vul={self.vul}')
        if not self.is_bidding_over() or self.play_card_count == 0:
            last_move = self.move_sheet[-1]
            last_call_text = f'{last_move}' if isinstance(last_move, CallMove) else 'None'
            print(f'last call: {last_call_text}')
        if self.is_bidding_over() and self.contract_bid_move:
            bid_suit = self.contract_bid_move.action.bid_suit
            doubling_cube = self.doubling_cube
            if not bid_suit:
                bid_suit = 'NT'
            doubling_cube_text = "" if doubling_cube == 1 else "dbl" if doubling_cube == 2 else "rdbl"
            print(f'contract: {self.contract_bid_move.player} {self.contract_bid_move.action.bid_amount}{bid_suit} {doubling_cube_text}')
        for player in self.players:
            print(f'{player}: {[str(card) for card in player.hand]}')
        if self.is_bidding_over():
            trick_pile = ['None', 'None', 'None', 'None']
            for trick_move in self.get_trick_moves():
                trick_pile[trick_move.player.player_id] = trick_move.card
            print(f'trick_pile: {[str(card) for card in trick_pile]}')
from typing import List
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .game import BridgeGame
from .utils.action_event import PlayCardAction
from .utils.action_event import ActionEvent, BidAction, PassAction, DblAction, RdblAction
from .utils.move import MakeBidMove, MakeDblMove, MakeRdblMove
from .utils.bridge_card import BridgeCard
class BridgeJudger:
    '''
        Judger decides legal actions for current player
    '''

    def __init__(self, game: 'BridgeGame'):
        ''' Initialize the class BridgeJudger

        :param game: BridgeGame
        '''
        self.game: BridgeGame = game

    def get_legal_actions(self) -> List[ActionEvent]:
        """ Compute the legal actions for the player to move.

        During the auction: pass is always legal, every bid above the last
        bid is legal, and double/redouble are legal under the usual
        conditions. During play: any card of the led suit if the hand has
        one, otherwise any card.

        :return: List[ActionEvent] of legal actions
        """
        legal_actions: List[ActionEvent] = []
        if not self.game.is_over():
            current_player = self.game.round.get_current_player()
            if not self.game.round.is_bidding_over():
                # Auction phase: passing is always an option.
                legal_actions.append(PassAction())
                # Scan the history backwards for the latest bid and for any
                # double/redouble made since that bid.
                last_make_bid_move: MakeBidMove or None = None
                last_dbl_move: MakeDblMove or None = None
                last_rdbl_move: MakeRdblMove or None = None
                for move in reversed(self.game.round.move_sheet):
                    if isinstance(move, MakeBidMove):
                        last_make_bid_move = move
                        break
                    elif isinstance(move, MakeRdblMove):
                        last_rdbl_move = move
                    elif isinstance(move, MakeDblMove) and not last_rdbl_move:
                        last_dbl_move = move
                # Any of the 35 bids strictly above the last bid is legal
                # (all 35 when no bid has been made yet).
                first_bid_action_id = ActionEvent.first_bid_action_id
                next_bid_action_id = last_make_bid_move.action.action_id + 1 if last_make_bid_move else first_bid_action_id
                for bid_action_id in range(next_bid_action_id, first_bid_action_id + 35):
                    action = BidAction.from_action_id(action_id=bid_action_id)
                    legal_actions.append(action)
                # An opponent's undoubled bid may be doubled; an opponent's
                # double may be redoubled.
                if last_make_bid_move and last_make_bid_move.player.player_id % 2 != current_player.player_id % 2 and not last_dbl_move and not last_rdbl_move:
                    legal_actions.append(DblAction())
                if last_dbl_move and last_dbl_move.player.player_id % 2 != current_player.player_id % 2:
                    legal_actions.append(RdblAction())
            else:
                # Play phase: must follow the led suit when possible.
                trick_moves = self.game.round.get_trick_moves()
                hand = self.game.round.players[current_player.player_id].hand
                legal_cards = hand
                if trick_moves and len(trick_moves) < 4:
                    led_card: BridgeCard = trick_moves[0].card
                    cards_of_led_suit = [card for card in hand if card.suit == led_card.suit]
                    if cards_of_led_suit:
                        legal_cards = cards_of_led_suit
                for card in legal_cards:
                    action = PlayCardAction(card=card)
                    legal_actions.append(action)
        return legal_actions
class BlackjackJudger:
    ''' Judger for Blackjack: scores hands and decides the winner of a round.
    '''

    def __init__(self, np_random):
        ''' Initialize a BlackJack judger class
        '''
        self.np_random = np_random
        # Face cards and tens score 10; an ace scores 11 (softened to 1 on bust).
        self.rank2score = {"A": 11, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7,
                           "8": 8, "9": 9, "T": 10, "J": 10, "Q": 10, "K": 10}

    def judge_round(self, player):
        ''' Judge the target player's status

        Args:
            player (int): target player's id

        Returns:
            status (str): the status of the target player ('alive' or 'bust')
            score (int): the current score of the player
        '''
        score = self.judge_score(player.hand)
        status = "alive" if score <= 21 else "bust"
        return status, score

    def judge_game(self, game, game_pointer):
        ''' Judge the winner of the game

        Outcome codes written into game.winner['playerX']:
            -1: player busts, or dealer beats player
             1: player and dealer tie
             2: dealer busts (player alive), or player beats dealer
             0: the game is still ongoing

        Args:
            game (class): target game class
            game_pointer (int): index of the player to judge
        '''
        winner_key = 'player' + str(game_pointer)
        contender = game.players[game_pointer]
        if contender.status == 'bust':
            game.winner[winner_key] = -1
        elif game.dealer.status == 'bust':
            game.winner[winner_key] = 2
        elif contender.score > game.dealer.score:
            game.winner[winner_key] = 2
        elif contender.score < game.dealer.score:
            game.winner[winner_key] = -1
        else:
            game.winner[winner_key] = 1

    def judge_score(self, cards):
        ''' Judge the score of a given cards set

        Aces count 11 first; while the total exceeds 21, aces are demoted
        to 1 (i.e. 10 is subtracted) one at a time.

        Args:
            cards (list): a list of cards

        Returns:
            score (int): the score of the given cards set
        '''
        score = sum(self.rank2score[card.rank] for card in cards)
        soft_aces = sum(1 for card in cards if card.rank == 'A')
        while score > 21 and soft_aces > 0:
            score -= 10
            soft_aces -= 1
        return score
import numpy as np
from copy import copy
from rlcard.games.leducholdem import Dealer
from rlcard.games.leducholdem import Player
from rlcard.games.leducholdem import Judger
from rlcard.games.leducholdem import Round
from rlcard.games.limitholdem import Game
class LeducholdemGame(Game):
    ''' Leduc Hold'em game: a simplified two-round poker variant built on the
    limit hold'em Game base class.
    '''

    def __init__(self, allow_step_back=False, num_players=2):
        ''' Initialize the class leducholdem Game

        Args:
            allow_step_back (bool): snapshot state before each step so the
                game can be rolled back (used e.g. by CFR)
            num_players (int): number of players (Leduc is normally 2)
        '''
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()
        ''' No big/small blind
        # Some configarations of the game
        # These arguments are fixed in Leduc Hold'em Game
        # Raise amount and allowed times
        self.raise_amount = 2
        self.allowed_raise_num = 2
        self.num_players = 2
        '''
        # Some configurations of the game
        # These arguments can be specified for creating new games
        # Small blind and big blind
        self.small_blind = 1
        self.big_blind = 2 * self.small_blind
        # Raise amount and allowed times
        self.raise_amount = self.big_blind
        self.allowed_raise_num = 2
        self.num_players = num_players

    def configure(self, game_config):
        ''' Specify some game-specific parameters, such as the number of players
        '''
        self.num_players = game_config['game_num_players']

    def init_game(self):
        ''' Initialize the game of Limit Texas Hold'em

        This version supports two-player limit texas hold'em

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        '''
        # Initialize a dealer that can deal cards
        self.dealer = Dealer(self.np_random)
        # Initialize the players to play the game
        self.players = [Player(i, self.np_random) for i in range(self.num_players)]
        # Initialize a judger class which will decide who wins in the end
        self.judger = Judger(self.np_random)
        # Prepare for the first round: each player holds a single private card
        for i in range(self.num_players):
            self.players[i].hand = self.dealer.deal_card()
        # Randomly choose a small blind and a big blind
        s = self.np_random.randint(0, self.num_players)
        b = (s + 1) % self.num_players
        self.players[b].in_chips = self.big_blind
        self.players[s].in_chips = self.small_blind
        self.public_card = None
        # The player with small blind plays the first
        self.game_pointer = s
        # Initialize a betting round; in the first round, the big blind and the
        # small blind need to be passed to the round for processing.
        self.round = Round(raise_amount=self.raise_amount,
                           allowed_raise_num=self.allowed_raise_num,
                           num_players=self.num_players,
                           np_random=self.np_random)
        self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])
        # Count the round. There are 2 rounds in each game.
        self.round_counter = 0
        # Save the history for stepping back to the last state.
        self.history = []
        state = self.get_state(self.game_pointer)
        return state, self.game_pointer

    def step(self, action):
        ''' Get the next state

        Args:
            action (str): a specific action. (call, raise, fold, or check)

        Returns:
            (tuple): Tuple containing:

                (dict): next player's state
                (int): next player's id
        '''
        if self.allow_step_back:
            # First snapshot the current state so step_back can restore it.
            # Mutable members are copied; copies are pushed onto self.history.
            r = copy(self.round)
            r_raised = copy(self.round.raised)
            gp = self.game_pointer
            r_c = self.round_counter
            d_deck = copy(self.dealer.deck)
            p = copy(self.public_card)
            ps = [copy(self.players[i]) for i in range(self.num_players)]
            ps_hand = [copy(self.players[i].hand) for i in range(self.num_players)]
            self.history.append((r, r_raised, gp, r_c, d_deck, p, ps, ps_hand))
        # Then we proceed to the next round
        self.game_pointer = self.round.proceed_round(self.players, action)
        # If a round is over, we deal more public cards
        if self.round.is_over():
            # For the first round, we deal 1 card as public card. Double the raise amount for the second round
            if self.round_counter == 0:
                self.public_card = self.dealer.deal_card()
                self.round.raise_amount = 2 * self.raise_amount
            self.round_counter += 1
            self.round.start_new_round(self.game_pointer)
        state = self.get_state(self.game_pointer)
        return state, self.game_pointer

    def get_state(self, player):
        ''' Return player's state

        Args:
            player (int): player id

        Returns:
            (dict): The state of the player
        '''
        chips = [self.players[i].in_chips for i in range(self.num_players)]
        legal_actions = self.get_legal_actions()
        state = self.players[player].get_state(self.public_card, chips, legal_actions)
        state['current_player'] = self.game_pointer
        return state

    def is_over(self):
        ''' Check if the game is over

        Returns:
            (boolean): True if the game is over
        '''
        alive_players = [1 if p.status=='alive' else 0 for p in self.players]
        # If only one player is alive, the game is over.
        if sum(alive_players) == 1:
            return True
        # If all rounds are finished
        if self.round_counter >= 2:
            return True
        return False

    def get_payoffs(self):
        ''' Return the payoffs of the game

        Returns:
            (list): Each entry corresponds to the payoff of one player,
                normalized by the big blind
        '''
        chips_payoffs = self.judger.judge_game(self.players, self.public_card)
        payoffs = np.array(chips_payoffs) / (self.big_blind)
        return payoffs

    def step_back(self):
        ''' Return to the previous state of the game

        Restores the snapshot pushed by step() when allow_step_back is on.

        Returns:
            (bool): True if the game steps back successfully
        '''
        if len(self.history) > 0:
            self.round, r_raised, self.game_pointer, self.round_counter, d_deck, self.public_card, self.players, ps_hand = self.history.pop()
            self.round.raised = r_raised
            self.dealer.deck = d_deck
            for i, hand in enumerate(ps_hand):
                self.players[i].hand = hand
            return True
        return False
from rlcard.utils.utils import rank2int
class LeducholdemJudger:
    ''' The Judger class for Leduc Hold'em
    '''

    def __init__(self, np_random):
        ''' Initialize a judger class
        '''
        self.np_random = np_random

    @staticmethod
    def judge_game(players, public_card):
        ''' Judge the winner of the game.

        Args:
            players (list): The list of players who play the game
            public_card (object): The public card that seen by all the players

        Returns:
            (list): One chip payoff per player: winners split the pot (minus
                their own contribution), losers forfeit their in_chips.
        '''
        # Judge who are the winners
        winners = [0] * len(players)
        fold_count = 0
        ranks = []
        for idx, player in enumerate(players):
            ranks.append(rank2int(player.hand.rank))
            if player.status == 'folded':
                fold_count += 1
            elif player.status == 'alive':
                alive_idx = idx
        # If every player folds except one, the alive player is the winner
        if fold_count == (len(players) - 1):
            winners[alive_idx] = 1

        # Only players still in the hand can win a showdown.
        # (Bug fix: the original also considered folded players below, which
        # could award the pot to a folded hand in games with 3+ players.)
        contenders = [idx for idx, player in enumerate(players) if player.status != 'folded']

        # A contender whose hand matches the public card wins outright
        if sum(winners) < 1:
            for idx in contenders:
                if players[idx].hand.rank == public_card.rank:
                    winners[idx] = 1
                    break

        # Otherwise the highest-ranked contender(s) win; ties split the pot
        if sum(winners) < 1:
            max_rank = max(ranks[idx] for idx in contenders)
            for idx in contenders:
                if ranks[idx] == max_rank:
                    winners[idx] = 1

        # Compute the total chips and distribute the pot among the winners
        total = 0
        for p in players:
            total += p.in_chips
        each_win = float(total) / sum(winners)
        payoffs = []
        for i, _ in enumerate(players):
            if winners[i] == 1:
                payoffs.append(each_win - players[i].in_chips)
            else:
                payoffs.append(float(-players[i].in_chips))
        return payoffs
import numpy as np
import collections
import os
import pickle
from rlcard.utils.utils import *
class CFRAgent():
    ''' Implement CFR (chance sampling) algorithm
    '''

    def __init__(self, env, model_path='./cfr_model'):
        ''' Initialize Agent

        Args:
            env (Env): Env class
            model_path (str): directory where the policy/regret tables are pickled
        '''
        self.use_raw = False
        self.env = env
        self.model_path = model_path

        # A policy is a dict state_str -> action probabilities.
        # NOTE: the default factories below are never invoked in practice (all
        # accesses are guarded by membership checks); they are kept as-is so
        # previously pickled models keep loading.
        self.policy = collections.defaultdict(list)
        self.average_policy = collections.defaultdict(np.array)

        # Regret is a dict state_str -> action regrets
        self.regrets = collections.defaultdict(np.array)

        self.iteration = 0

    def train(self):
        ''' Do one iteration of CFR
        '''
        self.iteration += 1
        # Firstly, traverse tree to compute counterfactual regret for each player
        # The regrets are recorded in traversal
        for player_id in range(self.env.num_players):
            self.env.reset()
            probs = np.ones(self.env.num_players)
            self.traverse_tree(probs, player_id)

        # Update policy
        self.update_policy()

    def traverse_tree(self, probs, player_id):
        ''' Traverse the game tree, update the regrets

        Args:
            probs: The reach probability of the current node
            player_id: The player to update the value

        Returns:
            state_utilities (list): The expected utilities for all the players
        '''
        if self.env.is_over():
            return self.env.get_payoffs()

        current_player = self.env.get_player_id()

        action_utilities = {}
        state_utility = np.zeros(self.env.num_players)
        obs, legal_actions = self.get_state(current_player)
        action_probs = self.action_probs(obs, legal_actions, self.policy)

        for action in legal_actions:
            action_prob = action_probs[action]
            new_probs = probs.copy()
            new_probs[current_player] *= action_prob

            # Keep traversing the child state
            self.env.step(action)
            utility = self.traverse_tree(new_probs, player_id)
            self.env.step_back()

            state_utility += action_prob * utility
            action_utilities[action] = utility

        if not current_player == player_id:
            return state_utility

        # If it is current player, we record the policy and compute regret
        player_prob = probs[current_player]
        # Reach probability of the opponents (and chance) only.
        counterfactual_prob = (np.prod(probs[:current_player]) *
                               np.prod(probs[current_player + 1:]))
        player_state_utility = state_utility[current_player]

        if obs not in self.regrets:
            self.regrets[obs] = np.zeros(self.env.num_actions)
        if obs not in self.average_policy:
            self.average_policy[obs] = np.zeros(self.env.num_actions)
        for action in legal_actions:
            action_prob = action_probs[action]
            regret = counterfactual_prob * (action_utilities[action][current_player]
                                            - player_state_utility)
            self.regrets[obs][action] += regret
            # Linear averaging: weight the policy contribution by iteration.
            self.average_policy[obs][action] += self.iteration * player_prob * action_prob
        return state_utility

    def update_policy(self):
        ''' Update policy based on the current regrets
        '''
        for obs in self.regrets:
            self.policy[obs] = self.regret_matching(obs)

    def regret_matching(self, obs):
        ''' Apply regret matching

        Args:
            obs (string): The state_str

        Returns:
            (numpy.ndarray): Action probabilities proportional to positive
                regrets, or uniform when no action has positive regret.
        '''
        regret = self.regrets[obs]
        positive_regret_sum = sum([r for r in regret if r > 0])

        action_probs = np.zeros(self.env.num_actions)
        if positive_regret_sum > 0:
            for action in range(self.env.num_actions):
                action_probs[action] = max(0.0, regret[action] / positive_regret_sum)
        else:
            for action in range(self.env.num_actions):
                action_probs[action] = 1.0 / self.env.num_actions
        return action_probs

    def action_probs(self, obs, legal_actions, policy):
        ''' Obtain the action probabilities of the current state

        Args:
            obs (str): state_str
            legal_actions (list): List of legal actions
            policy (dict): The used policy

        Returns:
            (numpy.array): The action probabilities, restricted to legal actions
        '''
        if obs not in policy:
            # Unseen state: fall back to uniform and remember it in self.policy.
            action_probs = np.array([1.0/self.env.num_actions for _ in range(self.env.num_actions)])
            self.policy[obs] = action_probs
        else:
            action_probs = policy[obs]
        action_probs = remove_illegal(action_probs, legal_actions)
        return action_probs

    def eval_step(self, state):
        ''' Given a state, predict action based on average policy

        Args:
            state (numpy.array): State representation

        Returns:
            action (int): Predicted action
            info (dict): A dictionary containing information
        '''
        # .tobytes() replaces ndarray.tostring(), which was deprecated in
        # NumPy 1.19 and removed in NumPy 2.0 (identical byte output).
        probs = self.action_probs(state['obs'].tobytes(), list(state['legal_actions'].keys()), self.average_policy)
        action = np.random.choice(len(probs), p=probs)

        info = {}
        info['probs'] = {state['raw_legal_actions'][i]: float(probs[list(state['legal_actions'].keys())[i]]) for i in range(len(state['legal_actions']))}

        return action, info

    def get_state(self, player_id):
        ''' Get state_str of the player

        Args:
            player_id (int): The player id

        Returns:
            (tuple) that contains:
                state (str): The state str (raw observation bytes)
                legal_actions (list): Indices of legal actions
        '''
        state = self.env.get_state(player_id)
        # .tobytes() replaces the removed ndarray.tostring() alias; the key
        # bytes are identical, so previously saved models remain compatible.
        return state['obs'].tobytes(), list(state['legal_actions'].keys())

    def save(self):
        ''' Save model

        Context managers ensure the file handles are closed even on error.
        '''
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        with open(os.path.join(self.model_path, 'policy.pkl'), 'wb') as policy_file:
            pickle.dump(self.policy, policy_file)

        with open(os.path.join(self.model_path, 'average_policy.pkl'), 'wb') as average_policy_file:
            pickle.dump(self.average_policy, average_policy_file)

        with open(os.path.join(self.model_path, 'regrets.pkl'), 'wb') as regrets_file:
            pickle.dump(self.regrets, regrets_file)

        with open(os.path.join(self.model_path, 'iteration.pkl'), 'wb') as iteration_file:
            pickle.dump(self.iteration, iteration_file)

    def load(self):
        ''' Load model

        Silently does nothing when the model directory does not exist.
        '''
        if not os.path.exists(self.model_path):
            return

        with open(os.path.join(self.model_path, 'policy.pkl'), 'rb') as policy_file:
            self.policy = pickle.load(policy_file)

        with open(os.path.join(self.model_path, 'average_policy.pkl'), 'rb') as average_policy_file:
            self.average_policy = pickle.load(average_policy_file)

        with open(os.path.join(self.model_path, 'regrets.pkl'), 'rb') as regrets_file:
            self.regrets = pickle.load(regrets_file)

        with open(os.path.join(self.model_path, 'iteration.pkl'), 'rb') as iteration_file:
            self.iteration = pickle.load(iteration_file)
import random
import numpy as np
import torch
import torch.nn as nn
from collections import namedtuple
from copy import deepcopy
from rlcard.utils.utils import remove_illegal
Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'legal_actions', 'done'])
class DQNAgent(object):
    '''
    Approximate clone of rlcard.agents.dqn_agent.DQNAgent
    that depends on PyTorch instead of Tensorflow
    '''
    def __init__(self,
                 replay_memory_size=20000,
                 replay_memory_init_size=100,
                 update_target_estimator_every=1000,
                 discount_factor=0.99,
                 epsilon_start=1.0,
                 epsilon_end=0.1,
                 epsilon_decay_steps=20000,
                 batch_size=32,
                 num_actions=2,
                 state_shape=None,
                 train_every=1,
                 mlp_layers=None,
                 learning_rate=0.00005,
                 device=None):
        '''
        Q-Learning algorithm for off-policy TD control using Function Approximation.
        Finds the optimal greedy policy while following an epsilon-greedy policy.

        Args:
            replay_memory_size (int): Size of the replay memory
            replay_memory_init_size (int): Number of random experiences to sample when initializing
              the reply memory.
            update_target_estimator_every (int): Copy parameters from the Q estimator to the
              target estimator every N steps
            discount_factor (float): Gamma discount factor
            epsilon_start (float): Chance to sample a random action when taking an action.
              Epsilon is decayed over time and this is the start value
            epsilon_end (float): The final minimum value of epsilon after decaying is done
            epsilon_decay_steps (int): Number of steps to decay epsilon over
            batch_size (int): Size of batches to sample from the replay memory
            evaluate_every (int): Evaluate every N steps
            num_actions (int): The number of the actions
            state_space (list): The space of the state vector
            train_every (int): Train the network every X steps.
            mlp_layers (list): The layer number and the dimension of each layer in MLP
            learning_rate (float): The learning rate of the DQN agent.
            device (torch.device): whether to use the cpu or gpu
        '''
        self.use_raw = False
        self.replay_memory_init_size = replay_memory_init_size
        self.update_target_estimator_every = update_target_estimator_every
        self.discount_factor = discount_factor
        self.epsilon_decay_steps = epsilon_decay_steps
        self.batch_size = batch_size
        self.num_actions = num_actions
        self.train_every = train_every

        # Torch device: prefer GPU 0 when available unless caller overrides.
        if device is None:
            self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        # Total timesteps (number of transitions fed via `feed`)
        self.total_t = 0

        # Total training step (number of `train` calls)
        self.train_t = 0

        # The epsilon decay scheduler: linear from epsilon_start to epsilon_end
        self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)

        # Create estimators: online Q-network and a periodically-synced target network
        self.q_estimator = Estimator(num_actions=num_actions, learning_rate=learning_rate, state_shape=state_shape, \
            mlp_layers=mlp_layers, device=self.device)
        self.target_estimator = Estimator(num_actions=num_actions, learning_rate=learning_rate, state_shape=state_shape, \
            mlp_layers=mlp_layers, device=self.device)

        # Create replay memory
        self.memory = Memory(replay_memory_size, batch_size)

    def feed(self, ts):
        ''' Store data in to replay buffer and train the agent. There are two stages.
            In stage 1, populate the memory without training
            In stage 2, train the agent every several timesteps

        Args:
            ts (list): a list of 5 elements that represent the transition
        '''
        (state, action, reward, next_state, done) = tuple(ts)
        self.feed_memory(state['obs'], action, reward, next_state['obs'], list(next_state['legal_actions'].keys()), done)
        self.total_t += 1
        # Train only once the buffer holds `replay_memory_init_size` transitions,
        # then once every `train_every` timesteps.
        tmp = self.total_t - self.replay_memory_init_size
        if tmp >= 0 and tmp % self.train_every == 0:
            self.train()

    def step(self, state):
        ''' Predict the action for genrating training data but
            have the predictions disconnected from the computation graph

        Args:
            state (numpy.array): current state

        Returns:
            action (int): an action id
        '''
        q_values = self.predict(state)
        # Linearly decayed epsilon, clamped at the final schedule value.
        epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]
        legal_actions = list(state['legal_actions'].keys())
        # Epsilon-greedy restricted to legal actions: uniform exploration mass
        # plus (1 - epsilon) extra weight on the greedy action.  The argmax is
        # over masked Q-values, so it is always a legal action id.
        probs = np.ones(len(legal_actions), dtype=float) * epsilon / len(legal_actions)
        best_action_idx = legal_actions.index(np.argmax(q_values))
        probs[best_action_idx] += (1.0 - epsilon)
        action_idx = np.random.choice(np.arange(len(probs)), p=probs)
        return legal_actions[action_idx]

    def eval_step(self, state):
        ''' Predict the action for evaluation purpose.

        Args:
            state (numpy.array): current state

        Returns:
            action (int): an action id
            info (dict): A dictionary containing information
        '''
        q_values = self.predict(state)
        best_action = np.argmax(q_values)
        info = {}
        # Expose each raw legal action's Q-value for inspection/debugging.
        info['values'] = {state['raw_legal_actions'][i]: float(q_values[list(state['legal_actions'].keys())[i]]) for i in range(len(state['legal_actions']))}
        return best_action, info

    def predict(self, state):
        ''' Predict the masked Q-values

        Args:
            state (numpy.array): current state

        Returns:
            q_values (numpy.array): a 1-d array where each entry represents a Q value
        '''
        q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]
        # Illegal actions are set to -inf so argmax can never choose them.
        masked_q_values = -np.inf * np.ones(self.num_actions, dtype=float)
        legal_actions = list(state['legal_actions'].keys())
        masked_q_values[legal_actions] = q_values[legal_actions]
        return masked_q_values

    def train(self):
        ''' Train the network

        Returns:
            loss (float): The loss of the current batch.
        '''
        state_batch, action_batch, reward_batch, next_state_batch, legal_actions_batch, done_batch = self.memory.sample()

        # Calculate best next actions using Q-network (Double DQN)
        q_values_next = self.q_estimator.predict_nograd(next_state_batch)
        # Build flat (batch * num_actions) indices of the legal actions so the
        # whole batch can be masked in one vectorized assignment.
        legal_actions = []
        for b in range(self.batch_size):
            legal_actions.extend([i + b * self.num_actions for i in legal_actions_batch[b]])
        masked_q_values = -np.inf * np.ones(self.num_actions * self.batch_size, dtype=float)
        masked_q_values[legal_actions] = q_values_next.flatten()[legal_actions]
        masked_q_values = masked_q_values.reshape((self.batch_size, self.num_actions))
        best_actions = np.argmax(masked_q_values, axis=1)

        # Evaluate best next actions using Target-network (Double DQN)
        q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)
        # np.invert(done_batch) zeroes the bootstrap term for terminal transitions.
        target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \
            self.discount_factor * q_values_next_target[np.arange(self.batch_size), best_actions]

        # Perform gradient descent update
        state_batch = np.array(state_batch)

        loss = self.q_estimator.update(state_batch, action_batch, target_batch)
        print('\rINFO - Step {}, rl-loss: {}'.format(self.total_t, loss), end='')

        # Update the target estimator (hard copy every N training steps)
        if self.train_t % self.update_target_estimator_every == 0:
            self.target_estimator = deepcopy(self.q_estimator)
            print("\nINFO - Copied model parameters to target network.")

        self.train_t += 1

    def feed_memory(self, state, action, reward, next_state, legal_actions, done):
        ''' Feed transition to memory

        Args:
            state (numpy.array): the current state
            action (int): the performed action ID
            reward (float): the reward received
            next_state (numpy.array): the next state after performing the action
            legal_actions (list): the legal actions of the next state
            done (boolean): whether the episode is finished
        '''
        self.memory.save(state, action, reward, next_state, legal_actions, done)

    def set_device(self, device):
        # Switch the device used by both estimators for future tensor transfers.
        # NOTE(review): this does not move already-allocated network parameters
        # to the new device — confirm callers only use it before training.
        self.device = device
        self.q_estimator.device = device
        self.target_estimator.device = device
class Estimator(object):
    '''
    Approximate clone of rlcard.agents.dqn_agent.Estimator that
    uses PyTorch instead of Tensorflow.  All methods input/output np.ndarray.

    Q-Value Estimator neural network.
    This network is used for both the Q-Network and the Target Network.
    '''

    def __init__(self, num_actions=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):
        ''' Initilalize an Estimator object.

        Args:
            num_actions (int): the number output actions
            learning_rate (float): learning rate for the Adam optimizer
            state_shape (list): the shape of the state space
            mlp_layers (list): size of outputs of mlp layers
            device (torch.device): whether to use cpu or gpu
        '''
        self.num_actions = num_actions
        self.learning_rate = learning_rate
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers
        self.device = device

        # set up Q model and place it in eval mode (BatchNorm uses running stats)
        qnet = EstimatorNetwork(num_actions, state_shape, mlp_layers)
        qnet = qnet.to(self.device)
        self.qnet = qnet
        self.qnet.eval()

        # initialize the weights using Xavier init (weight matrices only;
        # 1-d parameters such as biases keep their default init)
        for p in self.qnet.parameters():
            if len(p.data.shape) > 1:
                nn.init.xavier_uniform_(p.data)

        # set up loss function
        self.mse_loss = nn.MSELoss(reduction='mean')

        # set up optimizer
        self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)

    def predict_nograd(self, s):
        ''' Predicts action values, but prediction is not included
            in the computation graph.  It is used to predict optimal next
            actions in the Double-DQN algorithm.

        Args:
            s (np.ndarray): (batch, state_len)

        Returns:
            np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated
            action values.
        '''
        with torch.no_grad():
            s = torch.from_numpy(s).float().to(self.device)
            q_as = self.qnet(s).cpu().numpy()
        return q_as

    def update(self, s, a, y):
        ''' Updates the estimator towards the given targets.
            In this case y is the target-network estimated
            value of the Q-network optimal actions, which
            is labeled y in Algorithm 1 of Minh et al. (2015)

        Args:
            s (np.ndarray): (batch, state_shape) state representation
            a (np.ndarray): (batch,) integer sampled actions
            y (np.ndarray): (batch,) value of optimal actions according to Q-target

        Returns:
            The calculated loss on the batch.
        '''
        self.optimizer.zero_grad()
        self.qnet.train()  # training mode so BatchNorm uses batch statistics

        s = torch.from_numpy(s).float().to(self.device)
        a = torch.from_numpy(a).long().to(self.device)
        y = torch.from_numpy(y).float().to(self.device)

        # (batch, state_shape) -> (batch, num_actions)
        q_as = self.qnet(s)

        # (batch, num_actions) -> (batch, ): Q-value of the sampled action
        Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)

        # update model
        batch_loss = self.mse_loss(Q, y)
        batch_loss.backward()
        self.optimizer.step()
        batch_loss = batch_loss.item()

        self.qnet.eval()  # back to eval mode for subsequent predictions
        return batch_loss
class EstimatorNetwork(nn.Module):
    ''' The function approximation network for Estimator.

    A flatten layer and a batch-norm feed a stack of tanh-activated
    fully-connected layers, followed by a linear output head.  All
    inputs and outputs are torch tensors.
    '''

    def __init__(self, num_actions=2, state_shape=None, mlp_layers=None):
        ''' Initialize the Q network.

        Args:
            num_actions (int): number of legal actions
            state_shape (list): shape of state tensor
            mlp_layers (list): output size of each fc layer
        '''
        super(EstimatorNetwork, self).__init__()
        self.num_actions = num_actions
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # Layer widths: flattened input, the hidden sizes, then the action head.
        dims = [np.prod(self.state_shape)] + self.mlp_layers
        modules = [nn.Flatten(), nn.BatchNorm1d(dims[0])]
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            modules.append(nn.Linear(in_dim, out_dim, bias=True))
            modules.append(nn.Tanh())
        modules.append(nn.Linear(dims[-1], self.num_actions, bias=True))
        self.fc_layers = nn.Sequential(*modules)

    def forward(self, s):
        ''' Predict action values.

        Args:
            s (Tensor): (batch, state_shape)
        '''
        return self.fc_layers(s)
class Memory(object):
    ''' Memory for saving transitions (a simple FIFO replay buffer)
    '''

    def __init__(self, memory_size, batch_size):
        ''' Initialize

        Args:
            memory_size (int): the size of the memory buffer
            batch_size (int): number of transitions returned by `sample`
        '''
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.memory = []

    def save(self, state, action, reward, next_state, legal_actions, done):
        ''' Save transition into memory.  When full, the oldest transition
        is evicted first (FIFO).

        Args:
            state (numpy.array): the current state
            action (int): the performed action ID
            reward (float): the reward received
            next_state (numpy.array): the next state after performing the action
            legal_actions (list): the legal actions of the next state
            done (boolean): whether the episode is finished
        '''
        if len(self.memory) == self.memory_size:
            self.memory.pop(0)
        transition = Transition(state, action, reward, next_state, legal_actions, done)
        self.memory.append(transition)

    def sample(self):
        ''' Sample a minibatch from the replay memory

        Returns:
            state_batch (list): a batch of states
            action_batch (list): a batch of actions
            reward_batch (list): a batch of rewards
            next_state_batch (list): a batch of states
            legal_actions_batch (list): a batch of next-state legal-action lists
            done_batch (list): a batch of dones
        '''
        samples = random.sample(self.memory, self.batch_size)
        # Transpose the list of Transition tuples into per-field numpy arrays.
        return map(np.array, zip(*samples))
import random
import collections
import enum
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.utils.utils import remove_illegal
Transition = collections.namedtuple('Transition', 'info_state action_probs')
class NFSPAgent(object):
    ''' An approximate clone of rlcard.agents.nfsp_agent that uses
    pytorch instead of tensorflow.  Note that this implementation
    differs from Henrich and Silver (2016) in that the supervised
    training minimizes cross-entropy with respect to the stored
    action probabilities rather than the realized actions.
    '''

    def __init__(self,
                 num_actions=4,
                 state_shape=None,
                 hidden_layers_sizes=None,
                 reservoir_buffer_capacity=20000,
                 anticipatory_param=0.1,
                 batch_size=256,
                 train_every=1,
                 rl_learning_rate=0.1,
                 sl_learning_rate=0.005,
                 min_buffer_size_to_learn=100,
                 q_replay_memory_size=20000,
                 q_replay_memory_init_size=100,
                 q_update_target_estimator_every=1000,
                 q_discount_factor=0.99,
                 q_epsilon_start=0.06,
                 q_epsilon_end=0,
                 q_epsilon_decay_steps=int(1e6),
                 q_batch_size=32,
                 q_train_every=1,
                 q_mlp_layers=None,
                 evaluate_with='average_policy',
                 device=None):
        ''' Initialize the NFSP agent.

        Args:
            num_actions (int): The number of actions.
            state_shape (list): The shape of the state space.
            hidden_layers_sizes (list): The hidden layers sizes for the layers of
              the average policy.
            reservoir_buffer_capacity (int): The size of the buffer for average policy.
            anticipatory_param (float): The hyper-parameter that balances rl/avarage policy.
            batch_size (int): The batch_size for training average policy.
            train_every (int): Train the SL policy every X steps.
            rl_learning_rate (float): The learning rate of the RL agent.
            sl_learning_rate (float): the learning rate of the average policy.
            min_buffer_size_to_learn (int): The minimum buffer size to learn for average policy.
            q_replay_memory_size (int): The memory size of inner DQN agent.
            q_replay_memory_init_size (int): The initial memory size of inner DQN agent.
            q_update_target_estimator_every (int): The frequency of updating target network for
              inner DQN agent.
            q_discount_factor (float): The discount factor of inner DQN agent.
            q_epsilon_start (float): The starting epsilon of inner DQN agent.
            q_epsilon_end (float): the end epsilon of inner DQN agent.
            q_epsilon_decay_steps (int): The decay steps of inner DQN agent.
            q_batch_size (int): The batch size of inner DQN agent.
            q_train_step (int): Train the model every X steps.
            q_mlp_layers (list): The layer sizes of inner DQN agent.
            evaluate_with (str): 'average_policy' or 'best_response' — which
              policy `eval_step` uses.
            device (torch.device): Whether to use the cpu or gpu
        '''
        self.use_raw = False
        self._num_actions = num_actions
        self._state_shape = state_shape
        # The average-policy MLP ends with a num_actions-wide output layer.
        self._layer_sizes = hidden_layers_sizes + [num_actions]
        self._batch_size = batch_size
        self._train_every = train_every
        self._sl_learning_rate = sl_learning_rate
        self._anticipatory_param = anticipatory_param
        self._min_buffer_size_to_learn = min_buffer_size_to_learn

        self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
        self._prev_timestep = None
        self._prev_action = None
        self.evaluate_with = evaluate_with

        if device is None:
            self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        # Total timesteps
        self.total_t = 0

        # Step counter to keep track of learning.
        self._step_counter = 0

        # Build the action-value network (the inner best-response DQN agent)
        self._rl_agent = DQNAgent(q_replay_memory_size, q_replay_memory_init_size, \
            q_update_target_estimator_every, q_discount_factor, q_epsilon_start, q_epsilon_end, \
            q_epsilon_decay_steps, q_batch_size, num_actions, state_shape, q_train_every, q_mlp_layers, \
            rl_learning_rate, device)

        # Build the average policy supervised model
        self._build_model()

        self.sample_episode_policy()

    def _build_model(self):
        ''' Build the average policy network
        '''
        # configure the average policy network
        policy_network = AveragePolicyNetwork(self._num_actions, self._state_shape, self._layer_sizes)
        policy_network = policy_network.to(self.device)
        self.policy_network = policy_network
        self.policy_network.eval()

        # xavier init (weight matrices only; 1-d params keep default init)
        for p in self.policy_network.parameters():
            if len(p.data.shape) > 1:
                nn.init.xavier_uniform_(p.data)

        # configure optimizer
        self.policy_network_optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=self._sl_learning_rate)

    def feed(self, ts):
        ''' Feed data to inner RL agent

        Args:
            ts (list): A list of 5 elements that represent the transition.
        '''
        self._rl_agent.feed(ts)
        self.total_t += 1
        # Train the supervised (average-policy) network only once the
        # reservoir buffer is warm, every `_train_every` timesteps.
        if self.total_t>0 and len(self._reservoir_buffer) >= self._min_buffer_size_to_learn and self.total_t%self._train_every == 0:
            sl_loss = self.train_sl()
            print('\rINFO - Step {}, sl-loss: {}'.format(self.total_t, sl_loss), end='')

    def step(self, state):
        ''' Returns the action to be taken.

        Args:
            state (dict): The current state

        Returns:
            action (int): An action id
        '''
        obs = state['obs']
        legal_actions = list(state['legal_actions'].keys())
        if self._mode == 'best_response':
            action = self._rl_agent.step(state)
            # Record the greedy action as a one-hot target for SL training.
            one_hot = np.zeros(self._num_actions)
            one_hot[action] = 1
            self._add_transition(obs, one_hot)

        elif self._mode == 'average_policy':
            probs = self._act(obs)
            probs = remove_illegal(probs, legal_actions)
            action = np.random.choice(len(probs), p=probs)

        return action

    def eval_step(self, state):
        ''' Use the average policy for evaluation purpose

        Args:
            state (dict): The current state.

        Returns:
            action (int): An action id.
            info (dict): A dictionary containing information
        '''
        if self.evaluate_with == 'best_response':
            action, info = self._rl_agent.eval_step(state)
        elif self.evaluate_with == 'average_policy':
            obs = state['obs']
            legal_actions = list(state['legal_actions'].keys())
            probs = self._act(obs)
            probs = remove_illegal(probs, legal_actions)
            action = np.random.choice(len(probs), p=probs)
            info = {}
            info['probs'] = {state['raw_legal_actions'][i]: float(probs[list(state['legal_actions'].keys())[i]]) for i in range(len(state['legal_actions']))}
        else:
            raise ValueError("'evaluate_with' should be either 'average_policy' or 'best_response'.")
        return action, info

    def sample_episode_policy(self):
        ''' Sample average/best_response policy

        With probability `anticipatory_param` play the best response (and
        collect SL targets); otherwise play the average policy.
        '''
        if np.random.rand() < self._anticipatory_param:
            self._mode = 'best_response'
        else:
            self._mode = 'average_policy'

    def _act(self, info_state):
        ''' Predict action probability givin the observation and legal actions
            Not connected to computation graph

        Args:
            info_state (numpy.array): An obervation.

        Returns:
            action_probs (numpy.array): The predicted action probability.
        '''
        info_state = np.expand_dims(info_state, axis=0)
        info_state = torch.from_numpy(info_state).float().to(self.device)

        with torch.no_grad():
            # The network outputs log-probabilities; exponentiate to get probs.
            log_action_probs = self.policy_network(info_state).cpu().numpy()

        action_probs = np.exp(log_action_probs)[0]

        return action_probs

    def _add_transition(self, state, probs):
        ''' Adds the new transition to the reservoir buffer.

        Transitions are in the form (state, probs).

        Args:
            state (numpy.array): The state.
            probs (numpy.array): The probabilities of each action.
        '''
        transition = Transition(
                info_state=state,
                action_probs=probs)
        self._reservoir_buffer.add(transition)

    def train_sl(self):
        ''' Compute the loss on sampled transitions and perform a avg-network update.

        If there are not enough elements in the buffer, no loss is computed and
        `None` is returned instead.

        Returns:
            loss (float): The average loss obtained on this batch of transitions or `None`.
        '''
        if (len(self._reservoir_buffer) < self._batch_size or
                len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
            return None

        transitions = self._reservoir_buffer.sample(self._batch_size)
        info_states = [t.info_state for t in transitions]
        action_probs = [t.action_probs for t in transitions]

        self.policy_network_optimizer.zero_grad()
        self.policy_network.train()

        # (batch, state_size)
        info_states = torch.from_numpy(np.array(info_states)).float().to(self.device)

        # (batch, num_actions)
        eval_action_probs = torch.from_numpy(np.array(action_probs)).float().to(self.device)

        # (batch, num_actions)
        log_forecast_action_probs = self.policy_network(info_states)

        # Cross-entropy against the stored action-probability targets.
        ce_loss = - (eval_action_probs * log_forecast_action_probs).sum(dim=-1).mean()
        ce_loss.backward()
        self.policy_network_optimizer.step()
        ce_loss = ce_loss.item()
        self.policy_network.eval()

        return ce_loss

    def set_device(self, device):
        # Propagate the device to the inner RL agent as well.
        self.device = device
        self._rl_agent.set_device(device)
class AveragePolicyNetwork(nn.Module):
    '''
    Approximates the history of action probabilities
    given state (average policy). Forward pass returns
    log probabilities of actions.
    '''

    def __init__(self, num_actions=2, state_shape=None, mlp_layers=None):
        ''' Initialize the policy network.  It is an MLP with ReLU
        activations on all but the final layer; weights are Xavier-initialized
        by the owning agent.

        Args:
            num_actions (int): number of output actions
            state_shape (list): shape of state tensor for each sample
            mlp_layers (list): output size of each mlp layer including final
        '''
        super(AveragePolicyNetwork, self).__init__()

        self.num_actions = num_actions
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # flatten -> batch-norm -> linear/ReLU stack; no activation on the
        # last linear layer (its width is mlp_layers[-1], set to num_actions
        # by the agent).
        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
        mlp = [nn.Flatten()]
        mlp.append(nn.BatchNorm1d(layer_dims[0]))
        for i in range(len(layer_dims) - 1):
            mlp.append(nn.Linear(layer_dims[i], layer_dims[i + 1]))
            if i != len(layer_dims) - 2:  # all but final have relu
                mlp.append(nn.ReLU())
        self.mlp = nn.Sequential(*mlp)

    def forward(self, s):
        ''' Log action probabilities of each action from state

        Args:
            s (Tensor): (batch, state_shape) state tensor

        Returns:
            log_action_probs (Tensor): (batch, num_actions)
        '''
        logits = self.mlp(s)
        log_action_probs = F.log_softmax(logits, dim=-1)
        return log_action_probs
class ReservoirBuffer(object):
    ''' Allows uniform sampling over a stream of data.

    This class supports the storage of arbitrary elements, such as observation
    tensors, integer actions, etc.

    See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
    '''

    def __init__(self, reservoir_buffer_capacity):
        ''' Initialize the buffer.

        Args:
            reservoir_buffer_capacity (int): maximum number of retained elements
        '''
        self._reservoir_buffer_capacity = reservoir_buffer_capacity
        self._data = []
        self._add_calls = 0

    def add(self, element):
        ''' Potentially adds `element` to the reservoir buffer.

        Args:
            element (object): data to be added to the reservoir buffer.
        '''
        if len(self._data) < self._reservoir_buffer_capacity:
            self._data.append(element)
        else:
            # Classic reservoir sampling: the n-th element replaces a random
            # slot with probability capacity/n, keeping the sample uniform.
            idx = np.random.randint(0, self._add_calls + 1)
            if idx < self._reservoir_buffer_capacity:
                self._data[idx] = element
        self._add_calls += 1

    def sample(self, num_samples):
        ''' Returns `num_samples` uniformly sampled from the buffer.

        Args:
            num_samples (int): The number of samples to draw.

        Returns:
            An iterable over `num_samples` random elements of the buffer.

        Raises:
            ValueError: If there are less than `num_samples` elements in the buffer
        '''
        if len(self._data) < num_samples:
            raise ValueError("{} elements could not be sampled from size {}".format(
                num_samples, len(self._data)))
        return random.sample(self._data, num_samples)

    def clear(self):
        ''' Clear the buffer
        '''
        self._data = []
        self._add_calls = 0

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)
from rlcard.utils.utils import print_card
class HumanAgent(object):
    ''' A human agent for Blackjack. It can be used to play alone to understand
    how the blackjack code runs.
    '''

    def __init__(self, num_actions):
        ''' Initialize the human agent

        Args:
            num_actions (int): the size of the output action space
        '''
        self.use_raw = True
        self.num_actions = num_actions

    @staticmethod
    def step(state):
        ''' Human agent will display the state and make decisions through interfaces

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): The raw action chosen by the human
        '''
        _print_state(state['raw_obs'], state['raw_legal_actions'], state['action_record'])
        prompt = '>> You choose action (integer): '
        while True:
            try:
                action = int(input(prompt))
                if 0 <= action < len(state['legal_actions']):
                    break
            except ValueError:
                # Non-integer input used to crash with ValueError; re-prompt instead.
                pass
            print('Action illegel...')
            prompt = '>> Re-choose action (integer): '
        return state['raw_legal_actions'][action]

    def eval_step(self, state):
        ''' Predict the action given the current state for evaluation. The same to step here.

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): the raw action chosen by the human
            info (dict): empty information dictionary
        '''
        return self.step(state), {}
def _print_state(state, raw_legal_actions, action_record):
    ''' Print out the state

    Args:
        state (dict): A dictionary of the raw state
        raw_legal_actions (list): raw legal actions, shown as a numbered menu
        action_record (list): A list of the each player's historical actions
    '''
    # The original insert-at-front loop simply rebuilt the record in order.
    _action_list = list(action_record)
    for pair in _action_list:
        print('>> Player', pair[0], 'chooses', pair[1])

    print('\n============= Dealer Hand ===============')
    print_card(state['dealer hand'])

    # assumes the state dict has exactly 3 non-hand keys besides the
    # per-player hands (as the original computation implied) — TODO confirm
    num_players = len(state) - 3
    for i in range(num_players):
        print('=============== Player {} Hand ==============='.format(i))
        print_card(state['player' + str(i) + ' hand'])

    print('\n=========== Actions You Can Choose ===========')
    print(', '.join([str(index) + ': ' + action for index, action in enumerate(raw_legal_actions)]))
    print('')
from rlcard.games.uno.card import UnoCard
class HumanAgent(object):
    ''' A human agent for Uno. It can be used to play against trained models.
    '''

    def __init__(self, num_actions):
        ''' Initialize the human agent

        Args:
            num_actions (int): the size of the ouput action space
        '''
        self.use_raw = True
        self.num_actions = num_actions

    @staticmethod
    def step(state):
        ''' Human agent will display the state and make decisions through interfaces

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): The raw action chosen by the human
        '''
        # NOTE(review): this dumps the raw observation dict before the pretty
        # print below — looks like leftover debug output; kept for now.
        print(state['raw_obs'])
        _print_state(state['raw_obs'], state['action_record'])
        prompt = '>> You choose action (integer): '
        while True:
            try:
                action = int(input(prompt))
                if 0 <= action < len(state['legal_actions']):
                    break
            except ValueError:
                # Non-integer input used to crash with ValueError; re-prompt instead.
                pass
            print('Action illegel...')
            prompt = '>> Re-choose action (integer): '
        return state['raw_legal_actions'][action]

    def eval_step(self, state):
        ''' Predict the action given the curent state for evaluation. The same to step here.

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): the raw action chosen by the human
            info (dict): empty information dictionary
        '''
        return self.step(state), {}
def _print_state(state, action_record):
    ''' Print out the state of a given player

    Args:
        state (dict): the raw state to display
        action_record (list): historical (player_id, action) pairs
    '''
    # Collect the actions taken since this player's previous turn by walking
    # the record backwards until we hit our own last move, then replay them
    # in chronological order.
    recent = []
    for pair in reversed(action_record):
        if pair[0] == state['current_player']:
            break
        recent.append(pair)
    for pair in reversed(recent):
        print('>> Player', pair[0], 'chooses ', end='')
        _print_action(pair[1])
        print('')

    print('\n=============== Your Hand ===============')
    UnoCard.print_cards(state['hand'])
    print('')
    print('=============== Last Card ===============')
    UnoCard.print_cards(state['target'], wild_color=True)
    print('')
    print('========== Players Card Number ===========')
    for player_id in range(state['num_players']):
        if player_id != state['current_player']:
            print('Player {} has {} cards.'.format(player_id, state['num_cards'][player_id]))
    print('======== Actions You Can Choose =========')
    last_idx = len(state['legal_actions']) - 1
    for idx, action in enumerate(state['legal_actions']):
        print(str(idx) + ': ', end='')
        UnoCard.print_cards(action, wild_color=True)
        if idx < last_idx:
            print(', ', end='')
    print('\n')
def _print_action(action):
    ''' Print out an action in a nice form

    Args:
        action (str): A string of the action
    '''
    UnoCard.print_cards(action, wild_color=True)
from rlcard.utils.utils import print_card
class HumanAgent(object):
    ''' A human agent for Limit Holdem. It can be used to play against trained models.
    '''

    def __init__(self, num_actions):
        ''' Initialize the human agent

        Args:
            num_actions (int): the size of the ouput action space
        '''
        self.use_raw = True
        self.num_actions = num_actions

    @staticmethod
    def step(state):
        ''' Human agent will display the state and make decisions through interfaces

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): The raw action chosen by the human
        '''
        _print_state(state['raw_obs'], state['action_record'])
        prompt = '>> You choose action (integer): '
        while True:
            try:
                action = int(input(prompt))
                if 0 <= action < len(state['legal_actions']):
                    break
            except ValueError:
                # Non-integer input used to crash with ValueError; re-prompt instead.
                pass
            print('Action illegel...')
            prompt = '>> Re-choose action (integer): '
        return state['raw_legal_actions'][action]

    def eval_step(self, state):
        ''' Predict the action given the curent state for evaluation. The same to step here.

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): the raw action chosen by the human
            info (dict): empty information dictionary
        '''
        return self.step(state), {}
def _print_state(state, action_record):
    ''' Print out the state

    Args:
        state (dict): A dictionary of the raw state
        action_record (list): A list of the each player's historical actions
    '''
    # The original insert-at-front loop simply rebuilt the record in order.
    _action_list = list(action_record)
    for pair in _action_list:
        print('>> Player', pair[0], 'chooses', pair[1])

    print('\n=============== Community Card ===============')
    print_card(state['public_cards'])
    print('=============== Your Hand ===============')
    print_card(state['hand'])
    print('=============== Chips ===============')
    print('Yours: ', end='')
    print('+' * state['my_chips'])
    # NOTE(review): the other players' chips are printed with no label or
    # separator between players — output kept byte-identical to the original;
    # compare the Leduc agent, which labels each player.
    for chips in state['all_chips']:
        print('+' * chips, end='')
    print('\n=========== Actions You Can Choose ===========')
    print(', '.join([str(index) + ': ' + action for index, action in enumerate(state['legal_actions'])]))
    print('')
from rlcard.utils.utils import print_card
class HumanAgent(object):
    ''' A human agent for No Limit Holdem. It can be used to play against trained models.
    '''

    def __init__(self, num_actions):
        ''' Initialize the human agent

        Args:
            num_actions (int): the size of the ouput action space
        '''
        self.use_raw = True
        self.num_actions = num_actions

    @staticmethod
    def step(state):
        ''' Human agent will display the state and make decisions through interfaces

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): The raw action chosen by the human
        '''
        _print_state(state['raw_obs'], state['action_record'])
        prompt = '>> You choose action (integer): '
        while True:
            try:
                action = int(input(prompt))
                if 0 <= action < len(state['legal_actions']):
                    break
            except ValueError:
                # Non-integer input used to crash with ValueError; re-prompt instead.
                pass
            print('Action illegel...')
            prompt = '>> Re-choose action (integer): '
        return state['raw_legal_actions'][action]

    def eval_step(self, state):
        ''' Predict the action given the curent state for evaluation. The same to step here.

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action (str): the raw action chosen by the human
            info (dict): empty information dictionary
        '''
        return self.step(state), {}
def _print_state(state, action_record):
    ''' Print out the state

    Args:
        state (dict): A dictionary of the raw state
        action_record (list): A list of the historical actions
    '''
    # Replay only the actions taken since this player's previous turn.
    _action_list = []
    for i in range(1, len(action_record) + 1):
        if action_record[-i][0] == state['current_player']:
            break
        _action_list.insert(0, action_record[-i])
    for pair in _action_list:
        print('>> Player', pair[0], 'chooses', pair[1])

    print('\n=============== Community Card ===============')
    print_card(state['public_cards'])
    print('============= Player', state["current_player"], '- Hand =============')
    print_card(state['hand'])
    print('=============== Chips ===============')
    print('In Pot:', state["pot"])
    print('Remaining:', state["stakes"])
    print('\n=========== Actions You Can Choose ===========')
    print(', '.join([str(index) + ': ' + str(action) for index, action in enumerate(state['legal_actions'])]))
    print('')
    # Removed a trailing debug `print(state)` that dumped the raw state dict
    # after the formatted display.
from rlcard.utils.utils import print_card
class HumanAgent(object):
    ''' A human agent for Leduc Holdem. It can be used to play against trained models
    '''

    def __init__(self, num_actions):
        ''' Initialize the human agent

        Args:
            num_actions (int): the size of the output action space
        '''
        self.use_raw = True
        self.num_actions = num_actions

    @staticmethod
    def step(state):
        ''' Display the state and let the human choose an action via the console.

        Args:
            state (dict): A dictionary that represents the current state

        Returns:
            action: The raw legal action chosen by the human
        '''
        _print_state(state['raw_obs'], state['action_record'])
        prompt = '>> You choose action (integer): '
        while True:
            try:
                action = int(input(prompt))
            except ValueError:
                # Non-numeric input previously crashed; treat it as illegal.
                action = -1
            if 0 <= action < len(state['legal_actions']):
                break
            print('Action illegal...')
            prompt = '>> Re-choose action (integer): '
        return state['raw_legal_actions'][action]

    def eval_step(self, state):
        ''' Predict the action given the current state for evaluation.
        The same as ``step`` here.

        Args:
            state (dict): a dictionary that represents the current state

        Returns:
            tuple: the chosen action and an empty info dictionary
        '''
        return self.step(state), {}
def _print_state(state, action_record):
    ''' Print out the state

    Args:
        state (dict): A dictionary of the raw state
        action_record (list): A list of the historical actions
    '''
    # Replay the actions taken since this player's previous turn, oldest first.
    _action_list = []
    for i in range(1, len(action_record)+1):
        if action_record[-i][0] == state['current_player']:
            break
        _action_list.insert(0, action_record[-i])
    for pair in _action_list:
        print('>> Player', pair[0], 'chooses', pair[1])
    print('\n=============== Community Card ===============')
    print_card(state['public_card'])
    print('=============== Your Hand ===============')
    print_card(state['hand'])
    print('=============== Chips ===============')
    # Draw one '+' per chip this player has committed to the pot.
    print('Yours: ', end='')
    for _ in range(state['my_chips']):
        print('+', end='')
    print('')
    # Show every opponent's committed chips the same way.
    for i in range(len(state['all_chips'])):
        if i != state['current_player']:
            print('Agent {}: '.format(i) , end='')
            for _ in range(state['all_chips'][i]):
                print('+', end='')
    print('\n=========== Actions You Can Choose ===========')
    # Show "index: action" pairs so the human can answer with the index.
    print(', '.join([str(index) + ': ' + action for index, action in enumerate(state['legal_actions'])]))
print('') | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/agents/human_agents/leduc_holdem_human_agent.py | 0.603815 | 0.475423 | leduc_holdem_human_agent.py | pypi |
import time
from rlcard.games.gin_rummy.utils.action_event import ActionEvent
from rlcard.games.gin_rummy.utils.gin_rummy_error import GinRummyProgramError
class HumanAgent(object):
''' A human agent for Gin Rummy. It can be used to play against trained models.
'''
def __init__(self, num_actions):
    ''' Initialize the human agent

    Args:
        num_actions (int): the size of the output action space
    '''
    self.use_raw = True
    self.num_actions = num_actions
    # Handshake fields shared with the GUI thread: the GUI stores the chosen
    # action id while ``step`` busy-waits for it.
    self.is_choosing_action_id = False
    self.chosen_action_id = None  # type: int or None
    self.state = None
def step(self, state):
    ''' Publish the state and busy-wait until the GUI thread delivers the
    human's chosen action id.

    Args:
        state (dict): A dictionary that represents the current state

    Returns:
        action (ActionEvent): The action decided by the human
    '''
    if self.is_choosing_action_id:
        raise GinRummyProgramError("self.is_choosing_action_id must be False.")
    if self.state is not None:
        raise GinRummyProgramError("self.state must be None.")
    if self.chosen_action_id is not None:
        raise GinRummyProgramError("self.chosen_action_id={} must be None.".format(self.chosen_action_id))
    # Hand the state to the GUI thread and signal that we are waiting.
    self.state = state
    self.is_choosing_action_id = True
    # Busy-wait comparing against None, NOT truthiness: an action id of 0 is
    # a valid choice and the previous ``while not self.chosen_action_id``
    # would spin forever on it.
    while self.chosen_action_id is None:
        time.sleep(0.001)
    chosen_action_event = ActionEvent.decode_action(action_id=self.chosen_action_id)
    # Reset the handshake so the next call starts from a clean slate.
    self.state = None
    self.is_choosing_action_id = False
    self.chosen_action_id = None
    return chosen_action_event
def eval_step(self, state):
    ''' Predict the action given the current state for evaluation. The same to step here.

    Args:
        state (dict): a dictionary that represents the current state

    Returns:
        action (ActionEvent): the action chosen by the human player
    '''
return self.step(state), {} | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/agents/human_agents/gin_rummy_human_agent/gin_rummy_human_agent.py | 0.670716 | 0.264067 | gin_rummy_human_agent.py | pypi |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .game_canvas import GameCanvas
from typing import List
from rlcard.games.gin_rummy.game import GinRummyGame
from rlcard.games.gin_rummy.utils.action_event import DrawCardAction, PickUpDiscardAction, DeclareDeadHandAction
from rlcard.games.gin_rummy.utils.action_event import DiscardAction, KnockAction, GinAction
from rlcard.games.gin_rummy.utils.move import ScoreSouthMove
from . import configurations
from .configurations import SCORE_PLAYER_0_ACTION_ID, SCORE_PLAYER_1_ACTION_ID
from .player_type import PlayerType
class GameCanvasQuery(object):
    ''' Read-only queries about the current state of the Gin Rummy game canvas. '''

    def __init__(self, game_canvas: 'GameCanvas'):
        self.game_canvas = game_canvas

    def get_game(self) -> GinRummyGame:
        return self.game_canvas.game_canvas_updater.env_thread.gin_rummy_env.game

    def is_game_over(self) -> bool:
        # The game is over once the last move (up to the updater's mark)
        # is the final scoring move.
        result = False
        game = self.get_game()
        mark = self.game_canvas.game_canvas_updater.mark
        if game.round:
            moves = game.round.move_sheet[:mark]
            if moves:
                last_move = moves[-1]
                result = isinstance(last_move, ScoreSouthMove)
        return result

    def is_human(self, player_id: 'int | None') -> bool:
        # The previous annotation ``int or None`` evaluated to just ``int``;
        # a string annotation expresses the optional type without evaluation.
        return False if player_id is None else self.game_canvas.player_types[player_id] is PlayerType.human_player

    def is_dead_hand_button_visible(self):
        return self.game_canvas.dead_hand_button.place_info() != {}

    def is_going_out_button_visible(self):
        return self.game_canvas.going_out_button.place_info() != {}

    def can_draw_from_stock_pile(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(x, DrawCardAction) for x in legal_actions)

    def can_draw_from_discard_pile(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(x, PickUpDiscardAction) for x in legal_actions)

    def can_declare_dead_hand(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(x, DeclareDeadHandAction) for x in legal_actions)

    def can_discard_card(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(action, DiscardAction) for action in legal_actions)

    def can_knock(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(action, KnockAction) for action in legal_actions)

    def can_gin(self, player_id: int) -> bool:
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=player_id)
        return any(isinstance(action, GinAction) for action in legal_actions)

    def is_top_discard_pile_item_drawn(self) -> bool:
        result = False
        top_discard_pile_item_id = self.game_canvas.getter.get_top_discard_pile_item_id()
        if top_discard_pile_item_id:
            result = configurations.DRAWN_TAG in self.game_canvas.getter.get_tags(top_discard_pile_item_id)
        return result

    def is_top_stock_pile_item_drawn(self) -> bool:
        result = False
        top_stock_pile_item_id = self.game_canvas.getter.get_top_stock_pile_item_id()
        if top_stock_pile_item_id:
            result = configurations.DRAWN_TAG in self.game_canvas.getter.get_tags(top_stock_pile_item_id)
        return result

    def is_item_id_selected(self, item_id) -> bool:
        item_tags = self.game_canvas.getter.get_tags(item_id)
        return configurations.SELECTED_TAG in item_tags
@staticmethod
def is_scoring(legal_actions: List[int]) -> bool:
return SCORE_PLAYER_0_ACTION_ID in legal_actions or SCORE_PLAYER_1_ACTION_ID in legal_actions | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/agents/human_agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_query.py | 0.732209 | 0.166337 | game_canvas_query.py | pypi |
import os
import threading
import time
import timeit
import pprint
from collections import deque
import torch
from torch import multiprocessing as mp
from torch import nn
from .file_writer import FileWriter
from .model import DMCModel
from .pettingzoo_model import DMCModelPettingZoo
from .utils import (
get_batch,
create_buffers,
create_optimizers,
act,
log,
)
from .pettingzoo_utils import (
create_buffers_pettingzoo,
act_pettingzoo,
)
def compute_loss(logits, targets):
    """Mean squared error between the predicted values and the targets."""
    squared_error = (logits - targets) ** 2
    return squared_error.mean()
def learn(
    position,
    actor_models,
    agent,
    batch,
    optimizer,
    training_device,
    max_grad_norm,
    mean_episode_return_buf,
    lock
):
    """Performs a learning (optimization) step.

    Args:
        position (int): player position whose agent is being trained
        actor_models (dict): device -> actor model; their weights are synced
            with the learner after the optimizer step
        agent: the learner-side agent (network) for this position
        batch (dict): sampled batch with 'state', 'action', 'target',
            'episode_return' and 'done' entries
        optimizer: optimizer for ``agent``
        training_device (str): GPU index as a string, or 'cpu'
        max_grad_norm (float): gradient-clipping norm
        mean_episode_return_buf (list): per-position deque of recent returns
        lock: lock serializing optimization for this position

    Returns:
        dict: latest loss and mean episode return statistics
    """
    device = "cuda:"+str(training_device) if training_device != "cpu" else "cpu"
    # Merge the time and batch dimensions before feeding the network.
    state = torch.flatten(batch['state'].to(device), 0, 1).float()
    action = torch.flatten(batch['action'].to(device), 0, 1).float()
    target = torch.flatten(batch['target'].to(device), 0, 1)
    # Returns of the episodes that finished inside this batch.
    episode_returns = batch['episode_return'][batch['done']]
    mean_episode_return_buf[position].append(torch.mean(episode_returns).to(device))
    with lock:
        values = agent.forward(state, action)
        loss = compute_loss(values, target)
        stats = {
            'mean_episode_return_'+str(position): torch.mean(torch.stack([_r for _r in mean_episode_return_buf[position]])).item(),
            'loss_'+str(position): loss.item(),
        }
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)
        optimizer.step()
        # Push the freshly updated weights to every actor process's model.
        for actor_model in actor_models.values():
            actor_model.get_agent(position).load_state_dict(agent.state_dict())
        return stats
class DMCTrainer:
"""
Deep Monte-Carlo
Args:
env: RLCard environment
load_model (boolean): Whether loading an existing model
xpid (string): Experiment id (default: dmc)
save_interval (int): Time interval (in minutes) at which to save the model
num_actor_devices (int): The number devices used for simulation
num_actors (int): Number of actors for each simulation device
training_device (str): The index of the GPU used for training models, or `cpu`.
savedir (string): Root dir where experiment data will be saved
total_frames (int): Total environment frames to train for
exp_epsilon (float): The prbability for exploration
batch_size (int): Learner batch size
unroll_length (int): The unroll length (time dimension)
num_buffers (int): Number of shared-memory buffers
num_threads (int): Number learner threads
max_grad_norm (int): Max norm of gradients
learning_rate (float): Learning rate
alpha (float): RMSProp smoothing constant
momentum (float): RMSProp momentum
epsilon (float): RMSProp epsilon
"""
def __init__(
    self,
    env,
    cuda="",
    is_pettingzoo_env=False,
    load_model=False,
    xpid='dmc',
    save_interval=30,
    num_actor_devices=1,
    num_actors=5,
    training_device="0",
    savedir='experiments/dmc_result',
    total_frames=100000000000,
    exp_epsilon=0.01,
    batch_size=32,
    unroll_length=100,
    num_buffers=50,
    num_threads=4,
    max_grad_norm=40,
    learning_rate=0.0001,
    alpha=0.99,
    momentum=0,
    epsilon=0.00001
):
    ''' Initialize the trainer; see the class docstring for argument details. '''
    self.env = env

    # Metrics logger writing under <savedir>/<xpid>/.
    self.plogger = FileWriter(
        xpid=xpid,
        rootdir=savedir,
    )
    self.checkpointpath = os.path.expandvars(
        os.path.expanduser('%s/%s/%s' % (savedir, xpid, 'model.tar')))

    self.T = unroll_length
    self.B = batch_size

    self.xpid = xpid
    self.load_model = load_model
    self.savedir = savedir
    self.save_interval = save_interval
    self.num_actor_devices = num_actor_devices
    self.num_actors = num_actors
    self.training_device = training_device
    self.total_frames = total_frames
    self.exp_epsilon = exp_epsilon
    self.num_buffers = num_buffers
    self.num_threads = num_threads
    self.max_grad_norm = max_grad_norm
    self.learning_rate = learning_rate
    self.alpha = alpha
    self.momentum = momentum
    self.epsilon = epsilon
    self.is_pettingzoo_env = is_pettingzoo_env

    if not self.is_pettingzoo_env:
        self.num_players = self.env.num_players
        self.action_shape = self.env.action_shape
        # ``is None`` replaces the previous identity-unsafe ``== None``.
        if self.action_shape[0] is None:  # One-hot encoding
            self.action_shape = [[self.env.num_actions] for _ in range(self.num_players)]

        def model_func(device):
            return DMCModel(
                self.env.state_shape,
                self.action_shape,
                exp_epsilon=self.exp_epsilon,
                device=str(device),
            )
    else:
        self.num_players = self.env.num_agents

        def model_func(device):
            return DMCModelPettingZoo(
                self.env,
                exp_epsilon=self.exp_epsilon,
                device=device
            )

    self.model_func = model_func
    # Rolling window of recent episode returns per player position.
    self.mean_episode_return_buf = [deque(maxlen=100) for _ in range(self.num_players)]

    if cuda == "":  # Use CPU
        self.device_iterator = ['cpu']
        self.training_device = "cpu"
    else:
        self.device_iterator = range(num_actor_devices)
def start(self):
    ''' Run the full training loop: spawn actor processes, start learner
    threads, log throughput, and checkpoint periodically until
    ``total_frames`` frames have been consumed.
    '''
    # Initialize actor models, one shared-memory copy per simulation device.
    models = {}
    for device in self.device_iterator:
        model = self.model_func(device)
        model.share_memory()
        model.eval()
        models[device] = model

    # Initialize shared-memory experience buffers.
    if not self.is_pettingzoo_env:
        buffers = create_buffers(
            self.T,
            self.num_buffers,
            self.env.state_shape,
            self.action_shape,
            self.device_iterator,
        )
    else:
        buffers = create_buffers_pettingzoo(
            self.T,
            self.num_buffers,
            self.env,
            self.device_iterator,
        )

    # Initialize queues handing buffer slots between actors and learners.
    actor_processes = []
    ctx = mp.get_context('spawn')
    free_queue = {}
    full_queue = {}
    for device in self.device_iterator:
        _free_queue = [ctx.SimpleQueue() for _ in range(self.num_players)]
        _full_queue = [ctx.SimpleQueue() for _ in range(self.num_players)]
        free_queue[device] = _free_queue
        full_queue[device] = _full_queue

    # Learner model for training
    learner_model = self.model_func(self.training_device)

    # Create optimizers
    optimizers = create_optimizers(
        self.num_players,
        self.learning_rate,
        self.momentum,
        self.epsilon,
        self.alpha,
        learner_model,
    )

    # Stat keys reported for every player position.
    stat_keys = []
    for p in range(self.num_players):
        stat_keys.append('mean_episode_return_'+str(p))
        stat_keys.append('loss_'+str(p))
    frames, stats = 0, {k: 0 for k in stat_keys}

    # Resume from a checkpoint when requested and one exists.
    if self.load_model and os.path.exists(self.checkpointpath):
        checkpoint_states = torch.load(
            self.checkpointpath,
            map_location="cuda:"+str(self.training_device) if self.training_device != "cpu" else "cpu"
        )
        for p in range(self.num_players):
            learner_model.get_agent(p).load_state_dict(checkpoint_states["model_state_dict"][p])
            optimizers[p].load_state_dict(checkpoint_states["optimizer_state_dict"][p])
            for device in self.device_iterator:
                models[device].get_agent(p).load_state_dict(learner_model.get_agent(p).state_dict())
        stats = checkpoint_states["stats"]
        frames = checkpoint_states["frames"]
        log.info(f"Resuming preempted job, current stats:\n{stats}")

    # Starting actor processes.  (The previously assigned but unused local
    # ``num_actors = self.num_actors`` has been removed.)
    for device in self.device_iterator:
        for i in range(self.num_actors):
            actor = ctx.Process(
                target=act_pettingzoo if self.is_pettingzoo_env else act,
                args=(i, device, self.T, free_queue[device], full_queue[device], models[device], buffers[device], self.env))
            actor.start()
            actor_processes.append(actor)

    def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
        """Thread target for the learning process."""
        # ``lock`` defaults to one Lock shared by ALL learner threads on
        # purpose: it serializes updates to the shared ``frames``/``stats``.
        nonlocal frames, stats
        while frames < self.total_frames:
            batch = get_batch(
                free_queue[device][position],
                full_queue[device][position],
                buffers[device][position],
                self.B,
                local_lock
            )
            _stats = learn(
                position,
                models,
                learner_model.get_agent(position),
                batch,
                optimizers[position],
                self.training_device,
                self.max_grad_norm,
                self.mean_episode_return_buf,
                position_lock
            )
            with lock:
                for k in _stats:
                    stats[k] = _stats[k]
                to_log = dict(frames=frames)
                to_log.update({k: stats[k] for k in stat_keys})
                self.plogger.log(to_log)
                frames += self.T * self.B

    # Prime the free queues with every buffer index.
    for device in self.device_iterator:
        for m in range(self.num_buffers):
            for p in range(self.num_players):
                free_queue[device][p].put(m)

    # Start learner threads: num_threads per (device, position) pair.
    threads = []
    locks = {device: [threading.Lock() for _ in range(self.num_players)] for device in self.device_iterator}
    position_locks = [threading.Lock() for _ in range(self.num_players)]

    for device in self.device_iterator:
        for i in range(self.num_threads):
            for position in range(self.num_players):
                thread = threading.Thread(
                    target=batch_and_learn,
                    name='batch-and-learn-%d' % i,
                    args=(
                        i,
                        device,
                        position,
                        locks[device][position],
                        position_locks[position])
                )
                thread.start()
                threads.append(thread)

    def checkpoint(frames):
        # Persist learner weights, optimizer state and stats for resuming.
        log.info('Saving checkpoint to %s', self.checkpointpath)
        _agents = learner_model.get_agents()
        torch.save({
            'model_state_dict': [_agent.state_dict() for _agent in _agents],
            'optimizer_state_dict': [optimizer.state_dict() for optimizer in optimizers],
            "stats": stats,
            'frames': frames,
        }, self.checkpointpath)

        # Save the weights for evaluation purpose
        for position in range(self.num_players):
            model_weights_dir = os.path.expandvars(os.path.expanduser(
                '%s/%s/%s' % (self.savedir, self.xpid, str(position)+'_'+str(frames)+'.pth')))
            torch.save(
                learner_model.get_agent(position),
                model_weights_dir
            )

    # Monitoring loop: report fps and checkpoint every ``save_interval`` min.
    timer = timeit.default_timer
    try:
        last_checkpoint_time = timer() - self.save_interval * 60
        while frames < self.total_frames:
            start_frames = frames
            start_time = timer()
            time.sleep(5)

            if timer() - last_checkpoint_time > self.save_interval * 60:
                checkpoint(frames)
                last_checkpoint_time = timer()

            end_time = timer()
            fps = (frames - start_frames) / (end_time - start_time)
            log.info(
                'After %i frames: @ %.1f fps Stats:\n%s',
                frames,
                fps,
                pprint.pformat(stats),
            )
    except KeyboardInterrupt:
        return
    else:
        for thread in threads:
            thread.join()
        log.info('Learning finished after %d frames.', frames)

    checkpoint(frames)
self.plogger.close() | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/agents/dmc_agent/trainer.py | 0.818338 | 0.303758 | trainer.py | pypi |
import numpy as np
import torch
from torch import nn
class DMCNet(nn.Module):
    ''' MLP that estimates the Q-value of a flattened (state, action) pair. '''

    def __init__(
        self,
        state_shape,
        action_shape,
        mlp_layers=(512, 512, 512, 512, 512),
    ):
        ''' Build the network.

        Args:
            state_shape (list): shape of a single state observation
            action_shape (list): shape of a single action feature vector
            mlp_layers (sequence): hidden-layer sizes; a tuple default avoids
                the shared mutable-default-argument pitfall
        '''
        super().__init__()
        # Input is the concatenated flattened state and action features.
        input_dim = np.prod(state_shape) + np.prod(action_shape)
        layer_dims = [input_dim] + list(mlp_layers)
        fc = []
        for i in range(len(layer_dims)-1):
            fc.append(nn.Linear(layer_dims[i], layer_dims[i+1]))
            fc.append(nn.ReLU())
        fc.append(nn.Linear(layer_dims[-1], 1))
        self.fc_layers = nn.Sequential(*fc)

    def forward(self, obs, actions):
        ''' Return one scalar Q-value per (obs, action) row in the batch. '''
        obs = torch.flatten(obs, 1)
        actions = torch.flatten(actions, 1)
        x = torch.cat((obs, actions), dim=1)
        values = self.fc_layers(x).flatten()
        return values


class DMCAgent:
    ''' Deep Monte-Carlo agent: epsilon-greedy action selection over DMCNet
    Q-value estimates.
    '''

    def __init__(
        self,
        state_shape,
        action_shape,
        mlp_layers=(512, 512, 512, 512, 512),
        exp_epsilon=0.01,
        device="0",
    ):
        ''' Args:
            state_shape (list): shape of one observation
            action_shape (list): shape of one action feature vector
            mlp_layers (sequence): hidden-layer sizes for DMCNet
            exp_epsilon (float): exploration probability used by ``step``
            device (str): GPU index as a string, or "cpu"
        '''
        self.use_raw = False
        self.device = 'cuda:'+device if device != "cpu" else "cpu"
        self.net = DMCNet(state_shape, action_shape, mlp_layers).to(self.device)
        self.exp_epsilon = exp_epsilon
        self.action_shape = action_shape

    def step(self, state):
        ''' Choose an action for training: explore uniformly with probability
        ``exp_epsilon``, otherwise act greedily on predicted values.
        '''
        action_keys, values = self.predict(state)

        if self.exp_epsilon > 0 and np.random.rand() < self.exp_epsilon:
            action = np.random.choice(action_keys)
        else:
            action_idx = np.argmax(values)
            action = action_keys[action_idx]

        return action

    def eval_step(self, state):
        ''' Choose the greedy action for evaluation.

        Returns:
            tuple: (action id, info dict mapping raw action -> predicted value)
        '''
        action_keys, values = self.predict(state)
        action_idx = np.argmax(values)
        action = action_keys[action_idx]

        info = {}
        info['values'] = {state['raw_legal_actions'][i]: float(values[i]) for i in range(len(action_keys))}

        return action, info

    def share_memory(self):
        ''' Move network parameters to shared memory (for actor processes). '''
        self.net.share_memory()

    def eval(self):
        ''' Put the network into evaluation mode. '''
        self.net.eval()

    def parameters(self):
        ''' Expose the network parameters to the optimizer. '''
        return self.net.parameters()

    def predict(self, state):
        ''' Compute Q-values for every legal action in ``state``.

        Args:
            state (dict): with 'obs' (numpy array) and 'legal_actions'
                (mapping action id -> optional action feature vector)

        Returns:
            tuple: (array of action ids, array of predicted values)
        '''
        obs = state['obs'].astype(np.float32)
        legal_actions = state['legal_actions']
        action_keys = np.array(list(legal_actions.keys()))
        action_values = list(legal_actions.values())
        # One-hot encode actions that come without feature vectors.
        for i in range(len(action_values)):
            if action_values[i] is None:
                action_values[i] = np.zeros(self.action_shape[0])
                action_values[i][action_keys[i]] = 1
        action_values = np.array(action_values, dtype=np.float32)
        # Pair the same observation with every candidate action.
        obs = np.repeat(obs[np.newaxis, :], len(action_keys), axis=0)

        values = self.net.forward(torch.from_numpy(obs).to(self.device),
                                  torch.from_numpy(action_values).to(self.device))

        return action_keys, values.cpu().detach().numpy()

    def forward(self, obs, actions):
        return self.net.forward(obs, actions)

    def load_state_dict(self, state_dict):
        return self.net.load_state_dict(state_dict)

    def state_dict(self):
        return self.net.state_dict()

    def set_device(self, device):
        self.device = device


class DMCModel:
    ''' A bundle of one DMCAgent per player position. '''

    def __init__(
        self,
        state_shape,
        action_shape,
        mlp_layers=(512, 512, 512, 512, 512),
        exp_epsilon=0.01,
        device="0",
    ):
        # Note: ``device`` previously defaulted to the int 0, which crashed
        # inside DMCAgent ('cuda:'+0 raises TypeError); the string default
        # matches DMCAgent's contract.
        self.agents = []
        for player_id in range(len(state_shape)):
            agent = DMCAgent(
                state_shape[player_id],
                action_shape[player_id],
                mlp_layers,
                exp_epsilon,
                device,
            )
            self.agents.append(agent)

    def share_memory(self):
        for agent in self.agents:
            agent.share_memory()

    def eval(self):
        for agent in self.agents:
            agent.eval()

    def parameters(self, index):
        return self.agents[index].parameters()

    def get_agent(self, index):
        return self.agents[index]
def get_agents(self):
return self.agents | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/agents/dmc_agent/model.py | 0.879903 | 0.583381 | model.py | pypi |
import numpy as np
from collections import OrderedDict
from rlcard.envs import Env
from rlcard.games.mahjong import Game
from rlcard.games.mahjong import Card
from rlcard.games.mahjong.utils import card_encoding_dict, encode_cards, pile2list
class MahjongEnv(Env):
''' Mahjong Environment
'''
def __init__(self, config):
    ''' Initialize the Mahjong environment

    Args:
        config (dict): environment configuration passed through to ``Env``
    '''
    self.name = 'mahjong'
    self.game = Game()
    super().__init__(config)
    # Bidirectional mapping between action names and action ids.
    self.action_id = card_encoding_dict
    self.de_action_id = {self.action_id[key]: key for key in self.action_id.keys()}
    # Observation: six stacked 34x4 card encodings (see _extract_state).
    self.state_shape = [[6, 34, 4] for _ in range(self.num_players)]
    self.action_shape = [None for _ in range(self.num_players)]
def _extract_state(self, state):
    ''' Encode state

    Args:
        state (dict): dict of original state

    Returns:
        dict: with 'obs' a 6 x 34 x 4 numpy array whose six planes are:
            the current hand,
            the cards on the table,
            and each of the four players' piles,
        plus the legal actions, raw observation and action record.
        (The previous docstring's "6*5*15" shape did not match
        ``state_shape`` = [6, 34, 4].)
    '''
    players_pile = state['players_pile']
    hand_rep = encode_cards(state['current_hand'])
    piles_rep = []
    for p in players_pile.keys():
        piles_rep.append(encode_cards(pile2list(players_pile[p])))
    piles_rep = np.array(piles_rep)
    table_rep = encode_cards(state['table'])
    # Stack: hand, table, then the four players' piles -> 6 planes.
    rep = [hand_rep, table_rep]
    rep.extend(piles_rep)
    obs = np.array(rep)

    extracted_state = {'obs': obs, 'legal_actions': self._get_legal_actions()}
    extracted_state['raw_obs'] = state
    extracted_state['raw_legal_actions'] = [a for a in state['action_cards']]
    extracted_state['action_record'] = self.action_recorder
    return extracted_state
def get_payoffs(self):
    ''' Get the payoffs of the players.

    Returns:
        numpy.ndarray: one payoff per player (+1 for the winner, -1 for the
        losers, and all zeros when the game ends in a draw)
    '''
    _, player, _ = self.game.judger.judge_game(self.game)
    if player == -1:
        # No winner: the game is a draw.
        return np.array([0, 0, 0, 0])
    return np.array([1 if pid == player else -1 for pid in range(4)])
def _decode_action(self, action_id):
    ''' Action id -> the action in the game. Must be implemented in the child class.

    Args:
        action_id (int): the id of the action

    Returns:
        action (Card or string): the action that will be passed to the game engine.
    '''
    action = self.de_action_id[action_id]
    # Ids below 34 name concrete tiles: swap the string for the matching
    # Card object among the current legal actions so the engine receives a
    # Card.  (34 presumably equals the number of distinct tile kinds --
    # TODO confirm against card_encoding_dict.)
    if action_id < 34:
        candidates = self.game.get_legal_actions(self.game.get_state(self.game.round.current_player))
        for card in candidates:
            if card.get_str() == action:
                action = card
                break
    return action
def _get_legal_actions(self):
    ''' Get all legal actions for current state

    Returns:
        legal_actions (OrderedDict): keys are the legal action ids, values are None
    '''
    legal_action_id = {}
    legal_actions = self.game.get_legal_actions(self.game.get_state(self.game.round.current_player))
    if legal_actions:
        for action in legal_actions:
            # Normalize Card objects to their string form before id lookup.
            if isinstance(action, Card):
                action = action.get_str()
            action_id = self.action_id[action]
            legal_action_id[action_id] = None
    else:
        # NOTE(review): debugging leftovers -- reaching this branch means the
        # game produced no legal actions; consider raising an error instead.
        print("##########################")
        print("No Legal Actions")
        print(self.game.judger.judge_game(self.game))
        print(self.game.is_over())
        print([len(p.pile) for p in self.game.players])
        #print(self.game.get_state(self.game.round.current_player))
        #exit()
return OrderedDict(legal_action_id) | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/envs/mahjong.py | 0.469763 | 0.230205 | mahjong.py | pypi |
import importlib
# Default Config
DEFAULT_CONFIG = {
    'allow_step_back': False,
    'seed': None,
}


class EnvSpec(object):
    ''' A specification for a particular instance of the environment.
    '''

    def __init__(self, env_id, entry_point=None):
        ''' Initialize the spec.

        Args:
            env_id (string): The name of the environment
            entry_point (string): ``'module.path:ClassName'`` locating the
                environment class
        '''
        self.env_id = env_id
        module_name, class_name = entry_point.split(':')
        module = importlib.import_module(module_name)
        self._entry_point = getattr(module, class_name)

    def make(self, config=DEFAULT_CONFIG):
        ''' Instantiate an instance of the environment.

        Args:
            config (dict): A dictionary of the environment settings

        Returns:
            env (Env): An instance of the environment
        '''
        return self._entry_point(config)
class EnvRegistry(object):
    ''' Register an environment (game) by ID
    '''

    def __init__(self):
        ''' Create an empty registry. '''
        self.env_specs = {}

    def register(self, env_id, entry_point):
        ''' Register an environment.

        Args:
            env_id (string): The name of the environment
            entry_point (string): A string that indicates the location of the
                environment class

        Raises:
            ValueError: if ``env_id`` is already registered
        '''
        if env_id in self.env_specs:
            raise ValueError('Cannot re-register env_id: {}'.format(env_id))
        self.env_specs[env_id] = EnvSpec(env_id, entry_point)

    def make(self, env_id, config=DEFAULT_CONFIG):
        ''' Create an environment instance.

        Args:
            env_id (string): The name of the environment
            config (dict): A dictionary of the environment settings

        Raises:
            ValueError: if ``env_id`` has not been registered
        '''
        spec = self.env_specs.get(env_id)
        if spec is None:
            raise ValueError('Cannot find env_id: {}'.format(env_id))
        return spec.make(config)
# Module-level singleton registry used by the ``register``/``make`` helpers.
registry = EnvRegistry()
def register(env_id, entry_point):
    ''' Register an environment with the module-level registry.

    Args:
        env_id (string): The name of the environment
        entry_point (string): A string that indicates the location of the
            environment class
    '''
    return registry.register(env_id, entry_point)
def make(env_id, config=None):
    ''' Create an environment instance from the module-level registry.

    Args:
        env_id (string): The name of the environment
        config (dict, optional): Overrides applied on top of DEFAULT_CONFIG;
            defaults to no overrides.  (The old docstring documented a
            nonexistent ``env_num`` parameter.)

    Returns:
        env (Env): the instantiated environment
    '''
    # ``None`` sentinel instead of a mutable ``{}`` default argument.
    if config is None:
        config = {}
    _config = DEFAULT_CONFIG.copy()
    for key in config:
        _config[key] = config[key]
return registry.make(env_id, _config) | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/envs/registration.py | 0.668664 | 0.157266 | registration.py | pypi |
import numpy as np
from collections import OrderedDict
from rlcard.envs import Env
from rlcard.games.blackjack import Game
# Default game settings: a single player against the dealer, one deck.
DEFAULT_GAME_CONFIG = {
    'game_num_players': 1,
    'game_num_decks': 1
}
class BlackjackEnv(Env):
    ''' Blackjack Environment
    '''

    def __init__(self, config):
        ''' Initialize the Blackjack environment

        Args:
            config (dict): environment configuration passed through to ``Env``
        '''
        self.name = 'blackjack'
        self.default_game_config = DEFAULT_GAME_CONFIG
        self.game = Game()
        super().__init__(config)
        self.actions = ['hit', 'stand']
        # Observation is just (player_score, dealer_visible_score).
        self.state_shape = [[2] for _ in range(self.num_players)]
        self.action_shape = [None for _ in range(self.num_players)]

    def _get_legal_actions(self):
        ''' Get all legal actions

        Returns:
            encoded_action_list (list): return encoded legal action list (from str to int)
        '''
        # Both actions ('hit' and 'stand') are always legal in this encoding.
        encoded_action_list = []
        for i in range(len(self.actions)):
            encoded_action_list.append(i)
        return encoded_action_list

    def _extract_state(self, state):
        ''' Extract the state representation from state dictionary for agent

        Args:
            state (dict): Original state from the game

        Returns:
            extracted_state (dict): 'obs' holds the player's score and the
                dealer's observable score, plus legal/raw actions
        '''
        cards = state['state']
        my_cards = cards[0]
        dealer_cards = cards[1]
        my_score = get_score(my_cards)
        dealer_score = get_score(dealer_cards)
        obs = np.array([my_score, dealer_score])

        legal_actions = OrderedDict({i: None for i in range(len(self.actions))})
        extracted_state = {'obs': obs, 'legal_actions': legal_actions}
        extracted_state['raw_obs'] = state
        extracted_state['raw_legal_actions'] = [a for a in self.actions]
        extracted_state['action_record'] = self.action_recorder
        return extracted_state

    def get_payoffs(self):
        ''' Get the payoff of a game

        Returns:
            payoffs (list): list of payoffs
        '''
        # Winner codes per player: 2 = win, 1 = tie, otherwise loss.
        payoffs = []
        for i in range(self.num_players):
            if self.game.winner['player' + str(i)] == 2:
                payoffs.append(1)  # Dealer bust or player get higher score than dealer
            elif self.game.winner['player' + str(i)] == 1:
                payoffs.append(0)  # Dealer and player tie
            else:
                payoffs.append(-1)  # Player bust or Dealer get higher score than player
        return np.array(payoffs)

    def _decode_action(self, action_id):
        ''' Decode the action for applying to the game

        Args:
            action_id (int): action id

        Returns:
            action (str): action for the game
        '''
        return self.actions[action_id]
# Blackjack value of each rank (the card string minus its suit); aces start at 11.
rank2score = {"A":11, "2":2, "3":3, "4":4, "5":5, "6":6, "7":7, "8":8, "9":9, "T":10, "J":10, "Q":10, "K":10}
def get_score(hand):
    ''' Compute the Blackjack score of a hand.

    Aces count as 11 first, then are demoted to 1 (subtract 10) one at a
    time while the total exceeds 21.

    Args:
        hand (list): card strings such as 'SA' (suit followed by rank)

    Returns:
        score (int): the best value of the hand
    '''
    score = 0
    count_a = 0
    for card in hand:
        score += rank2score[card[1:]]
        if card[1] == 'A':
            count_a += 1
    while score > 21 and count_a > 0:
        count_a -= 1
        score -= 10
return score | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/envs/blackjack.py | 0.58818 | 0.264495 | blackjack.py | pypi |
import json
import os
import numpy as np
from collections import OrderedDict
import rlcard
from rlcard.envs import Env
from rlcard.games.limitholdem import Game
# Default game settings: heads-up (two players).
DEFAULT_GAME_CONFIG = {
    'game_num_players': 2,
}
class LimitholdemEnv(Env):
    ''' Limitholdem Environment
    '''

    def __init__(self, config):
        ''' Initialize the Limitholdem environment

        Args:
            config (dict): environment configuration passed through to ``Env``
        '''
        self.name = 'limit-holdem'
        self.default_game_config = DEFAULT_GAME_CONFIG
        self.game = Game()
        super().__init__(config)
        self.actions = ['call', 'raise', 'fold', 'check']
        # 72-dim observation: 52 card indicator bits plus, per round, a
        # 5-way one-hot of the raise count (see _extract_state).
        self.state_shape = [[72] for _ in range(self.num_players)]
        self.action_shape = [None for _ in range(self.num_players)]
        with open(os.path.join(rlcard.__path__[0], 'games/limitholdem/card2index.json'), 'r') as file:
            self.card2index = json.load(file)

    def _get_legal_actions(self):
        ''' Get all legal actions

        Returns:
            encoded_action_list (list): return encoded legal action list (from str to int)
        '''
        return self.game.get_legal_actions()

    def _extract_state(self, state):
        ''' Extract the state representation from state dictionary for agent

        Args:
            state (dict): Original state from the game

        Returns:
            extracted_state (dict): 'obs' is a 72-dim binary vector: one bit
                per card in hand/public cards and one bit per
                (round, raise-count) combination
        '''
        extracted_state = {}

        legal_actions = OrderedDict({self.actions.index(a): None for a in state['legal_actions']})
        extracted_state['legal_actions'] = legal_actions

        public_cards = state['public_cards']
        hand = state['hand']
        raise_nums = state['raise_nums']
        cards = public_cards + hand
        idx = [self.card2index[card] for card in cards]
        obs = np.zeros(72)
        obs[idx] = 1
        # One-hot encode the number of raises in each betting round.
        for i, num in enumerate(raise_nums):
            obs[52 + i * 5 + num] = 1
        extracted_state['obs'] = obs

        extracted_state['raw_obs'] = state
        extracted_state['raw_legal_actions'] = [a for a in state['legal_actions']]
        extracted_state['action_record'] = self.action_recorder

        return extracted_state

    def get_payoffs(self):
        ''' Get the payoff of a game

        Returns:
            payoffs (list): list of payoffs
        '''
        return self.game.get_payoffs()

    def _decode_action(self, action_id):
        ''' Decode the action for applying to the game

        Args:
            action_id (int): action id

        Returns:
            action (str): action for the game
        '''
        legal_actions = self.game.get_legal_actions()
        # Fall back to a safe action when the chosen one is illegal.
        if self.actions[action_id] not in legal_actions:
            if 'check' in legal_actions:
                return 'check'
            else:
                return 'fold'
        return self.actions[action_id]

    def get_perfect_information(self):
        ''' Get the perfect information of the current state

        Returns:
            (dict): A dictionary of all the perfect information of the current state
        '''
        state = {}
        state['chips'] = [self.game.players[i].in_chips for i in range(self.num_players)]
        state['public_card'] = [c.get_index() for c in self.game.public_cards] if self.game.public_cards else None
        state['hand_cards'] = [[c.get_index() for c in self.game.players[i].hand] for i in range(self.num_players)]
        state['current_player'] = self.game.game_pointer
        state['legal_actions'] = self.game.get_legal_actions()
return state | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/envs/limitholdem.py | 0.584983 | 0.22762 | limitholdem.py | pypi |
from collections import Counter, OrderedDict
import numpy as np
from rlcard.envs import Env
class DoudizhuEnv(Env):
    ''' Doudizhu Environment

    Wraps the Dou Dizhu game engine and encodes raw game states into
    fixed-length binary observation vectors (card encodings are built
    with the module-level ``_cards2array`` helpers).
    '''

    def __init__(self, config):
        ''' Initialize the environment.

        Args:
            config (dict): environment configuration forwarded to ``Env``
        '''
        # Deferred imports keep the doudizhu game engine off the import
        # path of callers that never instantiate this environment.
        from rlcard.games.doudizhu.utils import ACTION_2_ID, ID_2_ACTION
        from rlcard.games.doudizhu.utils import cards2str, cards2str_with_suit
        from rlcard.games.doudizhu import Game
        self._cards2str = cards2str
        self._cards2str_with_suit = cards2str_with_suit
        self._ACTION_2_ID = ACTION_2_ID
        self._ID_2_ACTION = ID_2_ACTION
        self.name = 'doudizhu'
        self.game = Game()
        super().__init__(config)
        # Per-seat observation sizes: [landlord, landlord_down, landlord_up].
        # NOTE(review): the landlord branch of _extract_state concatenates
        # 6 * 54 + 9 * 54 + 2 * 17 = 844 entries, which does not match the
        # declared 790 here -- confirm against consumers of state_shape.
        self.state_shape = [[790], [901], [901]]
        self.action_shape = [[54] for _ in range(self.num_players)]

    def _extract_state(self, state):
        ''' Encode the raw state dict into a flat binary observation.

        Args:
            state (dict): dict of original state

        Returns:
            (OrderedDict): with 'obs', 'legal_actions', 'raw_obs',
                'raw_legal_actions' and 'action_record' entries
        '''
        current_hand = _cards2array(state['current_hand'])
        others_hand = _cards2array(state['others_hand'])

        # Most recent non-pass move on the table (the move to beat).
        last_action = ''
        if len(state['trace']) != 0:
            if state['trace'][-1][1] == 'pass':
                last_action = state['trace'][-2][1]
            else:
                last_action = state['trace'][-1][1]
        last_action = _cards2array(last_action)

        last_9_actions = _action_seq2array(_process_action_seq(state['trace']))

        if state['self'] == 0:  # landlord
            landlord_up_played_cards = _cards2array(state['played_cards'][2])
            landlord_down_played_cards = _cards2array(state['played_cards'][1])
            landlord_up_num_cards_left = _get_one_hot_array(state['num_cards_left'][2], 17)
            landlord_down_num_cards_left = _get_one_hot_array(state['num_cards_left'][1], 17)
            obs = np.concatenate((current_hand,
                                  others_hand,
                                  last_action,
                                  last_9_actions,
                                  landlord_up_played_cards,
                                  landlord_down_played_cards,
                                  landlord_up_num_cards_left,
                                  landlord_down_num_cards_left))
        else:
            landlord_played_cards = _cards2array(state['played_cards'][0])

            # Most recent landlord move. Initializing to 'pass' and breaking
            # on the first hit fixes two defects in the previous version: a
            # NameError when the trace held no landlord move, and the loop
            # (scanning the reversed trace without break) ending up with the
            # landlord's *earliest* move instead of the latest one.
            last_landlord_action = 'pass'
            for pid, action in reversed(state['trace']):
                if pid == 0:
                    last_landlord_action = action
                    break
            last_landlord_action = _cards2array(last_landlord_action)

            landlord_num_cards_left = _get_one_hot_array(state['num_cards_left'][0], 20)

            teammate_id = 3 - state['self']  # the other peasant (1 <-> 2)
            teammate_played_cards = _cards2array(state['played_cards'][teammate_id])

            # Most recent teammate move (same break fix as above).
            last_teammate_action = 'pass'
            for pid, action in reversed(state['trace']):
                if pid == teammate_id:
                    last_teammate_action = action
                    break
            last_teammate_action = _cards2array(last_teammate_action)

            teammate_num_cards_left = _get_one_hot_array(state['num_cards_left'][teammate_id], 17)
            obs = np.concatenate((current_hand,
                                  others_hand,
                                  last_action,
                                  last_9_actions,
                                  landlord_played_cards,
                                  teammate_played_cards,
                                  last_landlord_action,
                                  last_teammate_action,
                                  landlord_num_cards_left,
                                  teammate_num_cards_left))

        extracted_state = OrderedDict({'obs': obs, 'legal_actions': self._get_legal_actions()})
        extracted_state['raw_obs'] = state
        extracted_state['raw_legal_actions'] = [a for a in state['actions']]
        extracted_state['action_record'] = self.action_recorder
        return extracted_state

    def get_payoffs(self):
        ''' Get the payoffs of players.

        Returns:
            payoffs (list): a list of payoffs for each player
        '''
        return self.game.judger.judge_payoffs(self.game.round.landlord_id, self.game.winner_id)

    def _decode_action(self, action_id):
        ''' Action id -> the action string understood by the game engine.

        Args:
            action_id (int): the id of the action

        Returns:
            action (string): the action that will be passed to the game engine.
        '''
        return self._ID_2_ACTION[action_id]

    def _get_legal_actions(self):
        ''' Get all legal actions for the current state.

        Returns:
            legal_actions (dict): maps each legal action id to its
                54-dim card-array feature (see ``_cards2array``)
        '''
        legal_actions = self.game.state['actions']
        legal_actions = {self._ACTION_2_ID[action]: _cards2array(action) for action in legal_actions}
        return legal_actions

    def get_perfect_information(self):
        ''' Get the perfect information of the current state

        Returns:
            (dict): A dictionary of all the perfect information of the current state
        '''
        state = {}
        state['hand_cards_with_suit'] = [self._cards2str_with_suit(player.current_hand) for player in self.game.players]
        state['hand_cards'] = [self._cards2str(player.current_hand) for player in self.game.players]
        state['trace'] = self.game.state['trace']
        state['current_player'] = self.game.round.current_player
        state['legal_actions'] = self.game.state['actions']
        return state

    def get_action_feature(self, action):
        ''' Encode an action id as its 54-dim card-array feature.

        Returns:
            (numpy.array): The action features
        '''
        return _cards2array(self._decode_action(action))
Card2Column = {'3': 0, '4': 1, '5': 2, '6': 3, '7': 4, '8': 5, '9': 6, 'T': 7,
'J': 8, 'Q': 9, 'K': 10, 'A': 11, '2': 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]),
1: np.array([1, 0, 0, 0]),
2: np.array([1, 1, 0, 0]),
3: np.array([1, 1, 1, 0]),
4: np.array([1, 1, 1, 1])}
def _cards2array(cards):
if cards == 'pass':
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(cards)
for card, num_times in counter.items():
if card == 'B':
jokers[0] = 1
elif card == 'R':
jokers[1] = 1
else:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
return np.concatenate((matrix.flatten('F'), jokers))
def _get_one_hot_array(num_left_cards, max_num_cards):
one_hot = np.zeros(max_num_cards, dtype=np.int8)
one_hot[num_left_cards - 1] = 1
return one_hot
def _action_seq2array(action_seq_list):
    ''' Flatten a list of card strings into one long binary vector.

    Each entry of *action_seq_list* is encoded with ``_cards2array``
    (54 dims) and the encodings are concatenated in order.
    '''
    rows = [_cards2array(cards) for cards in action_seq_list]
    if not rows:
        return np.zeros(0, dtype=np.int8)
    return np.concatenate(rows)
def _process_action_seq(sequence, length=9):
sequence = [action[1] for action in sequence[-length:]]
if len(sequence) < length:
empty_sequence = ['' for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence | /rlcard-uno-2.0.2.tar.gz/rlcard-uno-2.0.2/rlcard/envs/doudizhu.py | 0.598547 | 0.208723 | doudizhu.py | pypi |
import json
import os
import numpy as np
from collections import OrderedDict
import rlcard
from rlcard.envs import Env
from rlcard.games.leducholdem import Game
from rlcard.utils import *
# Default game configuration: two-player (heads-up) Leduc Hold'em.
# Keys here can be overridden by 'game_'-prefixed entries in the config
# dict passed to rlcard.make (see Env.__init__).
DEFAULT_GAME_CONFIG = {
        'game_num_players': 2,
        }
class LeducholdemEnv(Env):
    ''' Leduc Hold'em Environment
    '''

    def __init__(self, config):
        ''' Initialize the Leduc Hold'em environment.

        Args:
            config (dict): environment configuration forwarded to ``Env``
        '''
        self.name = 'leduc-holdem'
        self.default_game_config = DEFAULT_GAME_CONFIG
        self.game = Game()
        super().__init__(config)
        self.actions = ['call', 'raise', 'fold', 'check']
        # 36-dim observation: 3 + 3 (own/public card rank) + 15 + 15
        # (own/opponent chip counts); see _extract_state for the layout.
        self.state_shape = [[36] for _ in range(self.num_players)]
        self.action_shape = [None for _ in range(self.num_players)]
        with open(os.path.join(rlcard.__path__[0], 'games/leducholdem/card2index.json'), 'r') as file:
            self.card2index = json.load(file)

    def _get_legal_actions(self):
        ''' Get all legal actions of the current state.

        Returns:
            (list): raw legal action strings from the game engine.
                (The previous docstring claimed these were encoded to
                ids; they are not -- encoding happens in _extract_state.)
        '''
        return self.game.get_legal_actions()

    def _extract_state(self, state):
        ''' Extract a 36-dim observation vector from the raw state dict.

        Layout of ``obs`` (one-hot segments; assumes card2index maps the
        three ranks to 0-2 -- see card2index.json):
            [0:3)   rank of the player's hole card
            [3:6)   rank of the public card, all-zero before the flop
            [6:21)  number of chips the player has bet
            [21:36) number of chips the opponent has bet

        Args:
            state (dict): Original state from the game

        Returns:
            (dict): with 'obs', 'legal_actions', 'raw_obs',
                'raw_legal_actions' and 'action_record' entries
        '''
        extracted_state = {}

        legal_actions = OrderedDict({self.actions.index(a): None for a in state['legal_actions']})
        extracted_state['legal_actions'] = legal_actions

        public_card = state['public_card']
        hand = state['hand']
        obs = np.zeros(36)
        obs[self.card2index[hand]] = 1
        if public_card:
            obs[self.card2index[public_card] + 3] = 1
        obs[state['my_chips'] + 6] = 1
        # Opponent chips = total chips on the table minus our own.
        obs[sum(state['all_chips']) - state['my_chips'] + 21] = 1
        extracted_state['obs'] = obs

        extracted_state['raw_obs'] = state
        extracted_state['raw_legal_actions'] = [a for a in state['legal_actions']]
        extracted_state['action_record'] = self.action_recorder
        return extracted_state

    def get_payoffs(self):
        ''' Get the payoff of a game

        Returns:
            payoffs (list): list of payoffs
        '''
        return self.game.get_payoffs()

    def _decode_action(self, action_id):
        ''' Decode the action for applying to the game.

        Falls back to 'check' (or 'fold' if checking is illegal) when
        the decoded action is not currently legal.

        Args:
            action_id (int): action id

        Returns:
            action (str): action for the game
        '''
        legal_actions = self.game.get_legal_actions()
        if self.actions[action_id] not in legal_actions:
            if 'check' in legal_actions:
                return 'check'
            else:
                return 'fold'
        return self.actions[action_id]

    def get_perfect_information(self):
        ''' Get the perfect information of the current state

        Returns:
            (dict): A dictionary of all the perfect information of the
                current state.  Fix: the final return line previously
                carried dataset residue fused onto it (a syntax error).
        '''
        state = {}
        state['chips'] = [self.game.players[i].in_chips for i in range(self.num_players)]
        state['public_card'] = self.game.public_card.get_index() if self.game.public_card else None
        state['hand_cards'] = [self.game.players[i].hand.get_index() for i in range(self.num_players)]
        state['current_round'] = self.game.round_counter
        state['current_player'] = self.game.game_pointer
        state['legal_actions'] = self.game.get_legal_actions()
        return state
from rlcard.utils import *
class Env(object):
    '''
    The base Env class. For all the environments in RLCard,
    we should base on this class and implement as many functions
    as we can.
    '''
    def __init__(self, config):
        ''' Initialize the environment

        Args:
            config (dict): A config dictionary. All the fields are
                optional. Currently, the dictionary includes:
                'seed' (int) - An environment-local random seed.
                'allow_step_back' (boolean) - True if allowing
                step_back.
                There can be some game specific configurations, e.g., the
                number of players in the game. These fields should start with
                'game_', e.g., 'game_num_players' which specifies the number of
                players in the game. Since these configurations may be game-specific,
                the default settings should be put in the Env class. For example,
                the default game configurations for Blackjack should be in
                'rlcard/envs/blackjack.py'
                TODO: Support more game configurations in the future.
        '''
        self.allow_step_back = self.game.allow_step_back = config['allow_step_back']
        self.action_recorder = []

        # Game specific configurations
        # Currently only supported for blackjack, leduc-holdem, limit-holdem,
        # no-limit-holdem and uno
        # TODO support game configurations for all the games
        supported_envs = ['blackjack', 'leduc-holdem', 'limit-holdem', 'no-limit-holdem', 'uno']
        if self.name in supported_envs:
            _game_config = self.default_game_config.copy()
            for key in config:
                if key in _game_config:
                    _game_config[key] = config[key]
            self.game.configure(_game_config)

        # Get the number of players/actions in this game
        self.num_players = self.game.get_num_players()
        self.num_actions = self.game.get_num_actions()

        # A counter for the timesteps
        self.timestep = 0

        # Set random seed, default is None
        self.seed(config['seed'])

    def reset(self):
        ''' Start a new game

        Returns:
            (tuple): Tuple containing:

                (numpy.array): The beginning state of the game
                (int): The beginning player
        '''
        state, player_id = self.game.init_game()
        self.action_recorder = []
        return self._extract_state(state), player_id

    def step(self, action, raw_action=False):
        ''' Step forward

        Args:
            action (int): The action taken by the current player
            raw_action (boolean): True if the action is a raw action

        Returns:
            (tuple): Tuple containing:

                (dict): The next state
                (int): The ID of the next player
        '''
        if not raw_action:
            action = self._decode_action(action)

        self.timestep += 1
        # Record the action for human interface
        self.action_recorder.append((self.get_player_id(), action))
        next_state, player_id = self.game.step(action)

        return self._extract_state(next_state), player_id

    def step_back(self):
        ''' Take one step backward.

        Returns:
            (tuple): Tuple containing:

                (dict): The previous state
                (int): The ID of the previous player

        Note: Error will be raised if step back from the root node.
        '''
        if not self.allow_step_back:
            raise Exception('Step back is off. To use step_back, please set allow_step_back=True in rlcard.make')

        if not self.game.step_back():
            return False

        player_id = self.get_player_id()
        state = self.get_state(player_id)

        return state, player_id

    def set_agents(self, agents):
        '''
        Set the agents that will interact with the environment.
        This function must be called before `run`.

        Args:
            agents (list): List of Agent classes
        '''
        self.agents = agents

    def run(self, is_training=False):
        '''
        Run a complete game, either for evaluation or training RL agent.

        Args:
            is_training (boolean): True if for training purpose.

        Returns:
            (tuple) Tuple containing:

                (list): A list of trajectories generated from the environment.
                (list): A list of payoffs. Each entry corresponds to one player.

        Note: The trajectories are 3-dimension list. The first dimension is for different players.
              The second dimension is for different transitions. The third dimension is for the contents of each transition
        '''
        trajectories = [[] for _ in range(self.num_players)]
        state, player_id = self.reset()

        # Loop to play the game
        trajectories[player_id].append(state)
        while not self.is_over():
            # Agent plays
            if not is_training:
                action, _ = self.agents[player_id].eval_step(state)
            else:
                action = self.agents[player_id].step(state)

            # Environment steps
            next_state, next_player_id = self.step(action, self.agents[player_id].use_raw)
            # Save action
            trajectories[player_id].append(action)

            # Set the state and player
            state = next_state
            player_id = next_player_id

            # Save state.
            if not self.game.is_over():
                trajectories[player_id].append(state)

        # Add a final state to all the players
        for player_id in range(self.num_players):
            state = self.get_state(player_id)
            trajectories[player_id].append(state)

        # Payoffs
        payoffs = self.get_payoffs()

        return trajectories, payoffs

    def is_over(self):
        ''' Check whether the current game is over

        Returns:
            (boolean): True if current game is over
        '''
        return self.game.is_over()

    def get_player_id(self):
        ''' Get the current player id

        Returns:
            (int): The id of the current player
        '''
        return self.game.get_player_id()

    def get_state(self, player_id):
        ''' Get the state given player id

        Args:
            player_id (int): The player id

        Returns:
            (numpy.array): The observed state of the player
        '''
        return self._extract_state(self.game.get_state(player_id))

    def get_payoffs(self):
        ''' Get the payoffs of players.

        Returns:
            (list): A list of payoffs for each player.

        Note: Must be implemented in the child class.
        '''
        raise NotImplementedError

    def get_perfect_information(self):
        ''' Get the perfect information of the current state

        Returns:
            (dict): A dictionary of all the perfect information of the current state
        '''
        raise NotImplementedError

    def get_action_feature(self, action):
        ''' For some environments such as DouDizhu, we can have action features

        Returns:
            (numpy.array): The action features
        '''
        # By default we use one-hot encoding
        feature = np.zeros(self.num_actions, dtype=np.int8)
        feature[action] = 1
        return feature

    def seed(self, seed=None):
        ''' Set the environment-local random seed (shared with the game).

        Args:
            seed (int): seed value; None draws a fresh seed

        Returns:
            (int): the seed actually used
        '''
        self.np_random, seed = seeding.np_random(seed)
        self.game.np_random = self.np_random
        return seed

    def _extract_state(self, state):
        ''' Extract useful information from state for RL. Must be implemented in the child class.

        Args:
            state (dict): The raw state

        Returns:
            (numpy.array): The extracted state
        '''
        raise NotImplementedError

    def _decode_action(self, action_id):
        ''' Decode Action id to the action in the game.

        Args:
            action_id (int): The id of the action

        Returns:
            (string): The action that will be passed to the game engine.

        Note: Must be implemented in the child class.
        '''
        raise NotImplementedError

    def _get_legal_actions(self):
        ''' Get all legal actions for current state.

        Returns:
            (list): A list of legal actions' id.

        Note: Must be implemented in the child class.
            Fix: the final line previously carried dataset residue
            ("| /rlcard-1.2.0... | pypi |") fused onto it, a syntax error.
        '''
        raise NotImplementedError
# RLCard: A Toolkit for Reinforcement Learning in Card Games
<img width="500" src="https://dczha.com/files/rlcard/logo.jpg" alt="Logo" />
[](https://github.com/datamllab/rlcard/actions/workflows/python-package.yml)
[](https://badge.fury.io/py/rlcard)
[](https://coveralls.io/github/datamllab/rlcard?branch=master)
[](https://pepy.tech/project/rlcard)
[](https://pepy.tech/project/rlcard)
[](https://opensource.org/licenses/MIT)
[中文文档](README.zh-CN.md)
RLCard is a toolkit for Reinforcement Learning (RL) in card games. It supports multiple card environments with easy-to-use interfaces for implementing various reinforcement learning and searching algorithms. The goal of RLCard is to bridge reinforcement learning and imperfect information games. RLCard is developed by [DATA Lab](http://faculty.cs.tamu.edu/xiahu/) at Rice and Texas A&M University, and community contributors.
* Official Website: [https://www.rlcard.org](https://www.rlcard.org)
* Tutorial in Jupyter Notebook: [https://github.com/datamllab/rlcard-tutorial](https://github.com/datamllab/rlcard-tutorial)
* Paper: [https://arxiv.org/abs/1910.04376](https://arxiv.org/abs/1910.04376)
* Video: [YouTube](https://youtu.be/krK2jmSdKZc)
* GUI: [RLCard-Showdown](https://github.com/datamllab/rlcard-showdown)
* Dou Dizhu Demo: [Demo](https://douzero.org/)
* Resources: [Awesome-Game-AI](https://github.com/datamllab/awesome-game-ai)
* Related Project: [DouZero Project](https://github.com/kwai/DouZero)
* Zhihu: https://zhuanlan.zhihu.com/p/526723604
* Miscellaneous Resources: Have you heard of data-centric AI? Please check out our [data-centric AI survey](https://arxiv.org/abs/2303.10158) and [awesome data-centric AI resources](https://github.com/daochenzha/data-centric-AI)!
**Community:**
* **Slack**: Discuss in our [#rlcard-project](https://join.slack.com/t/rlcard/shared_invite/zt-rkvktsaq-xkMwz8BfKupCM6zGhO01xg) slack channel.
* **QQ Group**: Join our QQ group to discuss. Password: rlcardqqgroup
* Group 1: 665647450
* Group 2: 117349516
**News:**
* We have updated the tutorials in Jupyter Notebook to help you walk through RLCard! Please check [RLCard Tutorial](https://github.com/datamllab/rlcard-tutorial).
* All the algorithms can support [PettingZoo](https://github.com/PettingZoo-Team/PettingZoo) now. Please check [here](examples/pettingzoo). Thanks for the contribution from [Yifei Cheng](https://github.com/ycheng517).
* Please follow [DouZero](https://github.com/kwai/DouZero), a strong Dou Dizhu AI and the [ICML 2021 paper](https://arxiv.org/abs/2106.06135). An online demo is available [here](https://douzero.org/). The algorithm is also integrated in RLCard. See [Training DMC on Dou Dizhu](docs/toy-examples.md#training-dmc-on-dou-dizhu).
* Our package is used in [PettingZoo](https://github.com/PettingZoo-Team/PettingZoo). Please check it out!
* We have released RLCard-Showdown, GUI demo for RLCard. Please check out [here](https://github.com/datamllab/rlcard-showdown)!
* Jupyter Notebook tutorial available! We add some examples in R to call Python interfaces of RLCard with reticulate. See [here](docs/toy-examples-r.md)
* Thanks for the contribution of [@Clarit7](https://github.com/Clarit7) for supporting different number of players in Blackjack. We call for contributions for gradually making the games more configurable. See [here](CONTRIBUTING.md#making-configurable-environments) for more details.
* Thanks for the contribution of [@Clarit7](https://github.com/Clarit7) for the Blackjack and Limit Hold'em human interface.
* Now RLCard supports environment local seeding and multiprocessing. Thanks for the testing scripts provided by [@weepingwillowben](https://github.com/weepingwillowben).
* Human interface of NoLimit Holdem available. The action space of NoLimit Holdem has been abstracted. Thanks for the contribution of [@AdrianP-](https://github.com/AdrianP-).
* New game Gin Rummy and human GUI available. Thanks for the contribution of [@billh0420](https://github.com/billh0420).
* PyTorch implementation available. Thanks for the contribution of [@mjudell](https://github.com/mjudell).
## Contributors
The following games are mainly developed and maintained by community contributors. Thank you!
* Gin Rummy: [@billh0420](https://github.com/billh0420)
* Bridge: [@billh0420](https://github.com/billh0420)
Thank all the contributors!
<a href="https://github.com/daochenzha"><img src="https://github.com/daochenzha.png" width="40px" alt="daochenzha" /></a>
<a href="https://github.com/hsywhu"><img src="https://github.com/hsywhu.png" width="40px" alt="hsywhu" /></a>
<a href="https://github.com/CaoYuanpu"><img src="https://github.com/CaoYuanpu.png" width="40px" alt="CaoYuanpu" /></a>
<a href="https://github.com/billh0420"><img src="https://github.com/billh0420.png" width="40px" alt="billh0420" /></a>
<a href="https://github.com/ruzhwei"><img src="https://github.com/ruzhwei.png" width="40px" alt="ruzhwei" /></a>
<a href="https://github.com/adrianpgob"><img src="https://github.com/adrianpgob.png" width="40px" alt="adrianpgob" /></a>
<a href="https://github.com/Zhigal"><img src="https://github.com/Zhigal.png" width="40px" alt="Zhigal" /></a>
<a href="https://github.com/aypee19"><img src="https://github.com/aypee19.png" width="40px" alt="aypee19" /></a>
<a href="https://github.com/Clarit7"><img src="https://github.com/Clarit7.png" width="40px" alt="Clarit7" /></a>
<a href="https://github.com/lhenry15"><img src="https://github.com/lhenry15.png" width="40px" alt="lhenry15" /></a>
<a href="https://github.com/ismael-elatifi"><img src="https://github.com/ismael-elatifi.png" width="40px" alt="ismael-elatifi" /></a>
<a href="https://github.com/mjudell"><img src="https://github.com/mjudell.png" width="40px" alt="mjudell" /></a>
<a href="https://github.com/jkterry1"><img src="https://github.com/jkterry1.png" width="40px" alt="jkterry1" /></a>
<a href="https://github.com/kaanozdogru"><img src="https://github.com/kaanozdogru.png" width="40px" alt="kaanozdogru" /></a>
<a href="https://github.com/junyuGuo"><img src="https://github.com/junyuGuo.png" width="40px" alt="junyuGuo" /></a>
<br />
<a href="https://github.com/Xixo99"><img src="https://github.com/Xixo99.png" width="40px" alt="Xixo99" /></a>
<a href="https://github.com/rodrigodelazcano"><img src="https://github.com/rodrigodelazcano.png" width="40px" alt="rodrigodelazcano" /></a>
<a href="https://github.com/Michael1015198808"><img src="https://github.com/Michael1015198808.png" width="40px" alt="Michael1015198808" /></a>
<a href="https://github.com/mia1996"><img src="https://github.com/mia1996.png" width="40px" alt="mia1996" /></a>
<a href="https://github.com/kaiks"><img src="https://github.com/kaiks.png" width="40px" alt="kaiks" /></a>
<a href="https://github.com/claude9493"><img src="https://github.com/claude9493.png" width="40px" alt="claude9493" /></a>
<a href="https://github.com/SonSang"><img src="https://github.com/SonSang.png" width="40px" alt="SonSang" /></a>
<a href="https://github.com/rishabhvarshney14"><img src="https://github.com/rishabhvarshney14.png" width="40px" alt="rishabhvarshney14" /></a>
<a href="https://github.com/aetheryang"><img src="https://github.com/aetheryang.png" width="40px" alt="aetheryang" /></a>
<a href="https://github.com/rxng8"><img src="https://github.com/rxng8.png" width="40px" alt="rxng8" /></a>
<a href="https://github.com/nondecidibile"><img src="https://github.com/nondecidibile.png" width="40px" alt="nondecidibile" /></a>
<a href="https://github.com/benblack769"><img src="https://github.com/benblack769.png" width="40px" alt="benblack769" /></a>
<a href="https://github.com/zhengsx"><img src="https://github.com/zhengsx.png" width="40px" alt="zhengsx" /></a>
<a href="https://github.com/andrewnc"><img src="https://github.com/andrewnc.png" width="40px" alt="andrewnc" /></a>
## Cite this work
If you find this repo useful, you may cite:
Zha, Daochen, et al. "RLCard: A Platform for Reinforcement Learning in Card Games." IJCAI. 2020.
```bibtex
@inproceedings{zha2020rlcard,
title={RLCard: A Platform for Reinforcement Learning in Card Games},
author={Zha, Daochen and Lai, Kwei-Herng and Huang, Songyi and Cao, Yuanpu and Reddy, Keerthana and Vargas, Juan and Nguyen, Alex and Wei, Ruzhe and Guo, Junyu and Hu, Xia},
booktitle={IJCAI},
year={2020}
}
```
## Installation
Make sure that you have **Python 3.6+** and **pip** installed. We recommend installing the stable version of `rlcard` with `pip`:
```
pip3 install rlcard
```
The default installation will only include the card environments. To use PyTorch implementation of the training algorithms, run
```
pip3 install rlcard[torch]
```
If you are in China and the above command is too slow, you can use the mirror provided by Tsinghua University:
```
pip3 install rlcard -i https://pypi.tuna.tsinghua.edu.cn/simple
```
Alternatively, you can clone the latest version with (if you are in China and Github is slow, you can use the mirror in [Gitee](https://gitee.com/daochenzha/rlcard)):
```
git clone https://github.com/datamllab/rlcard.git
```
or only clone one branch to make it faster:
```
git clone -b master --single-branch --depth=1 https://github.com/datamllab/rlcard.git
```
Then install with
```
cd rlcard
pip3 install -e .
pip3 install -e .[torch]
```
We also provide [**conda** installation method](https://anaconda.org/toubun/rlcard):
```
conda install -c toubun rlcard
```
Conda installation only provides the card environments, you need to manually install Pytorch on your demands.
## Examples
A **short example** is as below.
```python
import rlcard
from rlcard.agents import RandomAgent
env = rlcard.make('blackjack')
env.set_agents([RandomAgent(num_actions=env.num_actions)])
print(env.num_actions) # 2
print(env.num_players) # 1
print(env.state_shape) # [[2]]
print(env.action_shape) # [None]
trajectories, payoffs = env.run()
```
RLCard can be flexibly connected to various algorithms. See the following examples:
* [Playing with random agents](docs/toy-examples.md#playing-with-random-agents)
* [Deep-Q learning on Blackjack](docs/toy-examples.md#deep-q-learning-on-blackjack)
* [Training CFR (chance sampling) on Leduc Hold'em](docs/toy-examples.md#training-cfr-on-leduc-holdem)
* [Having fun with pretrained Leduc model](docs/toy-examples.md#having-fun-with-pretrained-leduc-model)
* [Training DMC on Dou Dizhu](docs/toy-examples.md#training-dmc-on-dou-dizhu)
* [Evaluating Agents](docs/toy-examples.md#evaluating-agents)
* [Training Agents on PettingZoo](examples/pettingzoo)
## Demo
Run `examples/human/leduc_holdem_human.py` to play with the pre-trained Leduc Hold'em model. Leduc Hold'em is a simplified version of Texas Hold'em. Rules can be found [here](docs/games.md#leduc-holdem).
```
>> Leduc Hold'em pre-trained model
>> Start a new game!
>> Agent 1 chooses raise
=============== Community Card ===============
┌─────────┐
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
│░░░░░░░░░│
└─────────┘
=============== Your Hand ===============
┌─────────┐
│J │
│ │
│ │
│ ♥ │
│ │
│ │
│ J│
└─────────┘
=============== Chips ===============
Yours: +
Agent 1: +++
=========== Actions You Can Choose ===========
0: call, 1: raise, 2: fold
>> You choose action (integer):
```
We also provide a GUI for easy debugging. Please check [here](https://github.com/datamllab/rlcard-showdown/). Some demos:


## Available Environments
We provide a complexity estimation for the games on several aspects. **InfoSet Number:** the number of information sets; **InfoSet Size:** the average number of states in a single information set; **Action Size:** the size of the action space. **Name:** the name that should be passed to `rlcard.make` to create the game environment. We also provide the link to the documentation and the random example.
| Game | InfoSet Number | InfoSet Size | Action Size | Name | Usage |
| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------: | :---------------: | :---------: | :-------------: | :-----------------------------------------------------------------------------------------: |
| Blackjack ([wiki](https://en.wikipedia.org/wiki/Blackjack), [baike](https://baike.baidu.com/item/21%E7%82%B9/5481683?fr=aladdin)) | 10^3 | 10^1 | 10^0 | blackjack | [doc](docs/games.md#blackjack), [example](examples/run_random.py) |
| Leduc Hold’em ([paper](http://poker.cs.ualberta.ca/publications/UAI05.pdf)) | 10^2 | 10^2 | 10^0 | leduc-holdem | [doc](docs/games.md#leduc-holdem), [example](examples/run_random.py) |
| Limit Texas Hold'em ([wiki](https://en.wikipedia.org/wiki/Texas_hold_%27em), [baike](https://baike.baidu.com/item/%E5%BE%B7%E5%85%8B%E8%90%A8%E6%96%AF%E6%89%91%E5%85%8B/83440?fr=aladdin)) | 10^14 | 10^3 | 10^0 | limit-holdem | [doc](docs/games.md#limit-texas-holdem), [example](examples/run_random.py) |
| Dou Dizhu ([wiki](https://en.wikipedia.org/wiki/Dou_dizhu), [baike](https://baike.baidu.com/item/%E6%96%97%E5%9C%B0%E4%B8%BB/177997?fr=aladdin)) | 10^53 ~ 10^83 | 10^23 | 10^4 | doudizhu | [doc](docs/games.md#dou-dizhu), [example](examples/run_random.py) |
| Mahjong ([wiki](https://en.wikipedia.org/wiki/Competition_Mahjong_scoring_rules), [baike](https://baike.baidu.com/item/%E9%BA%BB%E5%B0%86/215)) | 10^121 | 10^48 | 10^2 | mahjong | [doc](docs/games.md#mahjong), [example](examples/run_random.py) |
| No-limit Texas Hold'em ([wiki](https://en.wikipedia.org/wiki/Texas_hold_%27em), [baike](https://baike.baidu.com/item/%E5%BE%B7%E5%85%8B%E8%90%A8%E6%96%AF%E6%89%91%E5%85%8B/83440?fr=aladdin)) | 10^162 | 10^3 | 10^4 | no-limit-holdem | [doc](docs/games.md#no-limit-texas-holdem), [example](examples/run_random.py) |
| UNO ([wiki](https://en.wikipedia.org/wiki/Uno_\(card_game\)), [baike](https://baike.baidu.com/item/UNO%E7%89%8C/2249587)) | 10^163 | 10^10 | 10^1 | uno | [doc](docs/games.md#uno), [example](examples/run_random.py) |
| Gin Rummy ([wiki](https://en.wikipedia.org/wiki/Gin_rummy), [baike](https://baike.baidu.com/item/%E9%87%91%E6%8B%89%E7%B1%B3/3471710)) | 10^52 | - | - | gin-rummy | [doc](docs/games.md#gin-rummy), [example](examples/run_random.py) |
| Bridge ([wiki](https://en.wikipedia.org/wiki/Bridge), [baike](https://baike.baidu.com/item/%E6%A1%A5%E7%89%8C/332030)) | | - | - | bridge | [doc](docs/games.md#bridge), [example](examples/run_random.py) |
## Supported Algorithms
| Algorithm | example | reference |
| :--------------------------------------: | :-----------------------------------------: | :------------------------------------------------------------------------------------------------------: |
| Deep Monte-Carlo (DMC) | [examples/run\_dmc.py](examples/run_dmc.py) | [[paper]](https://arxiv.org/abs/2106.06135) |
| Deep Q-Learning (DQN) | [examples/run\_rl.py](examples/run_rl.py) | [[paper]](https://arxiv.org/abs/1312.5602) |
| Neural Fictitious Self-Play (NFSP) | [examples/run\_rl.py](examples/run_rl.py) | [[paper]](https://arxiv.org/abs/1603.01121) |
| Counterfactual Regret Minimization (CFR) | [examples/run\_cfr.py](examples/run_cfr.py) | [[paper]](http://papers.nips.cc/paper/3306-regret-minimization-in-games-with-incomplete-information.pdf) |
## Pre-trained and Rule-based Models
We provide a [model zoo](rlcard/models) to serve as the baselines.
| Model | Explanation |
| :--------------------------------------: | :------------------------------------------------------: |
| leduc-holdem-cfr | Pre-trained CFR (chance sampling) model on Leduc Hold'em |
| leduc-holdem-rule-v1 | Rule-based model for Leduc Hold'em, v1 |
| leduc-holdem-rule-v2 | Rule-based model for Leduc Hold'em, v2 |
| uno-rule-v1 | Rule-based model for UNO, v1 |
| limit-holdem-rule-v1 | Rule-based model for Limit Texas Hold'em, v1 |
| doudizhu-rule-v1 | Rule-based model for Dou Dizhu, v1 |
| gin-rummy-novice-rule | Gin Rummy novice rule model |
## API Cheat Sheet
### How to create an environment
You can use the following interface to make an environment. You may optionally specify some configurations with a dictionary.
* **env = rlcard.make(env_id, config={})**: Make an environment. `env_id` is a string naming an environment; `config` is a dictionary that specifies some environment configurations, which are as follows.
* `seed`: Default `None`. Set an environment-local random seed for reproducing the results.
* `allow_step_back`: Default `False`. `True` if allowing `step_back` function to traverse backward in the tree.
* Game specific configurations: These fields start with `game_`. Currently, we only support `game_num_players` in Blackjack.
Once the environment is made, we can access some information of the game.
* **env.num_actions**: The number of actions.
* **env.num_players**: The number of players.
* **env.state_shape**: The shape of the state space of the observations.
* **env.action_shape**: The shape of the action features (Dou Dizhu's action can encoded as features)
### What is state in RLCard
State is a Python dictionary. It consists of observation `state['obs']`, legal actions `state['legal_actions']`, raw observation `state['raw_obs']` and raw legal actions `state['raw_legal_actions']`.
### Basic interfaces
The following interfaces provide a basic usage. They are easy to use but make assumptions about the agent. The agent must follow the [agent template](docs/developping-algorithms.md).
* **env.set_agents(agents)**: `agents` is a list of `Agent` object. The length of the list should be equal to the number of the players in the game.
* **env.run(is_training=False)**: Run a complete game and return trajectories and payoffs. The function can be used after the `set_agents` is called. If `is_training` is `True`, it will use `step` function in the agent to play the game. If `is_training` is `False`, `eval_step` will be called instead.
### Advanced interfaces
For advanced usage, the following interfaces allow flexible operations on the game tree. These interfaces do not make any assumptions about the agent.
* **env.reset()**: Initialize a game. Return the state and the first player ID.
* **env.step(action, raw_action=False)**: Take one step in the environment. `action` can be raw action or integer; `raw_action` should be `True` if the action is raw action (string).
* **env.step_back()**: Available only when `allow_step_back` is `True`. Take one step backward. This can be used for algorithms that operate on the game tree, such as CFR (chance sampling).
* **env.is_over()**: Return `True` if the current game is over. Otherwise, return `False`.
* **env.get_player_id()**: Return the Player ID of the current player.
* **env.get_state(player_id)**: Return the state that corresponds to `player_id`.
* **env.get_payoffs()**: In the end of the game, return a list of payoffs for all the players.
* **env.get_perfect_information()**: (Currently only support some of the games) Obtain the perfect information at the current state.
## Library Structure
The purposes of the main modules are listed as below:
* [/examples](examples): Examples of using RLCard.
* [/docs](docs): Documentation of RLCard.
* [/tests](tests): Testing scripts for RLCard.
* [/rlcard/agents](rlcard/agents): Reinforcement learning algorithms and human agents.
* [/rlcard/envs](rlcard/envs): Environment wrappers (state representation, action encoding etc.)
* [/rlcard/games](rlcard/games): Various game engines.
* [/rlcard/models](rlcard/models): Model zoo including pre-trained models and rule models.
## More Documents
For more documentation, please refer to the [Documents](docs/README.md) for general introductions. API documents are available at our [website](http://www.rlcard.org).
## Contributing
Contribution to this project is greatly appreciated! Please create an issue for feedbacks/bugs. If you want to contribute codes, please refer to [Contributing Guide](./CONTRIBUTING.md). If you have any questions, please contact [Daochen Zha](https://github.com/daochenzha) with [daochen.zha@rice.edu](mailto:daochen.zha@rice.edu).
## Acknowledgements
We would like to thank JJ World Network Technology Co.,LTD for the generous support and all the contributions from the community contributors.
| /rlcard-1.2.0.tar.gz/rlcard-1.2.0/README.md | 0.530966 | 0.918261 | README.md | pypi |
import requests
class RLClient:
    """Minimal HTTP client for the RL competition server.

    All calls authenticate with the team name/password given at construction
    time and return the JSON-decoded response body. On any non-200 response
    the server's message is printed and a generic ``Exception`` is raised.
    """

    def __init__(self, name, password, hostname="10.216.3.238"):
        """
        :param name: team name used as the ``team_id`` credential.
        :param password: team password.
        :param hostname: server host; the port (80) and path (/rl) are fixed.
        """
        self.TEAM_NAME = name
        self.TEAM_PASSWORD = password
        self.SERVER = "http://" + hostname + ":80/rl"

    @staticmethod
    def validate_ids(run_id, request_number):
        """Raise ValueError unless run_id >= 0 and 0 <= request_number <= 9999."""
        if run_id < 0:
            raise ValueError("run_id has an invalid value of {}".format(run_id))
        if request_number < 0 or request_number > 9999:
            raise ValueError("request_number has an invalid value of {}".format(request_number))

    def _credentials(self):
        """Return the authentication fields shared by every request."""
        return {
            "team_id": self.TEAM_NAME,
            "team_password": self.TEAM_PASSWORD
        }

    @staticmethod
    def _check_response(r):
        """Return the decoded JSON body, or raise on a non-200 status.

        The server's error text is printed first so the user can see why the
        call failed (same behavior as the original per-method handling).
        """
        if r.status_code != 200:
            print(r.text)
            raise Exception("Something went wrong, see message above.")
        return r.json()

    def get_context(self, run_id, request_number):
        """Fetch the context (visitor features) for one request."""
        self.validate_ids(run_id, request_number)
        params = self._credentials()
        params.update({
            "run_id": run_id,
            "request_number": request_number
        })
        r = requests.get(self.SERVER + "/get_context", params=params)
        return self._check_response(r)

    def serve_page(self, run_id, request_number, header, language, adtype, color, price):
        """Submit a page configuration (the chosen action) for one request."""
        self.validate_ids(run_id, request_number)
        data = self._credentials()
        data.update({
            "run_id": run_id,
            "request_number": request_number,
            "header": header,
            "language": language,
            "adtype": adtype,
            "color": color,
            "price": price
        })
        r = requests.post(self.SERVER + "/serve_page", data=data)
        return self._check_response(r)

    def reset_leaderboard(self):
        """Reset this team's leaderboard entry on the server."""
        r = requests.post(self.SERVER + "/reset_leaderboard", data=self._credentials())
        return self._check_response(r)
import numpy as np
import networkx as nx
def _rho(D, target_fraction=0.02, mode='gaussian'):
"""Calculates the RL rho values from a distance matrix"""
dcut = np.sort(D)[:,1 + int(len(D) * target_fraction)].mean()
if mode == 'classic':
r = np.array([len(np.where(d < dcut)[0]) for d in D])
elif mode == 'gaussian':
r = np.exp(-D ** 2/(dcut**2)).sum(axis=0)
else:
raise ValueError('Error: unknown density mode {}'.format(mode))
return r
def _delta(D, rho):
"""Calculated the RL delta values for points in a distribution"""
m = D.max() + 1.0
dm = D + np.identity(len(D)) * m
idel = [np.where(rho <= rho[i], m, dm[i]).argmin() for i in range(len(rho))]
d = [D[i, idel[i]] for i in range(len(idel))]
idel[rho.argmax()] = rho.argmax()
d[rho.argmax()] = max(d) + 1.0
return np.array(d), np.array(idel)
def _centres(d, sigma=10.0):
"""Find the centres in an RL clustering"""
dmean = d.mean()
dstdev = d.std()
dthresh = dmean + dstdev * sigma
centres = []
for i in range(len(d)):
if d[i] > dthresh:
centres.append(i)
return np.array(centres), dthresh
class RLClusterResult(object):
    """Container for the outputs of a Rodriguez-Laio clustering run.

    All fields start as ``None`` and are filled in by ``cluster()``:
    ``assignments`` (per-point cluster ids, -1 = unassigned), ``rhos``
    (densities), ``deltas``, ``centres`` (centre index per cluster) and
    ``threshold`` (delta cutoff used to pick centres).
    """

    def __init__(self):
        for field in ('assignments', 'rhos', 'deltas', 'centres', 'threshold'):
            setattr(self, field, None)
def cluster(d, target_fraction=0.02, sigma=5.0, mode='gaussian', rho=None):
    """Do Rodriguez-Laio clustering on a square-form distance matrix.

    :param d: square (n, n) distance matrix (anything np.array accepts).
    :param target_fraction: fraction of points used to set the density cutoff.
    :param sigma: standard deviations above the mean delta for a point to
        qualify as a cluster centre.
    :param mode: density estimator, 'gaussian' or 'classic'.
    :param rho: optional precomputed density vector of length n; when given,
        the density calculation is skipped.
    :returns: an RLClusterResult.
    :raises ValueError: if any input fails validation.
    """
    D = np.array(d)
    if len(D.shape) != 2:
        raise ValueError('Error - the input is not a 2D matrix')
    if D.shape[0] != D.shape[1]:
        raise ValueError('Error - the input distance matrix is not square')
    if target_fraction < 0 or target_fraction > 1.0:
        raise ValueError('Error: target fraction must be between 0.0 and 1.0')
    if mode not in ('gaussian', 'classic'):
        raise ValueError('Error: unknown density mode {}'.format(mode))
    if rho is None:
        densities = _rho(D, target_fraction=target_fraction, mode=mode)
    else:
        densities = np.array(rho)
        if len(densities.shape) != 1:
            raise ValueError('Error - rho must be a 1D vector')
        if len(densities) != D.shape[0]:
            raise ValueError('Error - rho must be a vector of length {}'.format(D.shape[0]))
    deltas, nearest = _delta(D, densities)
    centre_idx, dthresh = _centres(deltas, sigma=sigma)
    # Link every non-centre point to its nearest higher-density neighbour;
    # the weakly connected components of this graph are the clusters.
    dg = nx.DiGraph()
    for i in range(len(nearest)):
        if i not in centre_idx:
            dg.add_edge(i, nearest[i])
    # Largest component gets cluster id 0, and so on.
    components = sorted(nx.weakly_connected_components(dg), key=len, reverse=True)
    assignments = -np.ones(len(deltas), dtype=int)
    for cluster_id, component in enumerate(components):
        assignments[list(component)] = cluster_id
    n_clusters = len(components)
    result = RLClusterResult()
    result.assignments = assignments
    result.centres = np.zeros(n_clusters, dtype=int)
    # Record, for each cluster, which point is its centre.
    for i, cluster_id in enumerate(result.assignments[centre_idx]):
        if cluster_id > -1:
            result.centres[cluster_id] = centre_idx[i]
    result.rhos = np.array(densities)
    result.deltas = np.array(deltas)
    result.threshold = dthresh
    return result
def decision_graph(result, axes):
    """Plot an RL decision graph (rho vs delta) on a set of matplotlib axes.

    One scatter series is drawn per cluster id (including -1 for unassigned
    points), plus a dashed horizontal line at the centre-selection threshold.

    :param result: an RLClusterResult from cluster().
    :param axes: a matplotlib Axes object to draw on.
    :raises RuntimeError: if matplotlib is not installed.
    """
    try:
        # Import is only an availability check; plotting goes through `axes`.
        from matplotlib import pyplot as plt  # noqa: F401
    except ImportError:
        # Fixed: the message previously referred to a non-existent
        # RLdecisionGraph() function.
        raise RuntimeError('Error: decision_graph() requires matplotlib')
    for cluster_id in np.unique(result.assignments):
        mask = result.assignments == cluster_id
        axes.plot(result.rhos[mask], result.deltas[mask], 'o')
    axes.plot([result.rhos.min(), result.rhos.max()],
              [result.threshold, result.threshold], '--')
`rlda`: Robust Latent Dirichlet Allocation models
-------------------------
This python module provides a set of functions to fit multiple LDA models to a
text corpus and then search for the robust topics present in multiple models.
In natural language processing LDA models are used to classify text into topics. However, the substance of
topics often varies depending on model specification (e.g. number of *k* topics), making them
quite unstable (see Chuang_ 2015). This `python` module implements a method
proposed by Wilkerson and Casas (2017) to add a level of robustness when using
unsupervised topic models. You can find the replication material for the Wilkerson and Casas (2017) paper in this_ GitHub repository.
Please cite as:
Wilkerson, John and Andreu Casas. 2017. "Large-scale Computerized Text
Analysis in Political Science: Opportunities and Challenges." *Annual Review
of Political Science*, 20:x-x. (Forthcoming)
Installation
-------------------------
``pip install rlda``
Example: studying the topic of one-minute floor speeches
--------------------------------------------------------
>>> import rlda
>>> import random
Loading all one-minute floor speeches from House representatives of the 113th Congress (n = 9,704). This dataset already comes with the module
>>> sample_data = rlda.speeches_data
Each observation or speech is a `dictionary` with the following keys: bioguide_ide, speech, date, party, id, captiolwords_url.
.. image:: images/observation_example.png
:height: 100px
:width: 200 px
:scale: 50 %
:alt: alternate text
:align: center
Create a list containing only the speeches. Using only a sample of 1,000 random speeches for this example so that it runs faster.
>>> speeches = [d['speech'] for d in sample_data]
>>> random.seed(1)
>>> rand_vector = random.sample(xrange(len(speeches)), 1000)
>>> sample = speeches[:100]
Create an object of class RLDA so that you can implement all functions in this module
>>> robust_model = rlda.RLDA()
Pre-process the sample of speeches. These are the default settings, but you can choose your pre-processing parameters:
- Parsing speeches into words (features)
- Removing punctuation
- Removing stopwords (the default list, <stopw>, is the english stopwords list from the `nltk` module)
- Removing words shorter than 3 characters
- Stemming remaining words (Porter Stemmer)
>>> clean_speeches = rlda.pre_processing(sample, remove_punct = True,
remove_stopwords = True, stopwords_list = stopw,
remove_words_shorter_than = 3, steming = True)
Construct a Term Document Matrix (TDM) from the speeches text
>>> robust_model.get_tdm(clean_speeches)
Specify in a list the number of topics (k) of the LDA models you want to estimate. For example, 3 LDA models, one with 45 topics, one with 50, and one with 55
>>> k_list = [45, 50, 55]
Specify the number of iterations when estimating the LDA models (e.g. 300)
>>> n_iter = 300
Fit the multiple LDA models
>>> robust_model.fit_models(k_list = k_list, n_iter = n_iter)
Get the feature-topic-probability vectors for each topic, and also the top (e.g. 50) keywords for each topic
>>> robust_model.get_all_ftp(features_top_n = 50)
You can now explore the top keywords of a topic in the console by using this function and specifying the topic label: "k-t" where k = the number of topics of that model, and t = the topic number. For example, "45-1" is the first topic of the topic-model with 45 topics...
>>> robust_model.show_top_kws('45-1')
.. image:: images/topic_kws_example.png
:height: 100px
:width: 200 px
:scale: 50 %
:alt: alternate text
:align: center
... or you can also save the top keywords for each model's topics in a separate "csv" file and explore them in Excel
>>> robust_model.save_top_kws()
.. image:: images/tm_45_1_example.png
:height: 100px
:width: 200 px
:scale: 50 %
:alt: alternate text
:align: center
Save the classifications (topic with the highest probability) made by each lda model. Run this function to create a directory named "classifications" that will have as many "csv" files as topic-models you run. The "csv" files will have 2 variables: "top_topic" (the topic of each document), and "text" (the text of the document).
>>> robust_model.save_models_classificiations()
Clustering topics to get more robust meta-topics
--------------------------------------------------------
Create a cosine similarity matrix. Dimensions = TxT, where T = (number topics from all topic models). In this example the dimensions of the cosine matrix will be 150x150
>>> robust_model.get_cosine_matrix()
Clustering the topics into N clusters, e.g. 50 clusters, using Spectral_ Clustering.
>>> clusters = robust_model.cluster_topics(clusters_n = 50)
... still editing! To be continued...
.. _Chuang: http://www.aclweb.org/anthology/N15-1018
.. _Spectral: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html
.. _this: https://github.com/CasAndreu/wilkerson_casas_2016_TAD
| /rlda-0.61.tar.gz/rlda-0.61/README.rst | 0.910051 | 0.759894 | README.rst | pypi |
# coding=utf-8
"""TFDS episode writer."""
from typing import Optional
from absl import logging
from rlds import rlds_types
import tensorflow_datasets as tfds
DatasetConfig = tfds.rlds.rlds_base.DatasetConfig
class EpisodeWriter():
  """Writes RLDS episodes to disk as a sharded TFDS (tfrecord) dataset."""

  def __init__(self,
               data_directory: str,
               ds_config: DatasetConfig,
               max_episodes_per_file: int = 1000,
               split_name: Optional[str] = 'train',
               version: str = '0.0.1',
               overwrite: bool = True):
    """Constructor.

    Args:
      data_directory: Directory to store the data
      ds_config: Dataset Configuration.
      max_episodes_per_file: Number of episodes to store per shard.
      split_name: Name to be used by the split. If None, the name of the parent
        directory will be used.
      version: version (major.minor.patch) of the dataset.
      overwrite: if False, and there is an existing dataset, it will append to
        it.
    """
    self._data_directory = data_directory
    self._split_name = split_name
    # Build the dataset metadata from the config plus an identity record.
    identity = tfds.core.dataset_info.DatasetIdentity(
        name=ds_config.name,
        version=tfds.core.Version(version),
        data_dir=data_directory,
        module_name='')
    self._ds_info = tfds.rlds.rlds_base.build_info(ds_config, identity)
    self._ds_info.set_file_format('tfrecord')
    self._sequential_writer = tfds.core.SequentialWriter(
        self._ds_info, max_episodes_per_file, overwrite=overwrite)
    self._sequential_writer.initialize_splits([split_name],
                                              fail_if_exists=overwrite)
    logging.info('Creating dataset in: %r', self._data_directory)

  def add_episode(self, episode: rlds_types.Episode) -> None:
    """Appends one episode to the configured split.

    Args:
      episode: episode to add to the dataset.
    """
    self._sequential_writer.add_examples({self._split_name: [episode]})

  def close(self) -> None:
    """Finalizes all splits and flushes any pending shards to disk."""
    self._sequential_writer.close_all()
# coding=utf-8
"""Library to generate a TFDS config."""
from typing import Any, Dict, List, Optional, Union
import numpy as np
from rlds import rlds_types
import tensorflow as tf
import tensorflow_datasets as tfds
_STEP_KEYS = [
rlds_types.OBSERVATION, rlds_types.ACTION, rlds_types.DISCOUNT,
rlds_types.REWARD, rlds_types.IS_LAST, rlds_types.IS_FIRST,
rlds_types.IS_TERMINAL
]
def _is_image(data: tf.Tensor, field_name: Optional[str],
              image_encoding: str) -> bool:
  """Heuristically decides whether ``data`` should be encoded as an image.

  The decision requires (1) a field name containing 'image'/'Image', (2) a
  valid image encoding, and (3) data whose shape/dtype TFDS accepts for that
  encoding.
  """
  named_like_image = bool(field_name) and (
      'image' in field_name or 'Image' in field_name)
  if not named_like_image:
    return False
  try:
    tfds.features.image_feature.get_and_validate_encoding(image_encoding)
  except ValueError:
    return False
  if data.shape == tf.TensorShape([]):
    # A scalar can only be an image if it holds an already-encoded string.
    return data.dtype is tf.string or data.dtype is str
  if len(data.shape) > 4:
    # GIF images have 4 dims, the rest have 3.
    return False
  try:
    tfds.features.image_feature.get_and_validate_shape(
        data.shape, image_encoding)
    tfds.features.image_feature.get_and_validate_dtype(
        data.dtype, image_encoding)
  except ValueError:
    return False
  # Only single-channel tensors pass the final check (e.g. float32 images).
  return data.shape[-1] == 1
def _is_scalar(data: Union[tf.Tensor, np.ndarray],
               squeeze: bool = True) -> bool:
  """Checks if the data is a scalar.

  Args:
    data: data to check.
    squeeze: if True, considers shape (1,) as a scalar.

  Returns:
    True if data is a scalar.
  """
  # Anything without a shape attribute (plain Python scalars) is a scalar.
  if not hasattr(data, 'shape'):
    return True
  shape = data.shape
  # Numpy arrays with one element have shape (1,) and only count as scalars
  # when squeezing is allowed.
  if squeeze and shape == (1,):
    return True
  return shape == tf.TensorShape([]) or not shape
def extract_feature_from_data(
    data: Union[Dict[str, Any], List[Any], Union[tf.Tensor, np.ndarray, Any]],
    use_images: bool,
    image_encoding: Optional[str],
    field_name: Optional[str] = None,
    squeeze_scalars: bool = True
) -> Union[Dict[str, Any], tfds.features.FeatureConnector]:
  """Returns the TFDS feature type of the provided data.

  Args:
    data: supports data of the following types: nested dictionary/union/list of
      tf.Tensor, np.Arrays, scalars or types that have 'shape' and 'dtype' args.
      Lists have to contain uniform elements.
    use_images: if True, encodes uint8 tensors and string scalars with a field
      name that includes `image` or `Image` as images.
    image_encoding: if `use_images`, uses this encoding for the detected images.
    field_name: if present, is used to decide if data of tf.string type should
      be encoded as an image.
    squeeze_scalars: if True, it will treat arrays of shape (1,) as
      `tfds.features.Scalar`.

  Returns:
    the same nested data structure with the data expressed as TFDS Features.

  Raises:
    ValueError for data that is not supported by TFDS.
  """
  if isinstance(data, dict):
    # Recurse per key; the key doubles as the field name for image detection.
    return tfds.features.FeaturesDict({
        k: extract_feature_from_data(data[k], use_images, image_encoding, k,
                                     squeeze_scalars) for k in data
    })
  elif isinstance(data, tuple):
    raise ValueError('Tuples are not supported in TFDS. '
                     'Use dictionaries or lists instead.')
  elif isinstance(data, list):
    if not data:
      raise ValueError('Trying to extract the type of an empty list.')
    # Elements of a list are expected to have the same types. We don't check all
    # the elements of the list the same way that we don't check all the steps in
    # an episode.
    feature = extract_feature_from_data(data[0], use_images, image_encoding,
                                        field_name, squeeze_scalars)
    return tfds.features.Sequence(feature=feature)
  elif use_images and _is_image(data, field_name, image_encoding):
    if not image_encoding:
      raise ValueError('Image encoding is not defined.')
    if _is_scalar(data, squeeze_scalars):
      # Already-encoded image bytes: shape/dtype come from the encoding.
      return tfds.features.Image(encoding_format=image_encoding)
    else:
      return tfds.features.Image(
          shape=data.shape,
          dtype=tf.as_dtype(data.dtype),
          encoding_format=image_encoding)
    # NOTE: an unreachable `return data.dtype` used to follow this if/else;
    # it has been removed (both branches return).
  elif _is_scalar(data, squeeze_scalars):
    return tf.as_dtype(data.dtype)
  else:
    return tfds.features.Tensor(
        shape=data.shape,
        dtype=tf.as_dtype(data.dtype),
        encoding=tfds.features.Encoding.ZLIB)
def generate_config_from_spec(
    episode_spec: tf.TensorSpec,
    name: str = 'default_config',
    use_images: bool = True,
    image_encoding: str = 'png') -> tfds.rlds.rlds_base.DatasetConfig:
  """Builds a TFDS DatasetConfig from an RLDS episode spec.

  Args:
    episode_spec: RLDS episode spec in terms of tensor specs. This can be an
      environment spec or a tf.data.Dataset.element_spec.
    name: name of the config to generate.
    use_images: if True (default), encodes uint8 tensors and string scalars
      with a field name that includes `image` or `Image` as images.
    image_encoding: if `use_images`, uses this encoding for the detected
      images. Defaults to `png`. See `tfds.features.Image` for valid values.

  Returns:
    a DatasetConfig describing the dataset.
  """
  # Everything in the episode spec except the steps is episode metadata.
  episode_metadata = {}
  for key in episode_spec:
    if key == rlds_types.STEPS:
      continue
    episode_metadata[key] = extract_feature_from_data(
        episode_spec[key], use_images, image_encoding, key)
  step_spec = episode_spec[rlds_types.STEPS].element_spec
  # Step fields beyond the canonical RLDS ones are step metadata.
  step_metadata = {}
  for key in step_spec:
    if key in _STEP_KEYS:
      continue
    step_metadata[key] = extract_feature_from_data(
        step_spec[key], use_images, image_encoding, key)
  # pytype: disable=wrong-keyword-args
  return tfds.rlds.rlds_base.DatasetConfig(
      name=name,
      observation_info=extract_feature_from_data(
          step_spec[rlds_types.OBSERVATION], use_images, image_encoding),
      action_info=extract_feature_from_data(
          step_spec[rlds_types.ACTION], use_images, image_encoding),
      reward_info=extract_feature_from_data(
          step_spec[rlds_types.REWARD], use_images, image_encoding),
      discount_info=extract_feature_from_data(
          step_spec[rlds_types.DISCOUNT], use_images, image_encoding),
      episode_metadata_info=episode_metadata,
      step_metadata_info=step_metadata,
  )
  # pytype: enable=wrong-keyword-args
import time
import scipy.optimize
import theano
from rllab.core import Serializable
from rllab.misc import compile_function
from rllab.misc import flatten_tensor_variables
from rllab.misc import lazydict
class LbfgsOptimizer(Serializable):
    """
    Performs unconstrained optimization via L-BFGS.
    """

    def __init__(self, max_opt_itr=20, callback=None):
        """
        :param max_opt_itr: Maximum number of L-BFGS iterations per call to
            optimize().
        :param callback: Optional callable invoked once per iteration with a
            dict containing loss, params, itr and elapsed time.
        """
        Serializable.quick_init(self, locals())
        self._max_opt_itr = max_opt_itr
        self._opt_fun = None
        self._target = None
        self._callback = callback

    def update_opt(self,
                   loss,
                   target,
                   inputs,
                   extra_inputs=None,
                   gradients=None,
                   *args,
                   **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should
        implement methods of the :class:`rllab.core.paramerized.Parameterized`
        class.
        :param inputs: A list of symbolic variables as inputs
        :param extra_inputs: A list of additional symbolic variables that are
        appended to the inputs of the compiled functions.
        :param gradients: symbolic expressions for the gradients of trainable
        parameters of the target. By default this will be computed by calling
        theano.grad
        :return: No return value.
        """
        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(
                    loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            # scipy's fmin_l_bfgs_b expects float64 loss and gradient values.
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()
        # Compiled lazily, on first access.
        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss at the current parameter values."""
        if extra_inputs is None:
            extra_inputs = list()
        return self._opt_fun["f_loss"](*(list(inputs) + list(extra_inputs)))

    def optimize(self, inputs, extra_inputs=None):
        """Run up to max_opt_itr iterations of L-BFGS on the target's
        trainable parameters."""
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            # Bug fix: f_opt was compiled over inputs + extra_inputs, but
            # extra_inputs used to be dropped here, crashing whenever they
            # were nonempty.
            return f_opt(*(list(inputs) + list(extra_inputs)))

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](
                    *(list(inputs) + list(extra_inputs)))
                elapsed = time.time() - start_time
                self._callback(
                    dict(
                        loss=loss,
                        params=params,
                        itr=itr[0],
                        elapsed=elapsed,
                    ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper,
            x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr,
            callback=opt_callback,
        )
from _ast import Num
import itertools # noqa: I100,I201
import numpy as np
import theano
import theano.tensor as TT
from rllab.core import Serializable
from rllab.misc import ext
from rllab.misc import krylov
from rllab.misc import logger
from rllab.misc import sliced_fun
class PerlmutterHvp(Serializable):
    """Exact Hessian-vector products via Pearlmutter's trick.

    Rather than materializing the Hessian of ``f``, the product H x is built
    symbolically as grad(grad(f) . x), which Theano compiles directly.
    """

    def __init__(self, num_slices=1):
        # num_slices: number of chunks the inputs are split into when the
        # compiled product is evaluated (trades speed for peak memory).
        Serializable.quick_init(self, locals())
        self.target = None
        self.reg_coeff = None
        self.opt_fun = None
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Build and lazily compile the symbolic Hessian-vector product of f.

        :param f: scalar symbolic expression (e.g. a KL constraint term).
        :param target: parameterized object whose trainable parameters are the
            variables the Hessian is taken with respect to.
        :param inputs: list of symbolic input variables needed to evaluate f.
        :param reg_coeff: damping coefficient; build_eval adds reg_coeff * x
            so the effective operator is (H + reg_coeff * I).
        """
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        # One direction tensor per parameter tensor, with matching shape.
        xs = tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])

        def Hx_plain():
            # Pearlmutter trick: H x = grad( sum_i grad(f)_i . x_i ),
            # flattened into a single vector.
            Hx_plain_splits = TT.grad(
                TT.sum([TT.sum(g * x) for g, x in zip(constraint_grads, xs)]),
                wrt=params,
                disconnected_inputs='warn')
            return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])

        self.opt_fun = ext.lazydict(
            f_Hx_plain=lambda: ext.compile_function(
                inputs=inputs + xs,
                outputs=Hx_plain(),
                log_name="f_Hx_plain",
            ),
        )

    def build_eval(self, inputs):
        """Return a callable mapping a flat vector x to (H + reg_coeff*I) x."""

        def eval(x):
            # Unflatten x into per-parameter tensors, evaluate the compiled
            # product (possibly in slices), then add the damping term.
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret

        return eval
class FiniteDifferenceHvp(Serializable):
    """Approximate Hessian-vector products via finite differences of the
    gradient, avoiding a second symbolic differentiation pass."""

    def __init__(self,
                 base_eps=1e-8,
                 symmetric=True,
                 grad_clip=None,
                 num_slices=1):
        # base_eps: base perturbation size; rescaled by 1/||params|| at use.
        # symmetric: if True, use the central difference (two gradient
        #     evaluations) instead of the one-sided forward difference.
        # grad_clip: stored but not referenced anywhere in this class as
        #     written — presumably for subclasses/callers; confirm.
        # num_slices: chunks to split inputs into when evaluating.
        Serializable.quick_init(self, locals())
        self.base_eps = base_eps
        self.symmetric = symmetric
        self.grad_clip = grad_clip
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Compile the gradient of f and define the finite-difference H*x.

        :param f: scalar symbolic expression whose Hessian is approximated.
        :param target: parameterized object providing get/set_param_values.
        :param inputs: list of symbolic input variables for f.
        :param reg_coeff: damping coefficient used by build_eval.
        """
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        flat_grad = ext.flatten_tensor_variables(constraint_grads)

        def f_Hx_plain(*args):
            # args = ordinary inputs followed by the direction tensors
            # (one per parameter tensor).
            inputs_ = args[:len(inputs)]
            xs = args[len(inputs):]
            flat_xs = np.concatenate([np.reshape(x, (-1, )) for x in xs])
            param_val = self.target.get_param_values(trainable=True)
            # Scale the step inversely with the parameter norm so the
            # relative perturbation stays roughly constant.
            eps = np.cast['float32'](
                self.base_eps / (np.linalg.norm(param_val) + 1e-8))
            self.target.set_param_values(
                param_val + eps * flat_xs, trainable=True)
            flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
            if self.symmetric:
                # Central difference: (g(p + eps x) - g(p - eps x)) / (2 eps).
                self.target.set_param_values(
                    param_val - eps * flat_xs, trainable=True)
                flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
                self.target.set_param_values(param_val, trainable=True)
            else:
                # Forward difference: (g(p + eps x) - g(p)) / eps.
                # Parameters must be restored before evaluating g(p).
                self.target.set_param_values(param_val, trainable=True)
                flat_grad = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad) / eps
            return hx

        self.opt_fun = ext.lazydict(
            f_grad=lambda: ext.compile_function(
                inputs=inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_Hx_plain=lambda: f_Hx_plain,
        )

    def build_eval(self, inputs):
        """Return a callable mapping a flat vector x to (H + reg_coeff*I) x."""

        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret

        return eval
class ConjugateGradientOptimizer(Serializable):
    """
    Performs constrained optimization via line search. The search direction is
    computed using a conjugate gradient algorithm, which gives x = A^{-1}g,
    where A is a second order approximation of the constraint and g is the
    gradient of the loss function.
    """

    def __init__(self,
                 cg_iters=10,
                 reg_coeff=1e-5,
                 subsample_factor=1.,
                 backtrack_ratio=0.8,
                 max_backtracks=15,
                 accept_violation=False,
                 hvp_approach=None,
                 num_slices=1):
        """
        :param cg_iters: The number of CG iterations used to calculate A^-1 g
        :param reg_coeff: A small value so that A -> A + reg*I
        :param subsample_factor: Subsampling factor to reduce samples when
        using conjugate gradient. Since the computation time for the descent
        direction dominates, this can greatly reduce the overall computation
        time.
        :param backtrack_ratio: Multiplicative shrink factor applied to the
        step size at each backtracking iteration of the line search.
        :param max_backtracks: Maximum number of backtracking iterations
        before the line search gives up.
        :param accept_violation: whether to accept the descent step if it
        violates the line search condition after exhausting all backtracking
        budgets
        :param hvp_approach: Strategy for Hessian-vector products; defaults
        to PerlmutterHvp (exact symbolic products).
        :param num_slices: Number of chunks inputs are split into when
        evaluating the compiled functions (reduces peak memory).
        :return:
        """
        Serializable.quick_init(self, locals())
        self._cg_iters = cg_iters
        self._reg_coeff = reg_coeff
        self._subsample_factor = subsample_factor
        self._backtrack_ratio = backtrack_ratio
        self._max_backtracks = max_backtracks
        self._num_slices = num_slices

        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None
        self._accept_violation = accept_violation
        if hvp_approach is None:
            hvp_approach = PerlmutterHvp(num_slices)
        self._hvp_approach = hvp_approach

    def update_opt(self,
                   loss,
                   target,
                   leq_constraint,
                   inputs,
                   extra_inputs=None,
                   constraint_name="constraint",
                   *args,
                   **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should
        implement methods of the :class:`rllab.core.paramerized.Parameterized`
        class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon),
        of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs, which could be
        subsampled if needed. It is assumed that the first dimension of these
        inputs should correspond to the number of data points
        :param extra_inputs: A list of symbolic variables as extra inputs
        which should not be subsampled
        :param constraint_name: Name used in log messages for the constraint.
        :return: No return value.
        """
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        else:
            extra_inputs = tuple(extra_inputs)

        constraint_term, constraint_value = leq_constraint

        params = target.get_params(trainable=True)
        grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
        flat_grad = ext.flatten_tensor_variables(grads)

        # Set up Hessian-vector products of the constraint term; the CG
        # solver below only ever needs H*x, never H itself.
        self._hvp_approach.update_opt(
            f=constraint_term,
            target=target,
            inputs=inputs + extra_inputs,
            reg_coeff=self._reg_coeff)

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        # Functions are compiled lazily, on first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=loss,
                log_name="f_loss",
            ),
            f_grad=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_constraint=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=constraint_term,
                log_name="constraint",
            ),
            f_loss_constraint=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=[loss, constraint_term],
                log_name="f_loss_constraint",
            ),
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss at the current parameter values."""
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun["f_loss"],
                          self._num_slices)(inputs, extra_inputs)

    def constraint_val(self, inputs, extra_inputs=None):
        """Evaluate the constraint term at the current parameter values."""
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun["f_constraint"],
                          self._num_slices)(inputs, extra_inputs)

    def optimize(self,
                 inputs,
                 extra_inputs=None,
                 subsample_grouped_inputs=None):
        """Take one constrained step: CG for the descent direction, then a
        backtracking line search subject to the constraint bound.

        :param inputs: data arrays (first dimension indexes data points).
        :param extra_inputs: extra arrays that are never subsampled.
        :param subsample_grouped_inputs: optional groups of arrays that must
            be subsampled with the same row indices to stay aligned.
        """
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()

        if self._subsample_factor < 1:
            if subsample_grouped_inputs is None:
                subsample_grouped_inputs = [inputs]
            subsample_inputs = tuple()
            for inputs_grouped in subsample_grouped_inputs:
                n_samples = len(inputs_grouped[0])
                # Use the same row indices for every array in the group so
                # corresponding entries stay aligned.
                inds = np.random.choice(
                    n_samples,
                    int(n_samples * self._subsample_factor),
                    replace=False)
                subsample_inputs += tuple([x[inds] for x in inputs_grouped])
        else:
            subsample_inputs = inputs

        logger.log("computing loss before")
        loss_before = sliced_fun(self._opt_fun["f_loss"],
                                 self._num_slices)(inputs, extra_inputs)
        logger.log("performing update")
        logger.log("computing descent direction")

        flat_g = sliced_fun(self._opt_fun["f_grad"],
                            self._num_slices)(inputs, extra_inputs)

        # Solve (H + reg*I) d = g with conjugate gradient; the Hvp is
        # evaluated on the (possibly subsampled) inputs for speed.
        Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)

        descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)

        # Largest step that keeps the quadratic constraint model at or below
        # its bound: sqrt(2 * max_constraint / (d^T H d)).
        initial_step_size = np.sqrt(
            2.0 * self._max_constraint_val *
            (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8)))
        if np.isnan(initial_step_size):
            initial_step_size = 1.
        flat_descent_step = initial_step_size * descent_direction

        logger.log("descent direction computed")

        prev_param = np.copy(self._target.get_param_values(trainable=True))
        n_iter = 0
        # Backtracking line search over step sizes ratio^0, ratio^1, ...
        for n_iter, ratio in enumerate(self._backtrack_ratio
                                       **np.arange(self._max_backtracks)):
            cur_step = ratio * flat_descent_step
            cur_param = prev_param - cur_step
            self._target.set_param_values(cur_param, trainable=True)
            loss, constraint_val = sliced_fun(
                self._opt_fun["f_loss_constraint"],
                self._num_slices)(inputs, extra_inputs)
            # Accept the first step that both improves the loss and satisfies
            # the constraint bound.
            if loss < loss_before \
                    and constraint_val <= self._max_constraint_val:
                break
        # If the last tried step still violates the conditions, roll back to
        # the previous parameters (unless violations are explicitly accepted).
        if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before
                or constraint_val >= self._max_constraint_val
            ) and not self._accept_violation:
            logger.log("Line search condition violated. Rejecting the step!")
            if np.isnan(loss):
                logger.log("Violated because loss is NaN")
            if np.isnan(constraint_val):
                logger.log("Violated because constraint %s is NaN" %
                           self._constraint_name)
            if loss >= loss_before:
                logger.log("Violated because loss not improving")
            if constraint_val >= self._max_constraint_val:
                logger.log("Violated because constraint %s is violated" %
                           self._constraint_name)
            self._target.set_param_values(prev_param, trainable=True)
        logger.log("backtrack iters: %d" % n_iter)
        logger.log("computing loss after")
        # NOTE(review): despite the log line above, the post-update loss is
        # never actually recomputed here — confirm whether that is intended.
        logger.log("optimization finished")
import time

from rllab.core import Serializable
from rllab.misc import compile_function
from rllab.misc import lazydict
from rllab.optimizers import BatchDataset
from rllab.optimizers import hf_optimizer
class HessianFreeOptimizer(Serializable):
    """
    Performs unconstrained optimization via Hessian-Free Optimization
    """

    def __init__(self,
                 max_opt_itr=20,
                 batch_size=32,
                 cg_batch_size=100,
                 callback=None):
        """
        :param max_opt_itr: Number of HF updates per call to optimize().
        :param batch_size: Mini-batch size for the gradient dataset.
        :param cg_batch_size: Mini-batch size for the CG dataset.
        :param callback: Optional callable invoked once per iteration with a
            dict containing loss, params, itr and elapsed time.
        """
        Serializable.quick_init(self, locals())
        self._max_opt_itr = max_opt_itr
        self._batch_size = batch_size
        self._cg_batch_size = cg_batch_size
        self._callback = callback
        self._opt_fun = None
        self._target = None
        self._hf_optimizer = None

    def update_opt(self,
                   loss,
                   target,
                   inputs,
                   network_outputs,
                   extra_inputs=None):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should
        implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param network_outputs: Symbolic network outputs, needed by the HF
        optimizer to build its curvature products.
        :param extra_inputs: Additional symbolic inputs appended to `inputs`.
        :return: No return value.
        """
        self._target = target
        if extra_inputs is None:
            extra_inputs = []
        self._hf_optimizer = hf_optimizer(
            _p=target.get_params(trainable=True),
            inputs=(inputs + extra_inputs),
            s=network_outputs,
            costs=[loss],
        )
        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss at the current parameter values."""
        extra = [] if extra_inputs is None else extra_inputs
        return self._opt_fun["f_loss"](*(inputs + extra))

    def optimize(self, inputs, extra_inputs=None):
        """Run Hessian-Free training for max_opt_itr updates."""
        if extra_inputs is None:
            extra_inputs = []
        gradient_data = BatchDataset(
            inputs=inputs,
            batch_size=self._batch_size,
            extra_inputs=extra_inputs)
        cg_data = BatchDataset(
            inputs=inputs,
            batch_size=self._cg_batch_size,
            extra_inputs=extra_inputs)

        iteration = [0]
        t_start = time.time()

        opt_callback = None
        if self._callback:
            def opt_callback():
                self._callback(
                    dict(
                        loss=self._opt_fun["f_loss"](*(inputs + extra_inputs)),
                        params=self._target.get_param_values(trainable=True),
                        itr=iteration[0],
                        elapsed=time.time() - t_start,
                    ))
                iteration[0] += 1

        self._hf_optimizer.train(
            gradient_dataset=gradient_data,
            cg_dataset=cg_data,
            itr_callback=opt_callback,
            num_updates=self._max_opt_itr,
            preconditioner=True,
            verbose=True)
from collections import OrderedDict
from functools import partial
import time
import lasagne.updates
import pyprind
import theano
from rllab.core import Serializable
from rllab.misc import ext
from rllab.misc import logger
from rllab.optimizers import BatchDataset
class FirstOrderOptimizer(Serializable):
    """
    Performs (stochastic) gradient descent, possibly using fancier methods like
    adam etc.
    """

    def __init__(self,
                 update_method=lasagne.updates.adam,
                 learning_rate=1e-3,
                 max_epochs=1000,
                 tolerance=1e-6,
                 batch_size=32,
                 callback=None,
                 verbose=False,
                 **kwargs):
        """
        :param max_epochs: Maximum number of passes over the data.
        :param tolerance: Early-stopping threshold on the absolute change in
            loss between consecutive epochs.
        :param update_method: Lasagne update factory mapping
            (gradients, params) to an updates dict, e.g.
            ``lasagne.updates.adam``.
        :param batch_size: None or an integer. If None the whole dataset will be
            used.
        :param callback: Optional callable invoked once per epoch with a dict
            of diagnostics (loss, params, itr, elapsed).
        :param kwargs: Ignored; accepted for interface compatibility.
        :return:
        """
        Serializable.quick_init(self, locals())
        self._opt_fun = None
        self._target = None
        self._callback = callback
        # Bind the learning rate now so update_opt only has to supply
        # (gradients, params).
        update_method = partial(update_method, learning_rate=learning_rate)
        self._update_method = update_method
        self._max_epochs = max_epochs
        self._tolerance = tolerance
        self._batch_size = batch_size
        self._verbose = verbose

    def update_opt(self,
                   loss,
                   target,
                   inputs,
                   extra_inputs=None,
                   gradients=None,
                   **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should
        implement methods of the :class:`rllab.core.paramerized.Parameterized`
        class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon),
        of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: Optional pre-computed symbolic gradients; when None
        they are derived from ``loss`` via ``theano.grad``.
        :return: No return value.
        """
        self._target = target
        if gradients is None:
            gradients = theano.grad(
                loss,
                target.get_params(trainable=True),
                disconnected_inputs='ignore')
        updates = self._update_method(
            gradients, target.get_params(trainable=True))
        # Cast each update expression back to its parameter's dtype; some
        # update rules promote to float64, which would break float32 shared
        # variables.
        updates = OrderedDict(
            [(k, v.astype(k.dtype)) for k, v in updates.items()])
        if extra_inputs is None:
            extra_inputs = list()
        # Compile lazily: f_loss only evaluates the loss, f_opt also applies
        # the parameter updates as a side effect.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: ext.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=loss,
                updates=updates,
            )
        )

    def loss(self, inputs, extra_inputs=None):
        # Evaluate the compiled loss without touching any parameters.
        if extra_inputs is None:
            extra_inputs = tuple()
        return self._opt_fun["f_loss"](*(tuple(inputs) + extra_inputs))

    def optimize_gen(self,
                     inputs,
                     extra_inputs=None,
                     callback=None,
                     yield_itr=None):
        """Generator form of :meth:`optimize`.

        Yields control back to the caller every ``yield_itr + 1`` minibatch
        updates (never, when ``yield_itr`` is None), so work can be
        interleaved with training.
        """
        if len(inputs) == 0:
            # Assumes that we should always sample mini-batches
            raise NotImplementedError
        f_opt = self._opt_fun["f_opt"]
        f_loss = self._opt_fun["f_loss"]
        if extra_inputs is None:
            extra_inputs = tuple()
        last_loss = f_loss(*(tuple(inputs) + extra_inputs))
        start_time = time.time()
        dataset = BatchDataset(
            inputs,
            self._batch_size,
            extra_inputs=extra_inputs
            #, randomized=self._randomized
        )
        itr = 0
        for epoch in pyprind.prog_bar(list(range(self._max_epochs))):
            for batch in dataset.iterate(update=True):
                f_opt(*batch)
                if yield_itr is not None and (itr % (yield_itr + 1)) == 0:
                    yield
                itr += 1
            # Full-dataset loss once per epoch, used for logging, callbacks
            # and the early-stopping test below.
            new_loss = f_loss(*(tuple(inputs) + extra_inputs))
            if self._verbose:
                logger.log("Epoch %d, loss %s" % (epoch, new_loss))
            if self._callback or callback:
                elapsed = time.time() - start_time
                callback_args = dict(
                    loss=new_loss,
                    params=self._target.get_param_values(trainable=True)
                    if self._target else None,
                    itr=epoch,
                    elapsed=elapsed,
                )
                # NOTE(review): the stored callback receives the dict
                # positionally while the argument callback receives it as
                # keyword args -- confirm both conventions are intended.
                if self._callback:
                    self._callback(callback_args)
                if callback:
                    callback(**callback_args)
            # Stop early once the per-epoch improvement stalls.
            if abs(last_loss - new_loss) < self._tolerance:
                break
            last_loss = new_loss

    def optimize(self, inputs, **kwargs):
        # Drain the generator; all the work happens inside optimize_gen.
        for _ in self.optimize_gen(inputs, **kwargs):
            pass
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core import ConvNetwork
from rllab.core import LasagnePowered
from rllab.core import ParamLayer
from rllab.core import Serializable
from rllab.distributions import DiagonalGaussian
from rllab.misc import compile_function
from rllab.misc import iterate_minibatches_generic
from rllab.misc import logger
from rllab.misc import tensor_utils
from rllab.optimizers import LbfgsOptimizer
from rllab.optimizers import PenaltyLbfgsOptimizer
class GaussianConvRegressor(LasagnePowered):
    """
    A class for performing regression by fitting a Gaussian distribution to the
    outputs, with a convolutional network predicting the mean (and optionally
    the log-std).
    """

    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            hidden_sizes,
            conv_filters,
            conv_filter_sizes,
            conv_strides,
            conv_pads,
            hidden_nonlinearity=NL.rectify,
            mean_network=None,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            subsample_factor=1.0,
            batchsize=None,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_conv_filters=(),
            std_conv_filters_sizes=(),
            std_conv_strides=(),
            std_conv_pads=(),
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
    ):
        """
        :param input_shape: usually for images of the form
            (width,height,channel)
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean
            network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the
            mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only
            effective if adaptive_std is False. If adaptive_std is True, this
            parameter is ignored, and the weights for the std network are
            always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the
            std network. Only used if `std_share_network` is False. It defaults
            to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std
            network. Only used if `std_share_network` is False. It defaults to
            the same non-linearity as the mean.
        """
        Serializable.quick_init(self, locals())

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer("optimizer")
            else:
                optimizer = LbfgsOptimizer("optimizer")
        self._optimizer = optimizer
        self.input_shape = input_shape

        if mean_network is None:
            mean_network = ConvNetwork(
                name="mean_network",
                input_shape=input_shape,
                output_dim=output_dim,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=None,
            )
        l_mean = mean_network.output_layer

        if adaptive_std:
            l_log_std = ConvNetwork(
                name="log_std_network",
                input_shape=input_shape,
                input_var=mean_network.input_layer.input_var,
                output_dim=output_dim,
                conv_filters=std_conv_filters,
                # Fixed: previously referenced the undefined name
                # `std_conv_filter_sizes`; the constructor argument is
                # `std_conv_filters_sizes`, so adaptive_std=True always
                # raised NameError.
                conv_filter_sizes=std_conv_filters_sizes,
                conv_strides=std_conv_strides,
                conv_pads=std_conv_pads,
                hidden_sizes=std_hidden_sizes,
                hidden_nonlinearity=std_nonlinearity,
                output_nonlinearity=None,
            ).output_layer
        else:
            # A single learned (or frozen) log-std vector, independent of the
            # input.
            l_log_std = ParamLayer(
                mean_network.input_layer,
                num_units=output_dim,
                param=lasagne.init.Constant(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

        LasagnePowered.__init__(self, [l_mean, l_log_std])

        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix("ys")
        old_means_var = TT.matrix("old_means")
        old_log_stds_var = TT.matrix("old_log_stds")

        # Input/output whitening statistics live in shared variables so they
        # can be refit without recompiling the theano functions.
        x_mean_var = theano.shared(
            np.zeros((1, np.prod(input_shape)), dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True, False),
        )
        x_std_var = theano.shared(
            np.ones((1, np.prod(input_shape)), dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True, False),
        )
        y_mean_var = theano.shared(
            np.zeros((1, output_dim), dtype=theano.config.floatX),
            name="y_mean",
            broadcastable=(True, False))
        y_std_var = theano.shared(
            np.ones((1, output_dim), dtype=theano.config.floatX),
            name="y_std",
            broadcastable=(True, False))

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var

        # The network operates in normalized space; de-normalize its outputs
        # to produce predictions in the original target space.
        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(y_std_var)

        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - TT.log(y_std_var)

        dist = self._dist = DiagonalGaussian(output_dim)
        normalized_dist_info_vars = dict(
            mean=normalized_means_var, log_std=normalized_log_stds_var)

        # KL between the pre-update and current fit; used as the trust-region
        # constraint.
        mean_kl = TT.mean(
            dist.kl_sym(
                dict(
                    mean=normalized_old_means_var,
                    log_std=normalized_old_log_stds_var),
                normalized_dist_info_vars,
            ))
        loss = - \
            TT.mean(dist.log_likelihood_sym(
                normalized_ys_var, normalized_dist_info_vars))

        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )
        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [
                xs_var, ys_var, old_means_var, old_log_stds_var
            ]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]
        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var
        self._subsample_factor = subsample_factor
        self._batchsize = batchsize

    def fit(self, xs, ys):
        """Fit the Gaussian regressor to targets ys given inputs xs."""
        if self._subsample_factor < 1:
            # Optionally fit on a random subset of the data.
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(
                0, num_samples_tot,
                int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]

        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0,
                        keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(
                    theano.config.floatX))
        if self._normalize_outputs:
            # recompute normalizing constants for outputs
            self._y_mean_var.set_value(
                np.mean(ys, axis=0,
                        keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value(
                (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(
                    theano.config.floatX))
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        # FIXME: needs batch computation to avoid OOM.
        loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
        for batch in iterate_minibatches_generic(
                input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            batch_xs, batch_ys = batch
            if self._use_trust_region:
                # Record pre-update predictions so the trust-region optimizer
                # can evaluate the KL constraint against them.
                old_means, old_log_stds = self._f_pdists(batch_xs)
                inputs = [batch_xs, batch_ys, old_means, old_log_stds]
            else:
                inputs = [batch_xs, batch_ys]
            loss_before += self._optimizer.loss(inputs)

            self._optimizer.optimize(inputs)

            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)

        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # Fixed precedence: average the *difference* over batches; previously
        # this computed loss_before - (loss_after / batch_count).
        logger.record_tabular(prefix + 'dLoss',
                              (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)

    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets ys under the predicted distributions."""
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(
            mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of y_var given x_var."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {
                self._mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)
        return self._dist.log_likelihood_sym(
            y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
import numpy as np
from rllab.core import Serializable
class ProductRegressor(Serializable):
    """
    A class for performing MLE regression by fitting a product distribution to
    the outputs. A separate regressor will be trained for each individual input
    distribution.
    """

    def __init__(self, regressors):
        """
        :param regressors: List of individual regressors, one per factor of
            the product distribution.
        """
        Serializable.quick_init(self, locals())
        self.regressors = regressors
        self.output_dims = [x.output_dim for x in regressors]

    def _split_ys(self, ys):
        # Split the concatenated targets column-wise into one slice per
        # regressor, using the cumulative output dimensions as boundaries.
        ys = np.asarray(ys)
        split_ids = np.cumsum(self.output_dims)[:-1]
        return np.split(ys, split_ids, axis=1)

    def fit(self, xs, ys):
        """Fit each sub-regressor on its slice of the targets."""
        for regressor, split_ys in zip(self.regressors, self._split_ys(ys)):
            regressor.fit(xs, split_ys)

    def predict(self, xs):
        """Concatenate the per-factor ML predictions along axis 1."""
        return np.concatenate(
            [regressor.predict(xs) for regressor in self.regressors], axis=1)

    def sample_predict(self, xs):
        """Concatenate one sample from each factor's prediction."""
        return np.concatenate(
            [regressor.sample_predict(xs) for regressor in self.regressors],
            axis=1)

    def predict_log_likelihood(self, xs, ys):
        # The log-likelihood of a product distribution is the sum of the
        # factors' log-likelihoods.
        return np.sum(
            [
                regressor.predict_log_likelihood(xs, split_ys)
                for regressor, split_ys in zip(self.regressors,
                                               self._split_ys(ys))
            ],
            axis=0)

    def get_param_values(self, **tags):
        return np.concatenate([
            regressor.get_param_values(**tags) for regressor in self.regressors
        ])

    def set_param_values(self, flattened_params, **tags):
        param_dims = [
            np.prod(regressor.get_param_shapes(**tags))
            for regressor in self.regressors
        ]
        split_ids = np.cumsum(param_dims)[:-1]
        for regressor, split_param_values in zip(
                self.regressors, np.split(flattened_params, split_ids)):
            # Fixed: forward **tags so the same parameter subset selected by
            # get_param_shapes(**tags) above is the one being set.
            regressor.set_param_values(split_param_values, **tags)
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core import LasagnePowered
from rllab.core import MLP
from rllab.core import Serializable
from rllab.distributions import Categorical
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc import special
from rllab.optimizers import LbfgsOptimizer
from rllab.optimizers import PenaltyLbfgsOptimizer
# Sentinel default value distinct from None.
# NOTE(review): appears unused within this module's visible code -- confirm
# before removing.
NONE = list()
class CategoricalMLPRegressor(LasagnePowered):
    """
    A class for performing regression (or classification, really) by fitting a
    categorical distribution to the outputs. Assumes that the outputs will be
    always a one hot vector.
    """

    def __init__(
            self,
            input_shape,
            output_dim,
            prob_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            normalize_inputs=True,
            name=None,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param prob_network: Optional pre-built network mapping inputs to
            class probabilities; one is constructed when omitted.
        :param hidden_sizes: Number of hidden units of each layer of the mean
            network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the
            mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        """
        Serializable.quick_init(self, locals())

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()

        self.output_dim = output_dim
        self._optimizer = optimizer

        if prob_network is None:
            prob_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=NL.softmax,
            )

        l_prob = prob_network.output_layer

        LasagnePowered.__init__(self, [l_prob])

        xs_var = prob_network.input_layer.input_var
        ys_var = TT.imatrix("ys")
        old_prob_var = TT.matrix("old_prob")

        # Input whitening statistics in shared variables so they can be refit
        # without recompiling.  Fixed: create them with floatX (and cast in
        # fit()) so set_value does not fail when floatX is float32 -- this
        # matches the Gaussian regressors in this package.
        x_mean_var = theano.shared(
            np.zeros((1, ) + input_shape, dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True, ) + (False, ) * len(input_shape))
        x_std_var = theano.shared(
            np.ones((1, ) + input_shape, dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True, ) + (False, ) * len(input_shape))

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var

        prob_var = L.get_output(l_prob,
                                {prob_network.input_layer: normalized_xs_var})

        old_info_vars = dict(prob=old_prob_var)
        info_vars = dict(prob=prob_var)

        dist = self._dist = Categorical(output_dim)

        # KL between pre-update and current predictions; the trust-region
        # constraint.
        mean_kl = TT.mean(dist.kl_sym(old_info_vars, info_vars))

        loss = -TT.mean(dist.log_likelihood_sym(ys_var, info_vars))

        # Argmax class as a one-hot vector, for hard predictions.
        predicted = special.to_onehot_sym(
            TT.argmax(prob_var, axis=1), output_dim)

        self._f_predict = ext.compile_function([xs_var], predicted)
        self._f_prob = ext.compile_function([xs_var], prob_var)
        self._prob_network = prob_network
        self._l_prob = l_prob

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[prob_var],
        )

        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [xs_var, ys_var, old_prob_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]

        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name

        self._normalize_inputs = normalize_inputs
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var

    def fit(self, xs, ys):
        """Fit the regressor to one-hot targets ys given inputs xs."""
        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0,
                        keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(
                    theano.config.floatX))
        if self._use_trust_region:
            # Pre-update probabilities are needed to evaluate the KL
            # constraint.
            old_prob = self._f_prob(xs)
            inputs = [xs, ys, old_prob]
        else:
            inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        logger.record_tabular(prefix + 'LossBefore', loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        logger.record_tabular(prefix + 'LossAfter', loss_after)
        logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)

    def predict(self, xs):
        """Return the most likely class as a one-hot vector per input."""
        return self._f_predict(np.asarray(xs))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of one-hot targets ys under the predicted dist."""
        prob = self._f_prob(np.asarray(xs))
        return self._dist.log_likelihood(np.asarray(ys), dict(prob=prob))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of y_var given x_var."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        prob = L.get_output(
            self._l_prob, {self._prob_network.input_layer: normalized_xs_var})
        return self._dist.log_likelihood_sym(
            TT.cast(y_var, 'int32'), dict(prob=prob))

    def get_param_values(self, **tags):
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core import LasagnePowered
from rllab.core import MLP
from rllab.core import ParamLayer
from rllab.core import Serializable
from rllab.distributions import DiagonalGaussian
from rllab.misc import compile_function
from rllab.misc import iterate_minibatches_generic
from rllab.misc import logger
from rllab.optimizers import LbfgsOptimizer
from rllab.optimizers import PenaltyLbfgsOptimizer
class GaussianMLPRegressor(LasagnePowered):
    """
    A class for performing regression by fitting a Gaussian distribution to the
    outputs, with an MLP predicting the mean (and optionally the log-std).
    """

    def __init__(
            self,
            input_shape,
            output_dim,
            mean_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
            name=None,
            batchsize=None,
            subsample_factor=1.,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean
            network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the
            mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only
            effective if adaptive_std is False. If adaptive_std is True, this
            parameter is ignored, and the weights for the std network are
            always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the
            std network. Only used if `std_share_network` is False. It defaults
            to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std
            network. Only used if `std_share_network` is False. It defaults to
            the same non-linearity as the mean.
        """
        Serializable.quick_init(self, locals())

        self._batchsize = batchsize
        self._subsample_factor = subsample_factor

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()
        self._optimizer = optimizer

        if mean_network is None:
            mean_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=None,
            )
        l_mean = mean_network.output_layer

        if adaptive_std:
            l_log_std = MLP(
                input_shape=input_shape,
                input_var=mean_network.input_layer.input_var,
                output_dim=output_dim,
                hidden_sizes=std_hidden_sizes,
                hidden_nonlinearity=std_nonlinearity,
                output_nonlinearity=None,
            ).output_layer
        else:
            # A single learned (or frozen) log-std vector, independent of the
            # input.
            l_log_std = ParamLayer(
                mean_network.input_layer,
                num_units=output_dim,
                param=lasagne.init.Constant(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

        LasagnePowered.__init__(self, [l_mean, l_log_std])

        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix("ys")
        old_means_var = TT.matrix("old_means")
        old_log_stds_var = TT.matrix("old_log_stds")

        # Input/output whitening statistics live in shared variables so they
        # can be refit without recompiling the theano functions.
        x_mean_var = theano.shared(
            np.zeros((1, ) + input_shape, dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True, ) + (False, ) * len(input_shape))
        x_std_var = theano.shared(
            np.ones((1, ) + input_shape, dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True, ) + (False, ) * len(input_shape))
        y_mean_var = theano.shared(
            np.zeros((1, output_dim), dtype=theano.config.floatX),
            name="y_mean",
            broadcastable=(True, False))
        y_std_var = theano.shared(
            np.ones((1, output_dim), dtype=theano.config.floatX),
            name="y_std",
            broadcastable=(True, False))

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var

        # The network operates in normalized space; de-normalize its outputs
        # to produce predictions in the original target space.
        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(y_std_var)

        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - TT.log(y_std_var)

        dist = self._dist = DiagonalGaussian(output_dim)
        normalized_dist_info_vars = dict(
            mean=normalized_means_var, log_std=normalized_log_stds_var)

        # KL between the pre-update and current fit; used as the trust-region
        # constraint.
        mean_kl = TT.mean(
            dist.kl_sym(
                dict(
                    mean=normalized_old_means_var,
                    log_std=normalized_old_log_stds_var),
                normalized_dist_info_vars,
            ))
        loss = - \
            TT.mean(dist.log_likelihood_sym(
                normalized_ys_var, normalized_dist_info_vars))

        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )
        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [
                xs_var, ys_var, old_means_var, old_log_stds_var
            ]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]
        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var

    def fit(self, xs, ys):
        """Fit the Gaussian regressor to targets ys given inputs xs."""
        if self._subsample_factor < 1:
            # Optionally fit on a random subset of the data.
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(
                0, num_samples_tot,
                int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]

        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0,
                        keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(
                    theano.config.floatX))
        if self._normalize_outputs:
            # recompute normalizing constants for outputs
            self._y_mean_var.set_value(
                np.mean(ys, axis=0,
                        keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value(
                (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(
                    theano.config.floatX))
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        # FIXME: needs batch computation to avoid OOM.
        loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
        for batch in iterate_minibatches_generic(
                input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            batch_xs, batch_ys = batch
            if self._use_trust_region:
                # Record pre-update predictions so the trust-region optimizer
                # can evaluate the KL constraint against them.
                old_means, old_log_stds = self._f_pdists(batch_xs)
                inputs = [batch_xs, batch_ys, old_means, old_log_stds]
            else:
                inputs = [batch_xs, batch_ys]
            loss_before += self._optimizer.loss(inputs)

            self._optimizer.optimize(inputs)

            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)

        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # Fixed precedence: average the *difference* over batches; previously
        # this computed loss_before - (loss_after / batch_count).
        logger.record_tabular(prefix + 'dLoss',
                              (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)

    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets ys under the predicted distributions."""
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(
            mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of y_var given x_var."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {
                self._mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)
        return self._dist.log_likelihood_sym(
            y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
from rllab.core import ConvNetwork
from rllab.core import LasagnePowered
from rllab.core import Serializable
from rllab.distributions import Categorical
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc import tensor_utils
from rllab.misc.overrides import overrides
from rllab.policies import StochasticPolicy
from rllab.spaces import Discrete
class CategoricalConvPolicy(StochasticPolicy, LasagnePowered):
    """Stochastic policy over a discrete action space, parameterized by a
    convolutional network that outputs action probabilities."""

    def __init__(
            self,
            name,
            env_spec,
            conv_filters,
            conv_filter_sizes,
            conv_strides,
            conv_pads,
            hidden_sizes=(),
            hidden_nonlinearity=NL.rectify,
            output_nonlinearity=NL.softmax,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity applied to the output layer
            (a probability simplex output such as softmax is expected).
        :param prob_network: manually specified network for this policy, other
            network params are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)
        self._env_spec = env_spec
        if prob_network is None:
            prob_network = ConvNetwork(
                input_shape=env_spec.observation_space.shape,
                output_dim=env_spec.action_space.n,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                # Fixed: honor the output_nonlinearity argument instead of
                # unconditionally hard-coding NL.softmax (the default is
                # unchanged, so existing callers see identical behavior).
                output_nonlinearity=output_nonlinearity,
                name="prob_network",
            )

        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        # Compiled forward pass: flat observations -> action probabilities.
        self._f_prob = ext.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer))

        self._dist = Categorical(env_spec.action_space.n)

        super(CategoricalConvPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [prob_network.output_layer])

    @property
    def vectorized(self):
        # Supports batched get_actions for vectorized samplers.
        return True

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic distribution info (action probabilities) for obs_var."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: obs_var}))

    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric distribution info (action probabilities) for obs."""
        return dict(prob=self._f_prob(obs))

    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Sample one action for a single observation."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)

    def get_actions(self, observations):
        """Sample one action per observation, in a single batched pass."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)

    @property
    def distribution(self):
        return self._dist
import lasagne.init
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano.tensor as TT
from rllab.core import GRUNetwork
from rllab.core import LasagnePowered
from rllab.core import ParamLayer
from rllab.core import Serializable
from rllab.distributions import RecurrentDiagonalGaussian
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.policies import StochasticPolicy
class GaussianGRUPolicy(StochasticPolicy, LasagnePowered):
    """Recurrent (GRU) policy with a diagonal Gaussian action distribution.

    A GRU network outputs the action mean at every time step; the log-std is
    a free, state-independent parameter (shared across time steps) attached
    via a ParamLayer.
    """
    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, ),
            state_include_action=True,
            hidden_nonlinearity=NL.tanh,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param state_include_action: if True, the previous action is
         concatenated to the observation as network input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param learn_std: whether the log-std parameter is trainable
        :param init_std: initial action standard deviation
        :param output_nonlinearity: nonlinearity applied to the mean output
        :return:
        """
        Serializable.quick_init(self, locals())
        super(GaussianGRUPolicy, self).__init__(env_spec)
        # Only a single recurrent layer is supported.
        assert len(hidden_sizes) == 1
        if state_include_action:
            obs_dim = env_spec.observation_space.flat_dim + \
                env_spec.action_space.flat_dim
        else:
            obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        mean_network = GRUNetwork(
            input_shape=(obs_dim, ),
            output_dim=action_dim,
            hidden_dim=hidden_sizes[0],
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
        )
        # NOTE(review): l_mean and obs_var below are never used afterwards.
        l_mean = mean_network.output_layer
        obs_var = mean_network.input_var
        # The log-std is a single learned parameter vector; the "step"
        # variant shares the same underlying parameter for single-step use.
        l_log_std = ParamLayer(
            mean_network.input_layer,
            num_units=action_dim,
            param=lasagne.init.Constant(np.log(init_std)),
            name="output_log_std",
            trainable=learn_std,
        )
        l_step_log_std = ParamLayer(
            mean_network.step_input_layer,
            num_units=action_dim,
            param=l_log_std.param,
            name="step_output_log_std",
            trainable=learn_std,
        )
        self._mean_network = mean_network
        self._l_log_std = l_log_std
        self._state_include_action = state_include_action
        # Compiled single-step function:
        # (input, prev_hidden) -> (mean, log_std, next_hidden).
        self._f_step_mean_std = ext.compile_function(
            [
                mean_network.step_input_layer.input_var,
                mean_network.step_prev_hidden_layer.input_var
            ],
            L.get_output([
                mean_network.step_output_layer, l_step_log_std,
                mean_network.step_hidden_layer
            ]))
        self._prev_action = None
        self._prev_hidden = None
        self._hidden_sizes = hidden_sizes
        self._dist = RecurrentDiagonalGaussian(action_dim)
        self.reset()
        LasagnePowered.__init__(self, [mean_network.output_layer, l_log_std])
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic (mean, log_std) over (batch, time, obs_dim) inputs.

        When state_include_action is set, state_info_vars must carry the
        "prev_action" tensor to concatenate onto the observations.
        """
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self._state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate([obs_var, prev_action_var], axis=2)
        else:
            all_input_var = obs_var
        means, log_stds = L.get_output(
            [self._mean_network.output_layer, self._l_log_std], all_input_var)
        return dict(mean=means, log_std=log_stds)
    def reset(self):
        """Clear the previous action and reset the GRU hidden state."""
        self._prev_action = None
        self._prev_hidden = self._mean_network.hid_init_param.get_value()
    # Returns a pair: the sampled action, and a dict with the Gaussian
    # parameters (mean, log_std) it was drawn from, plus prev_action when
    # state_include_action is set.
    @overrides
    def get_action(self, observation):
        if self._state_include_action:
            if self._prev_action is None:
                prev_action = np.zeros((self.action_space.flat_dim, ))
            else:
                prev_action = self.action_space.flatten(self._prev_action)
            all_input = np.concatenate(
                [self.observation_space.flatten(observation), prev_action])
        else:
            all_input = self.observation_space.flatten(observation)
            # should not be used
            prev_action = np.nan
        mean, log_std, hidden_vec = [
            x[0]
            for x in self._f_step_mean_std([all_input], [self._prev_hidden])
        ]
        # Sample via the reparameterization action = mean + eps * std.
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        self._prev_action = action
        self._prev_hidden = hidden_vec
        agent_info = dict(mean=mean, log_std=log_std)
        if self._state_include_action:
            agent_info["prev_action"] = prev_action
        return action, agent_info
    @property
    @overrides
    def recurrent(self):
        """This policy is recurrent: it keeps GRU hidden state across steps."""
        return True
    @property
    def distribution(self):
        """RecurrentDiagonalGaussian action distribution."""
        return self._dist
    @property
    def state_info_keys(self):
        """Extra agent_info keys needed to recompute the distribution."""
        if self._state_include_action:
            return ["prev_action"]
        else:
            return []
from rllab.core import Parameterized
class Policy(Parameterized):
    """Abstract base class for policies: maps observations to actions."""

    def __init__(self, env_spec):
        Parameterized.__init__(self)
        self._env_spec = env_spec

    def get_action(self, observation):
        """Return (action, agent_info) for one observation.

        Must be implemented by all concrete policies.
        """
        raise NotImplementedError

    def reset(self):
        """Reset any episodic internal state; no-op for stateless policies."""
        pass

    @property
    def observation_space(self):
        """Observation space of the wrapped environment spec."""
        return self._env_spec.observation_space

    @property
    def action_space(self):
        """Action space of the wrapped environment spec."""
        return self._env_spec.action_space

    @property
    def recurrent(self):
        """Whether the policy carries hidden state across time steps."""
        return False

    def log_diagnostics(self, paths):
        """Log extra per-iteration information based on the collected paths."""
        pass

    @property
    def state_info_keys(self):
        """Keys describing the policy's internal state when acting."""
        return []

    def terminate(self):
        """Clean-up hook invoked when the policy is no longer needed."""
        pass
class StochasticPolicy(Policy):
    """Policy defining a probability distribution over actions."""
    @property
    def distribution(self):
        """
        The action distribution object.
        :rtype Distribution
        """
        raise NotImplementedError
    def dist_info_sym(self, obs_var, state_info_vars):
        """
        Return the symbolic distribution information about the actions.
        :param obs_var: symbolic variable for observations
        :param state_info_vars: a dictionary whose values should contain
        information about the state of the policy at the time it received the
        observation
        :return:
        """
        raise NotImplementedError
    def dist_info(self, obs, state_infos):
        """
        Return the distribution information about the actions.
        :param obs: observation values
        :param state_infos: a dictionary whose values should contain
        information about the state of the policy at the time it received the
        observation
        :return:
        """
        raise NotImplementedError
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano.tensor as TT
from rllab.core import LasagnePowered
from rllab.core import MLP
from rllab.core import ParamLayer
from rllab.core import Serializable
from rllab.distributions import DiagonalGaussian
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc.overrides import overrides
from rllab.policies import StochasticPolicy
from rllab.spaces import Box
class GaussianMLPPolicy(StochasticPolicy, LasagnePowered):
    """MLP policy whose actions follow a diagonal Gaussian distribution.

    An MLP maps observations to the action mean; the log standard deviation
    is either a single learned parameter vector, an observation-dependent
    MLP (adaptive_std), or a user-supplied network.
    """
    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=NL.tanh,
            hidden_nonlinearity=NL.tanh,
            output_nonlinearity=None,
            mean_network=None,
            std_network=None,
            dist_cls=DiagonalGaussian,
    ):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std: whether the log-std is produced by its own MLP
         on the observation instead of being a free parameter
        :param std_share_network: NOTE(review): currently unused in this
         implementation — TODO confirm intended behavior
        :param std_hidden_sizes: list of sizes for the fully-connected layers
         for std
        :param min_std: minimum std value; the log-std is clipped from below
         at log(min_std) to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        # create network
        if mean_network is None:
            mean_network = MLP(
                input_shape=(obs_dim, ),
                output_dim=action_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
            )
        self._mean_network = mean_network
        l_mean = mean_network.output_layer
        obs_var = mean_network.input_layer.input_var
        if std_network is not None:
            l_log_std = std_network.output_layer
        else:
            if adaptive_std:
                # Observation-dependent log-std sharing the mean's input layer.
                std_network = MLP(
                    input_shape=(obs_dim, ),
                    input_layer=mean_network.input_layer,
                    output_dim=action_dim,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_hidden_nonlinearity,
                    output_nonlinearity=None,
                )
                l_log_std = std_network.output_layer
            else:
                # State-independent log-std: one learned parameter per
                # action dimension, initialized to log(init_std).
                l_log_std = ParamLayer(
                    mean_network.input_layer,
                    num_units=action_dim,
                    param=lasagne.init.Constant(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )
        self.min_std = min_std
        mean_var, log_std_var = L.get_output([l_mean, l_log_std])
        if self.min_std is not None:
            # Clip the log-std from below to avoid numerical issues.
            log_std_var = TT.maximum(log_std_var, np.log(min_std))
        self._mean_var, self._log_std_var = mean_var, log_std_var
        self._l_mean = l_mean
        self._l_log_std = l_log_std
        self._dist = dist_cls(action_dim)
        LasagnePowered.__init__(self, [l_mean, l_log_std])
        super(GaussianMLPPolicy, self).__init__(env_spec)
        # Compiled function: flat observations -> (mean, log_std).
        self._f_dist = ext.compile_function(
            inputs=[obs_var],
            outputs=[mean_var, log_std_var],
        )
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic (mean, log_std) of the action distribution for obs_var."""
        mean_var, log_std_var = L.get_output([self._l_mean, self._l_log_std],
                                             obs_var)
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var)
    @overrides
    def get_action(self, observation):
        """Sample an action for one observation; returns (action, dist info)."""
        flat_obs = self.observation_space.flatten(observation)
        mean, log_std = [x[0] for x in self._f_dist([flat_obs])]
        # Sample via the reparameterization action = mean + eps * std.
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        return action, dict(mean=mean, log_std=log_std)
    def get_actions(self, observations):
        """Vectorized version of get_action for a batch of observations."""
        flat_obs = self.observation_space.flatten_n(observations)
        means, log_stds = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        return actions, dict(mean=means, log_std=log_stds)
    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return
        a symbolically reparameterized representation of the actions in terms of
        the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        # NOTE(review): action_var is passed where dist_info_sym expects
        # state_info_vars; harmless here since this policy ignores that arg.
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        new_mean_var, new_log_std_var = new_dist_info_vars[
            "mean"], new_dist_info_vars["log_std"]
        old_mean_var, old_log_std_var = old_dist_info_vars[
            "mean"], old_dist_info_vars["log_std"]
        # Recover the noise that generated action_var under the old policy,
        # then re-apply it through the current policy's mean and std.
        epsilon_var = (action_var - old_mean_var) / (
            TT.exp(old_log_std_var) + 1e-8)
        new_action_var = new_mean_var + epsilon_var * TT.exp(new_log_std_var)
        return new_action_var
    def log_diagnostics(self, paths):
        """Record the average policy std over the collected paths."""
        log_stds = np.vstack(
            [path["agent_infos"]["log_std"] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))
    @property
    def distribution(self):
        """The dist_cls (DiagonalGaussian by default) action distribution."""
        return self._dist
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
from rllab.core import LasagnePowered
from rllab.core import MLP
from rllab.core import Serializable
from rllab.distributions import Categorical
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.policies import StochasticPolicy
from rllab.spaces import Discrete
class CategoricalMLPPolicy(StochasticPolicy, LasagnePowered):
    """MLP policy over a discrete action space.

    A softmax MLP maps (possibly stacked) flat observations to a categorical
    distribution over actions.
    """
    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.tanh,
            num_seq_inputs=1,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param num_seq_inputs: number of consecutive observations concatenated
         into a single network input
        :param prob_network: manually specified network for this policy, other
        network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        if prob_network is None:
            prob_network = MLP(
                input_shape=(
                    env_spec.observation_space.flat_dim * num_seq_inputs, ),
                output_dim=env_spec.action_space.n,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=NL.softmax,
            )
        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        # Compiled function: flat observations -> action probabilities.
        self._f_prob = ext.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer))
        self._dist = Categorical(env_spec.action_space.n)
        super(CategoricalMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [prob_network.output_layer])
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic categorical probabilities for obs_var."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: obs_var}))
    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric categorical probabilities for a batch of flat obs."""
        return dict(prob=self._f_prob(obs))
    # Returns a pair: the chosen action, and a dict carrying the categorical
    # probability vector it was chosen from.
    @overrides
    def get_action(self, observation, deterministic=False):
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        if deterministic:
            # Greedy: pick the most likely action instead of sampling.
            action = np.argmax(prob)
        else:
            action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)
    def get_actions(self, observations):
        """Sample one action per observation in a batch."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)
    @property
    def distribution(self):
        """Categorical action distribution."""
        return self._dist
import lasagne
import lasagne.init as LI
import lasagne.layers as L
import lasagne.nonlinearities as NL
from rllab.core import batch_norm
from rllab.core import LasagnePowered
from rllab.core import Serializable
from rllab.misc import ext
from rllab.policies import Policy
class DeterministicMLPPolicy(Policy, LasagnePowered):
    """Deterministic MLP policy (e.g. the actor in DDPG-style algorithms).

    Maps observations directly to continuous actions; optional batch
    normalization can be inserted after the input and each hidden layer.
    """
    def __init__(self,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=NL.rectify,
                 hidden_W_init=LI.HeUniform(),
                 hidden_b_init=LI.Constant(0.),
                 output_nonlinearity=NL.tanh,
                 output_W_init=LI.Uniform(-3e-3, 3e-3),
                 output_b_init=LI.Uniform(-3e-3, 3e-3),
                 bn=False):
        # bn: whether to apply batch normalization between layers.
        Serializable.quick_init(self, locals())
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim))
        l_hidden = l_obs
        if bn:
            l_hidden = batch_norm(l_hidden)
        for idx, size in enumerate(hidden_sizes):
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % idx)
            if bn:
                l_hidden = batch_norm(l_hidden)
        # Output layer uses a small uniform initialization so the initial
        # actions start near zero.
        l_output = L.DenseLayer(
            l_hidden,
            num_units=env_spec.action_space.flat_dim,
            W=output_W_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output")
        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers
        action_var = L.get_output(l_output, deterministic=True)
        self._output_layer = l_output
        self._f_actions = ext.compile_function([l_obs.input_var], action_var)
        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [l_output])
    def get_action(self, observation):
        """Compute the action for one observation; the info dict is empty."""
        action = self._f_actions([observation])[0]
        return action, dict()
    def get_actions(self, observations):
        """Compute actions for a batch of observations."""
        return self._f_actions(observations), dict()
    def get_action_sym(self, obs_var):
        """Symbolic actions for obs_var (e.g. for building critic targets)."""
        return L.get_output(self._output_layer, obs_var)
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano.tensor as TT
from rllab.core import GRUNetwork
from rllab.core import LasagnePowered
from rllab.core import OpLayer
from rllab.core import Serializable
from rllab.distributions import RecurrentCategorical
from rllab.misc import ext
from rllab.misc import special
from rllab.misc.overrides import overrides
from rllab.policies import StochasticPolicy
from rllab.spaces import Discrete
class CategoricalGRUPolicy(StochasticPolicy, LasagnePowered):
    """Recurrent (GRU) policy over a discrete action space.

    An optional feature network preprocesses the flattened inputs before
    they reach the GRU; a softmax head yields action probabilities.
    """
    def __init__(self,
                 env_spec,
                 hidden_dim=32,
                 feature_network=None,
                 state_include_action=True,
                 hidden_nonlinearity=NL.tanh):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network applied to the flat inputs
         before the GRU
        :param state_include_action: if True, the previous action is
         concatenated to the observation as network input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        assert isinstance(env_spec.action_space, Discrete)
        Serializable.quick_init(self, locals())
        super(CategoricalGRUPolicy, self).__init__(env_spec)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        if state_include_action:
            input_dim = obs_dim + action_dim
        else:
            input_dim = obs_dim
        l_input = L.InputLayer(shape=(None, None, input_dim), name="input")
        if feature_network is None:
            feature_dim = input_dim
            l_flat_feature = None
            l_feature = l_input
        else:
            feature_dim = feature_network.output_layer.output_shape[-1]
            l_flat_feature = feature_network.output_layer
            # Reshape the flat features back to (batch, time, feature_dim)
            # so the GRU can consume them.
            l_feature = OpLayer(
                l_flat_feature,
                extras=[l_input],
                name="reshape_feature",
                op=lambda flat_feature, input: TT.reshape(
                    flat_feature,
                    [input.shape[0], input.shape[1], feature_dim]
                ),
                shape_op=lambda _, input_shape: (
                    input_shape[0], input_shape[1], feature_dim)
            )
        prob_network = GRUNetwork(
            input_shape=(feature_dim, ),
            input_layer=l_feature,
            output_dim=env_spec.action_space.n,
            hidden_dim=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=TT.nnet.softmax,
            name="prob_network")
        self.prob_network = prob_network
        self.feature_network = feature_network
        self.l_input = l_input
        self.state_include_action = state_include_action
        flat_input_var = TT.matrix("flat_input")
        if feature_network is None:
            feature_var = flat_input_var
        else:
            feature_var = L.get_output(
                l_flat_feature, {feature_network.input_layer: flat_input_var})
        # Compiled single-step function:
        # (flat input, prev hidden) -> (action probabilities, next hidden).
        self.f_step_prob = ext.compile_function(
            [flat_input_var, prob_network.step_prev_hidden_layer.input_var],
            L.get_output([
                prob_network.step_output_layer, prob_network.step_hidden_layer
            ], {prob_network.step_input_layer: feature_var}))
        self.input_dim = input_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        self.prev_action = None
        self.prev_hidden = None
        self.dist = RecurrentCategorical(env_spec.action_space.n)
        out_layers = [prob_network.output_layer]
        if feature_network is not None:
            out_layers.append(feature_network.output_layer)
        LasagnePowered.__init__(self, out_layers)
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic categorical probabilities over (batch, time) inputs."""
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate([obs_var, prev_action_var], axis=2)
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            return dict(
                prob=L.get_output(self.prob_network.output_layer,
                                  {self.l_input: all_input_var}))
        else:
            # The feature network consumes flattened (batch*time) inputs.
            flat_input_var = TT.reshape(all_input_var, (-1, self.input_dim))
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer, {
                        self.l_input: all_input_var,
                        self.feature_network.input_layer: flat_input_var
                    }))
    def reset(self):
        """Clear the previous action and reset the GRU hidden state."""
        self.prev_action = None
        self.prev_hidden = self.prob_network.hid_init_param.get_value()
    # Returns a pair: the sampled action, and a dict with the probability
    # vector it was drawn from, plus prev_action when state_include_action
    # is set.
    @overrides
    def get_action(self, observation):
        if self.state_include_action:
            if self.prev_action is None:
                prev_action = np.zeros((self.action_space.flat_dim, ))
            else:
                prev_action = self.action_space.flatten(self.prev_action)
            all_input = np.concatenate(
                [self.observation_space.flatten(observation), prev_action])
        else:
            all_input = self.observation_space.flatten(observation)
            # should not be used
            prev_action = np.nan
        probs, hidden_vec = [
            x[0] for x in self.f_step_prob([all_input], [self.prev_hidden])
        ]
        action = special.weighted_sample(probs, range(self.action_space.n))
        self.prev_action = action
        self.prev_hidden = hidden_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            agent_info["prev_action"] = prev_action
        return action, agent_info
    @property
    @overrides
    def recurrent(self):
        """This policy is recurrent: it keeps GRU hidden state across steps."""
        return True
    @property
    def distribution(self):
        """RecurrentCategorical action distribution."""
        return self.dist
    @property
    def state_info_keys(self):
        """Extra agent_info keys needed to recompute the distribution."""
        if self.state_include_action:
            return ["prev_action"]
        else:
            return []
from rllab.algos import RLAlgorithm
import rllab.misc.logger as logger
from rllab.plotter import plotter
from rllab.policies import Policy
from rllab.sampler import parallel_sampler
from rllab.sampler.base import BaseSampler
from rllab.sampler.utils import rollout
class BatchSampler(BaseSampler):
    """Sampler that collects on-policy batches via the parallel sampler."""

    def __init__(self, algo):
        """
        :type algo: BatchPolopt
        """
        self.algo = algo

    def start_worker(self):
        """Distribute the environment and policy to the worker processes."""
        algo = self.algo
        parallel_sampler.populate_task(algo.env, algo.policy, scope=algo.scope)

    def shutdown_worker(self):
        """Tear down the worker processes for this algorithm's scope."""
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Collect a batch of rollouts using the current policy parameters."""
        algo = self.algo
        paths = parallel_sampler.sample_paths(
            policy_params=algo.policy.get_param_values(),
            max_samples=algo.batch_size,
            max_path_length=algo.max_path_length,
            scope=algo.scope,
        )
        if algo.whole_paths:
            return paths
        # Trim the batch down to exactly batch_size samples.
        return parallel_sampler.truncate_paths(paths, algo.batch_size)
class BatchPolopt(RLAlgorithm):
    """
    Base class for batch sampling-based policy optimization methods.
    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.
    """
    def __init__(self,
                 env,
                 policy,
                 baseline,
                 scope=None,
                 n_itr=500,
                 start_itr=0,
                 batch_size=5000,
                 max_path_length=500,
                 discount=0.99,
                 gae_lambda=1,
                 plot=False,
                 pause_for_plot=False,
                 center_adv=True,
                 positive_adv=False,
                 store_paths=False,
                 whole_paths=True,
                 sampler_cls=None,
                 sampler_args=None,
                 **kwargs):
        """
        :param env: Environment
        :param policy: Policy
        :type policy: Policy
        :param baseline: Baseline
        :param scope: Scope for identifying the algorithm. Must be specified if
         running multiple algorithms
        simultaneously, each using different environments and policies
        :param n_itr: Number of iterations.
        :param start_itr: Starting iteration.
        :param batch_size: Number of samples per iteration.
        :param max_path_length: Maximum length of a single rollout.
        :param discount: Discount.
        :param gae_lambda: Lambda used for generalized advantage estimation.
        :param plot: Plot evaluation run after each iteration.
        :param pause_for_plot: Whether to pause before continuing when plotting.
        :param center_adv: Whether to rescale the advantages so that they have
         mean 0 and standard deviation 1.
        :param positive_adv: Whether to shift the advantages so that they are
         always positive. When used in
        conjunction with center_adv the advantages will be standardized before
        shifting.
        :param store_paths: Whether to save all paths data to the snapshot.
        :param whole_paths: If True, keep full rollouts; otherwise truncate
         each batch to exactly batch_size samples.
        :param sampler_cls: Sampler class to use (defaults to BatchSampler).
        :param sampler_args: Keyword arguments for constructing the sampler.
        """
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.current_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        if sampler_cls is None:
            sampler_cls = BatchSampler
        if sampler_args is None:
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
    def start_worker(self):
        """Start sampler workers and, if requested, the plotting process."""
        self.sampler.start_worker()
        if self.plot:
            plotter.init_plot(self.env, self.policy)
    def shutdown_worker(self):
        """Shut down the sampler workers."""
        self.sampler.shutdown_worker()
    def train(self):
        """Main training loop: sample, optimize, snapshot — repeated n_itr
        times starting from current_itr."""
        self.start_worker()
        self.init_opt()
        for itr in range(self.current_itr, self.n_itr):
            with logger.prefix('itr #%d | ' % itr):
                paths = self.sampler.obtain_samples(itr)
                samples_data = self.sampler.process_samples(itr, paths)
                self.log_diagnostics(paths)
                self.optimize_policy(itr, samples_data)
                logger.log("saving snapshot...")
                params = self.get_itr_snapshot(itr, samples_data)
                self.current_itr = itr + 1
                # Store the algorithm itself so training can be resumed
                # from a snapshot.
                params["algo"] = self
                if self.store_paths:
                    params["paths"] = samples_data["paths"]
                logger.save_itr_params(itr, params)
                logger.log("saved")
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    self.update_plot()
                    if self.pause_for_plot:
                        input("Plotting evaluation run: Press Enter to "
                              "continue...")
        self.shutdown_worker()
    def log_diagnostics(self, paths):
        """Let the env, policy and baseline log per-iteration diagnostics."""
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)
    def init_opt(self):
        """
        Initialize the optimization procedure. If using theano / cgt, this may
        include declaring all the variables and compiling functions
        """
        raise NotImplementedError
    def get_itr_snapshot(self, itr, samples_data):
        """
        Returns all the data that should be saved in the snapshot for this
        iteration.
        """
        raise NotImplementedError
    def optimize_policy(self, itr, samples_data):
        """Update the policy from one batch of processed samples."""
        raise NotImplementedError
    def update_plot(self):
        """Refresh the evaluation-run plot with the current policy."""
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)
import theano
import theano.tensor as TT
from rllab.algos import BatchPolopt
from rllab.core import Serializable
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc.overrides import overrides
from rllab.optimizers import FirstOrderOptimizer
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.
    """
    def __init__(self,
                 env,
                 policy,
                 baseline,
                 optimizer=None,
                 optimizer_args=None,
                 **kwargs):
        """Set up a first-order optimizer (one pass over the whole batch by
        default) and defer the rest of the setup to BatchPolopt."""
        Serializable.quick_init(self, locals())
        if optimizer is None:
            default_args = dict(
                batch_size=None,
                max_epochs=1,
            )
            if optimizer_args is None:
                optimizer_args = default_args
            else:
                # User-supplied args override the defaults.
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        super(VPG, self).__init__(
            env=env, policy=policy, baseline=baseline, **kwargs)
    @overrides
    def init_opt(self):
        """Build the surrogate loss and compile the KL diagnostic function."""
        is_recurrent = int(self.policy.recurrent)
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = ext.new_tensor(
            'advantage', ndim=1 + is_recurrent, dtype=theano.config.floatX)
        dist = self.policy.distribution
        # Placeholders for the pre-update distribution parameters, used only
        # for KL diagnostics.
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX)
            for k in dist.dist_info_keys
        }
        old_dist_info_vars_list = [
            old_dist_info_vars[k] for k in dist.dist_info_keys
        ]
        if is_recurrent:
            valid_var = TT.matrix('valid')
        else:
            valid_var = None
        state_info_vars = {
            k: ext.new_tensor(
                k, ndim=2 + is_recurrent, dtype=theano.config.floatX)
            for k in self.policy.state_info_keys
        }
        state_info_vars_list = [
            state_info_vars[k] for k in self.policy.state_info_keys
        ]
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        # formulate as a minimization problem
        # The gradient of the surrogate objective is the policy gradient
        if is_recurrent:
            # Mask out invalid (padded) time steps when averaging.
            surr_obj = -TT.sum(
                logli * advantage_var * valid_var) / TT.sum(valid_var)
            mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
            max_kl = TT.max(kl * valid_var)
        else:
            surr_obj = -TT.mean(logli * advantage_var)
            mean_kl = TT.mean(kl)
            max_kl = TT.max(kl)
        input_list = [obs_var, action_var, advantage_var
                      ] + state_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(
            surr_obj, target=self.policy, inputs=input_list)
        # KL between the pre-update and post-update policies, for diagnostics.
        f_kl = ext.compile_function(
            inputs=input_list + old_dist_info_vars_list,
            outputs=[mean_kl, max_kl],
        )
        self.opt_info = dict(f_kl=f_kl, )
    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one gradient-based update and record loss/KL diagnostics."""
        logger.log("optimizing policy")
        inputs = ext.extract(samples_data, "observations", "actions",
                             "advantages")
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        inputs += tuple(state_info_list)
        if self.policy.recurrent:
            inputs += (samples_data["valids"], )
        dist_info_list = [
            agent_infos[k] for k in self.policy.distribution.dist_info_keys
        ]
        loss_before = self.optimizer.loss(inputs)
        self.optimizer.optimize(inputs)
        loss_after = self.optimizer.loss(inputs)
        logger.record_tabular("LossBefore", loss_before)
        logger.record_tabular("LossAfter", loss_after)
        mean_kl, max_kl = self.opt_info['f_kl'](
            *(list(inputs) + dist_info_list))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)
    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Data to persist in the snapshot for this iteration."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
import numpy as np
import theano.tensor as TT
from rllab.algos import cma_es_lib
from rllab.algos import RLAlgorithm
from rllab.core import Serializable
from rllab.misc import ext
import rllab.misc.logger as logger
from rllab.misc.special import discount_cumsum
import rllab.plotter as plotter
from rllab.sampler import parallel_sampler
from rllab.sampler import stateful_pool
from rllab.sampler.utils import rollout
def sample_return(G, params, max_path_length, discount):
    """Roll out one episode with the given parameters and annotate returns.

    G is the per-worker global state holding the env and policy. Note the
    strong assumption that no other worker mutates them concurrently.
    """
    G.policy.set_param_values(params)
    path = rollout(G.env, G.policy, max_path_length)
    rewards = path["rewards"]
    path["returns"] = discount_cumsum(rewards, discount)
    path["undiscounted_return"] = sum(rewards)
    return path
class CMAES(RLAlgorithm, Serializable):
    """Covariance Matrix Adaptation Evolution Strategy over policy parameters.

    Treats the flattened policy parameters as the search space and uses
    CMA-ES to maximize the discounted return of one rollout per candidate.
    """

    def __init__(self,
                 env,
                 policy,
                 n_itr=500,
                 max_path_length=500,
                 discount=0.99,
                 sigma0=1.,
                 batch_size=None,
                 plot=False,
                 **kwargs):
        """
        :param n_itr: Number of iterations.
        :param max_path_length: Maximum length of a single rollout.
        :param batch_size: # of samples from trajs from param distribution, when
         this is set, n_samples is ignored
        :param discount: Discount.
        :param plot: Plot evaluation run after each iteration.
        :param sigma0: Initial std for param dist
        :return:
        """
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.plot = plot
        self.sigma0 = sigma0
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr
        self.batch_size = batch_size

    def train(self):
        """Run the CMA-ES loop and set the policy to the best found params."""
        cur_std = self.sigma0
        cur_mean = self.policy.get_param_values()
        # NOTE(review): cur_std is never updated after this point, so the
        # 'CurStdMean' tabular entry always reports the initial sigma0.
        es = cma_es_lib.CMAEvolutionStrategy(cur_mean, cur_std)
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        itr = 0
        while itr < self.n_itr and not es.stop():
            if self.batch_size is None:
                # Sample from multivariate normal distribution.
                xs = np.asarray(es.ask())
                # For each sample, do a rollout.
                infos = (stateful_pool.singleton_pool.run_map(
                    sample_return,
                    [(x, self.max_path_length, self.discount) for x in xs]))
            else:
                # Keep asking for candidate batches until the cumulative
                # number of collected samples reaches batch_size.
                cum_len = 0
                infos = []
                xss = []
                done = False
                while not done:
                    sbs = stateful_pool.singleton_pool.n_parallel * 2
                    # Sample from multivariate normal distribution.
                    # You want to ask for sbs samples here.
                    xs = np.asarray(es.ask(sbs))
                    xss.append(xs)
                    sinfos = stateful_pool.singleton_pool.run_map(
                        sample_return,
                        [(x, self.max_path_length, self.discount) for x in xs])
                    for info in sinfos:
                        infos.append(info)
                        cum_len += len(info['returns'])
                        if cum_len >= self.batch_size:
                            xs = np.concatenate(xss)
                            done = True
                            break
            # Evaluate fitness of samples (negative as it is minimization
            # problem).
            fs = -np.array([info['returns'][0] for info in infos])
            # When batching, you could have generated too many samples compared
            # to the actual evaluations. So we cut it off in this case.
            xs = xs[:len(fs)]
            # Update CMA-ES params based on sample fitness.
            es.tell(xs, fs)
            logger.push_prefix('itr #%d | ' % itr)
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array(
                [info['undiscounted_return'] for info in infos])
            logger.record_tabular('AverageReturn',
                                  np.mean(undiscounted_returns))
            # Bug fix: this previously logged the mean again under
            # 'StdReturn'; report the standard deviation instead.
            logger.record_tabular('StdReturn', np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
            logger.record_tabular('MinReturn', np.min(undiscounted_returns))
            logger.record_tabular('AverageDiscountedReturn', np.mean(fs))
            logger.record_tabular(
                'AvgTrajLen',
                np.mean([len(info['returns']) for info in infos]))
            self.env.log_diagnostics(infos)
            self.policy.log_diagnostics(infos)
            logger.save_itr_params(
                itr, dict(
                    itr=itr,
                    policy=self.policy,
                    env=self.env,
                ))
            logger.dump_tabular(with_prefix=False)
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
            logger.pop_prefix()
            # Update iteration.
            itr += 1
        # Set final params: es.result()[0] is the best evaluated solution.
        self.policy.set_param_values(es.result()[0])
        parallel_sampler.terminate_task()
import theano
import theano.tensor as TT
from rllab.algos import BatchPolopt
from rllab.misc import ext
import rllab.misc.logger as logger
from rllab.misc.overrides import overrides
from rllab.optimizers import PenaltyLbfgsOptimizer
class NPO(BatchPolopt):
    """
    Natural Policy Optimization.

    Maximizes an importance-sampled surrogate objective subject to a
    trust-region constraint on the mean KL divergence between the
    pre-update and post-update policy, solved with a constrained
    optimizer (penalized L-BFGS by default).
    """

    def __init__(self,
                 optimizer=None,
                 optimizer_args=None,
                 step_size=0.01,
                 truncate_local_is_ratio=None,
                 **kwargs):
        """
        :param optimizer: Constrained optimizer; defaults to a
            PenaltyLbfgsOptimizer built from `optimizer_args`.
        :param optimizer_args: Keyword arguments for the default optimizer.
        :param step_size: Upper bound on the mean KL divergence per update.
        :param truncate_local_is_ratio: If not None, cap each per-sample
            importance ratio at this value before computing the loss.
        :param kwargs: Forwarded to BatchPolopt (policy, baseline, env, ...).
        """
        if optimizer is None:
            if optimizer_args is None:
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.step_size = step_size
        self.truncate_local_is_ratio = truncate_local_is_ratio
        super(NPO, self).__init__(**kwargs)

    @overrides
    def init_opt(self):
        """Build the symbolic surrogate loss and mean-KL constraint and
        register them with the optimizer."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies carry an extra (time) dimension on all inputs.
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = ext.new_tensor(
            'advantage', ndim=1 + is_recurrent, dtype=theano.config.floatX)
        dist = self.policy.distribution
        # Placeholders for the pre-update distribution parameters; used for
        # both the KL divergence and the importance ratio.
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX)
            for k in dist.dist_info_keys
        }
        old_dist_info_vars_list = [
            old_dist_info_vars[k] for k in dist.dist_info_keys
        ]
        state_info_vars = {
            k: ext.new_tensor(
                k, ndim=2 + is_recurrent, dtype=theano.config.floatX)
            for k in self.policy.state_info_keys
        }
        state_info_vars_list = [
            state_info_vars[k] for k in self.policy.state_info_keys
        ]
        if is_recurrent:
            # Mask of valid (non-padded) timesteps for recurrent batches.
            valid_var = TT.matrix('valid')
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars,
                                       dist_info_vars)
        if self.truncate_local_is_ratio is not None:
            # Cap the per-sample importance ratio to limit the influence of
            # any single sample.
            lr = TT.minimum(self.truncate_local_is_ratio, lr)
        if is_recurrent:
            # Average only over valid timesteps.
            mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
            surr_loss = -TT.sum(
                lr * advantage_var * valid_var) / TT.sum(valid_var)
        else:
            mean_kl = TT.mean(kl)
            # Negated because the optimizer minimizes.
            surr_loss = -TT.mean(lr * advantage_var)
        input_list = [
            obs_var,
            action_var,
            advantage_var,
        ] + state_info_vars_list + old_dist_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(
            loss=surr_loss,
            target=self.policy,
            leq_constraint=(mean_kl, self.step_size),
            inputs=input_list,
            constraint_name="mean_kl")
        return dict()

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one constrained optimization step on the collected samples
        and log loss/KL diagnostics before and after the step."""
        all_input_values = tuple(
            ext.extract(samples_data, "observations", "actions", "advantages"))
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [
            agent_infos[k] for k in self.policy.distribution.dist_info_keys
        ]
        all_input_values += tuple(state_info_list) + tuple(dist_info_list)
        if self.policy.recurrent:
            all_input_values += (samples_data["valids"], )
        loss_before = self.optimizer.loss(all_input_values)
        mean_kl_before = self.optimizer.constraint_val(all_input_values)
        self.optimizer.optimize(all_input_values)
        mean_kl = self.optimizer.constraint_val(all_input_values)
        loss_after = self.optimizer.loss(all_input_values)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('MeanKLBefore', mean_kl_before)
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('dLoss', loss_before - loss_after)
        return dict()

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """State to persist for iteration `itr` (policy, baseline, env)."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
import numpy as np
import theano.tensor as TT
from rllab.distributions import Distribution
TINY = 1e-8
class Bernoulli(Distribution):
    """Factorized Bernoulli distribution over binary vectors of length
    `dim`, parameterized by per-component success probabilities "p"."""

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new), summed over the last axis."""
        p_old = old_dist_info_vars["p"]
        p_new = new_dist_info_vars["p"]
        kl_on = p_old * (TT.log(p_old + TINY) - TT.log(p_new + TINY))
        kl_off = (1 - p_old) * (
            TT.log(1 - p_old + TINY) - TT.log(1 - p_new + TINY))
        return TT.sum(kl_on + kl_off, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL(old || new), summed over the last axis."""
        p_old = old_dist_info["p"]
        p_new = new_dist_info["p"]
        kl_on = p_old * (np.log(p_old + TINY) - np.log(p_new + TINY))
        kl_off = (1 - p_old) * (
            np.log(1 - p_old + TINY) - np.log(1 - p_new + TINY))
        return np.sum(kl_on + kl_off, axis=-1)

    def sample(self, dist_info):
        """Draw binary samples; entry i is 1 with probability p[i]."""
        p = np.asarray(dist_info["p"])
        uniforms = np.random.uniform(low=0., high=1., size=p.shape)
        return np.cast['int'](uniforms < p)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars,
                             new_dist_info_vars):
        """Symbolic p_new(x) / p_old(x), multiplied over the last axis."""
        p_old = old_dist_info_vars["p"]
        p_new = new_dist_info_vars["p"]
        ratio_on = x_var * p_new / (p_old + TINY)
        ratio_off = (1 - x_var) * (1 - p_new) / (1 - p_old + TINY)
        return TT.prod(ratio_on + ratio_off, axis=-1)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log p(x), summed over the last axis."""
        p = dist_info_vars["p"]
        ll_on = x_var * TT.log(p + TINY)
        ll_off = (1 - x_var) * TT.log(1 - p + TINY)
        return TT.sum(ll_on + ll_off, axis=-1)

    def log_likelihood(self, xs, dist_info):
        """Numeric log p(x), summed over the last axis."""
        p = dist_info["p"]
        ll_on = xs * np.log(p + TINY)
        ll_off = (1 - xs) * np.log(1 - p + TINY)
        return np.sum(ll_on + ll_off, axis=-1)

    def entropy(self, dist_info):
        """Numeric entropy, summed over the last axis."""
        p = dist_info["p"]
        ent_on = -p * np.log(p + TINY)
        ent_off = -(1 - p) * np.log(1 - p + TINY)
        return np.sum(ent_on + ent_off, axis=-1)

    @property
    def dist_info_keys(self):
        return ["p"]
import numpy as np
import theano
import theano.tensor as TT
from rllab.distributions import Categorical
from rllab.distributions import Distribution
TINY = 1e-8
class RecurrentCategorical(Distribution):
    """Categorical distribution over sequences: each (n, t) position is an
    independent categorical over `dim` categories. All inputs are laid out
    as N * T * A tensors."""

    def __init__(self, dim):
        # Delegate all per-step computations to a flat categorical.
        self._cat = Categorical(dim)
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two categorical distributions
        """
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A
        return TT.sum(
            old_prob_var *
            (TT.log(old_prob_var + TINY) - TT.log(new_prob_var + TINY)),
            axis=2)

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two categorical distributions
        """
        old_prob = old_dist_info["prob"]
        new_prob = new_dist_info["prob"]
        return np.sum(
            old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
            axis=2)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars,
                             new_dist_info_vars):
        """Symbolic per-step importance ratio, shape N * T."""
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A: flatten to (N*T) * A, delegate to the
        # flat categorical, then restore the N * T layout.
        a_dim = x_var.shape[-1]
        flat_ratios = self._cat.likelihood_ratio_sym(
            x_var.reshape((-1, a_dim)),
            dict(prob=old_prob_var.reshape((-1, a_dim))),
            dict(prob=new_prob_var.reshape((-1, a_dim))))
        return flat_ratios.reshape(old_prob_var.shape[:2])

    def entropy(self, dist_info):
        """Per-step entropy (NumPy), shape N * T."""
        probs = dist_info["prob"]
        return -np.sum(probs * np.log(probs + TINY), axis=2)

    def log_likelihood_sym(self, xs, dist_info_vars):
        """Symbolic per-step log-likelihood, shape N * T."""
        probs = dist_info_vars["prob"]
        # Assume layout is N * T * A
        a_dim = probs.shape[-1]
        flat_logli = self._cat.log_likelihood_sym(
            xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    def log_likelihood(self, xs, dist_info):
        """Per-step log-likelihood (NumPy), shape N * T."""
        probs = dist_info["prob"]
        # Assume layout is N * T * A
        a_dim = probs.shape[-1]
        # Bug fix: delegate to the non-symbolic log_likelihood. This method
        # receives NumPy arrays; the previous call to log_likelihood_sym
        # built a Theano expression instead of returning numeric values.
        flat_logli = self._cat.log_likelihood(
            xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    @property
    def dist_info_keys(self):
        return ["prob"]
import numpy as np
import theano.tensor as TT
from rllab.distributions import Distribution
class DiagonalGaussian(Distribution):
    """Multivariate Gaussian distribution with a diagonal covariance
    matrix, parameterized by a "mean" vector and a "log_std" vector."""

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        # Dimensionality of the event space.
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the KL divergence of two multivariate Gaussian distribution
        with diagonal covariance matrices (symbolic version).
        """
        old_means = old_dist_info_vars["mean"]
        old_log_stds = old_dist_info_vars["log_std"]
        new_means = new_dist_info_vars["mean"]
        new_log_stds = new_dist_info_vars["log_std"]
        old_std = TT.exp(old_log_stds)
        new_std = TT.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = TT.square(old_means - new_means) + \
            TT.square(old_std) - TT.square(new_std)
        # 1e-8 guards against division by zero for very small stds.
        denominator = 2 * TT.square(new_std) + 1e-8
        return TT.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two multivariate Gaussian distribution
        with diagonal covariance matrices (NumPy version; mirrors kl_sym).
        """
        old_means = old_dist_info["mean"]
        old_log_stds = old_dist_info["log_std"]
        new_means = new_dist_info["mean"]
        new_log_stds = new_dist_info["log_std"]
        old_std = np.exp(old_log_stds)
        new_std = np.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = np.square(old_means - new_means) + \
            np.square(old_std) - np.square(new_std)
        denominator = 2 * np.square(new_std) + 1e-8
        return np.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars,
                             new_dist_info_vars):
        """Symbolic importance ratio p_new(x) / p_old(x)."""
        logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
        logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
        return TT.exp(logli_new - logli_old)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log-density of `x_var`, summed over the last axis."""
        means = dist_info_vars["mean"]
        log_stds = dist_info_vars["log_std"]
        # Standardized residuals.
        zs = (x_var - means) / TT.exp(log_stds)
        return - TT.sum(log_stds, axis=-1) - \
            0.5 * TT.sum(TT.square(zs), axis=-1) - \
            0.5 * means.shape[-1] * np.log(2 * np.pi)

    def sample(self, dist_info):
        """Draw samples via the reparameterization mean + std * N(0, 1)."""
        means = dist_info["mean"]
        log_stds = dist_info["log_std"]
        rnd = np.random.normal(size=means.shape)
        return rnd * np.exp(log_stds) + means

    def log_likelihood(self, xs, dist_info):
        """Log-density of `xs` (NumPy; mirrors log_likelihood_sym)."""
        means = dist_info["mean"]
        log_stds = dist_info["log_std"]
        zs = (xs - means) / np.exp(log_stds)
        return - np.sum(log_stds, axis=-1) - \
            0.5 * np.sum(np.square(zs), axis=-1) - \
            0.5 * means.shape[-1] * np.log(2 * np.pi)

    def entropy(self, dist_info):
        """Differential entropy (NumPy), summed over the last axis."""
        log_stds = dist_info["log_std"]
        return np.sum(log_stds + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    def entropy_sym(self, dist_info_var):
        """Differential entropy (symbolic), summed over the last axis."""
        log_std_var = dist_info_var["log_std"]
        return TT.sum(log_std_var + TT.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    @property
    def dist_info_keys(self):
        return ["mean", "log_std"]
import lasagne
import lasagne.layers as L
import theano
import theano.tensor as TT
class ParamLayer(L.Layer):
    """Layer that ignores its input values and emits a learned parameter
    vector, tiled to match the input's leading dimensions."""

    def __init__(self,
                 incoming,
                 num_units,
                 param=lasagne.init.Constant(0.),
                 trainable=True,
                 **kwargs):
        super(ParamLayer, self).__init__(incoming, **kwargs)
        self.num_units = num_units
        self.param = self.add_param(
            param, (num_units, ), name="param", trainable=trainable)

    def get_output_shape_for(self, input_shape):
        # Same leading dims as the input; last dim becomes num_units.
        return input_shape[:-1] + (self.num_units, )

    def get_output_for(self, input, **kwargs):
        ndim = input.ndim
        # Reshape to (1, ..., 1, num_units), then tile across all leading
        # dimensions of the input.
        broadcast_shape = (1, ) * (ndim - 1) + (self.num_units, )
        param_row = TT.reshape(self.param, broadcast_shape)
        reps = TT.concatenate([input.shape[:-1], [1]])
        return TT.tile(param_row, reps, ndim=ndim)
class OpLayer(L.MergeLayer):
    """Merge layer applying an arbitrary callable `op` to its inputs;
    `shape_op` maps the input shapes to the output shape."""

    def __init__(self,
                 incoming,
                 op,
                 shape_op=lambda x: x,
                 extras=None,
                 **kwargs):
        extras = [] if extras is None else extras
        all_incomings = [incoming] + extras
        super(OpLayer, self).__init__(all_incomings, **kwargs)
        self.op = op
        self.shape_op = shape_op
        self.incomings = all_incomings

    def get_output_shape_for(self, input_shapes):
        return self.shape_op(*input_shapes)

    def get_output_for(self, inputs, **kwargs):
        return self.op(*inputs)
class BatchNormLayer(L.Layer):
    """
    lasagne.layers.BatchNormLayer(incoming, axes='auto', epsilon=1e-4,
    alpha=0.1, mode='low_mem',
    beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
    mean=lasagne.init.Constant(0), std=lasagne.init.Constant(1), **kwargs)

    Batch Normalization

    This layer implements batch normalization of its inputs, following [1]_:

    .. math::
        y = \\frac{x - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\gamma + \\beta

    That is, the input is normalized to zero mean and unit variance, and then
    linearly transformed. The crucial part is that the mean and variance are
    computed across the batch dimension, i.e., over examples, not per example.

    During training, :math:`\\mu` and :math:`\\sigma^2` are defined to be the
    mean and variance of the current input mini-batch :math:`x`, and during
    testing, they are replaced with average statistics over the training
    data. Consequently, this layer has four stored parameters: :math:`\\beta`,
    :math:`\\gamma`, and the averages :math:`\\mu` and :math:`\\sigma^2`
    (nota bene: instead of :math:`\\sigma^2`, the layer actually stores
    :math:`1 / \\sqrt{\\sigma^2 + \\epsilon}`, for compatibility to cuDNN).
    By default, this layer learns the average statistics as exponential moving
    averages computed during training, so it can be plugged into an existing
    network without any changes of the training procedure (see Notes).

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    axes : 'auto', int or tuple of int
        The axis or axes to normalize over. If ``'auto'`` (the default),
        normalize over all axes except for the second: this will normalize over
        the minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers.
    epsilon : scalar
        Small constant :math:`\\epsilon` added to the variance before taking
        the square root and dividing by it, to avoid numerical problems
    alpha : scalar
        Coefficient for the exponential moving average of batch-wise means and
        standard deviations computed during training; the closer to one, the
        more it will depend on the last batches seen
    beta : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for :math:`\\beta`. Must match
        the incoming shape, skipping all axes in `axes`. Set to ``None`` to fix
        it to 0.0 instead of learning it.
        See :func:`lasagne.utils.create_param` for more information.
    gamma : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for :math:`\\gamma`. Must
        match the incoming shape, skipping all axes in `axes`. Set to ``None``
        to fix it to 1.0 instead of learning it.
        See :func:`lasagne.utils.create_param` for more information.
    mean : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\mu`. Must match
        the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    std : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`1 / \\sqrt{
        \\sigma^2 + \\epsilon}`. Must match the incoming shape, skipping all
        axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.

    Notes
    -----
    This layer should be inserted between a linear transformation (such as a
    :class:`DenseLayer`, or :class:`Conv2DLayer`) and its nonlinearity. The
    convenience function :func:`batch_norm` modifies an existing layer to
    insert batch normalization in front of its nonlinearity.

    The behavior can be controlled by passing keyword arguments to
    :func:`lasagne.layers.get_output()` when building the output expression
    of any network containing this layer.

    During training, [1]_ normalize each input mini-batch by its statistics
    and update an exponential moving average of the statistics to be used for
    validation. This can be achieved by passing ``deterministic=False``.
    For validation, [1]_ normalize each input mini-batch by the stored
    statistics. This can be achieved by passing ``deterministic=True``.

    For more fine-grained control, ``batch_norm_update_averages`` can be passed
    to update the exponential moving averages (``True``) or not (``False``),
    and ``batch_norm_use_averages`` can be passed to use the exponential moving
    averages for normalization (``True``) or normalize each mini-batch by its
    own statistics (``False``). These settings override ``deterministic``.

    Note that for testing a model after training, [1]_ replace the stored
    exponential moving average statistics by fixing all network weights and
    re-computing average statistics over the training data in a layerwise
    fashion. This is not part of the layer implementation.

    In case you set `axes` to not include the batch dimension (the first axis,
    usually), normalization is done per example, not across examples. This does
    not require any averages, so you can pass ``batch_norm_update_averages``
    and ``batch_norm_use_averages`` as ``False`` in this case.

    See also
    --------
    batch_norm : Convenience function to apply batch normalization to a layer

    References
    ----------
    .. [1] Ioffe, Sergey and Szegedy, Christian (2015):
           Batch Normalization: Accelerating Deep Network Training by Reducing
           Internal Covariate Shift. http://arxiv.org/abs/1502.03167.
    """

    def __init__(self,
                 incoming,
                 axes='auto',
                 epsilon=1e-4,
                 alpha=0.1,
                 mode='low_mem',
                 beta=lasagne.init.Constant(0),
                 gamma=lasagne.init.Constant(1),
                 mean=lasagne.init.Constant(0),
                 std=lasagne.init.Constant(1),
                 **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0, ) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes, )
        self.axes = axes
        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode
        # create parameters, ignoring all dimensions in axes
        shape = [
            size for axis, size in enumerate(self.input_shape)
            if axis not in self.axes
        ]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        # beta/gamma are learned; None fixes them to 0/1 respectively.
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(
                beta, shape, 'beta', trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(
                gamma, shape, 'gamma', trainable=True, regularizable=False)
        # Running statistics: not trainable, updated as moving averages.
        self.mean = self.add_param(
            mean, shape, 'mean', trainable=False, regularizable=False)
        self.std = self.add_param(
            std, shape, 'std', trainable=False, regularizable=False)

    def get_output_for(self, input, deterministic=False, **kwargs):
        """Normalize `input`; use mini-batch statistics during training and
        the stored running averages when `deterministic` is True (both
        overridable via the batch_norm_* keyword arguments)."""
        input_mean = input.mean(self.axes)
        input_std = TT.sqrt(input.var(self.axes) + self.epsilon)
        # Decide whether to use the stored averages or mini-batch statistics
        use_averages = kwargs.get('batch_norm_use_averages', deterministic)
        if use_averages:
            mean = self.mean
            std = self.std
        else:
            mean = input_mean
            std = input_std
        # Decide whether to update the stored averages
        update_averages = kwargs.get('batch_norm_update_averages',
                                     not deterministic)
        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_std = theano.clone(self.std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = (
                (1 - self.alpha) * running_mean + self.alpha * input_mean)
            running_std.default_update = (
                (1 - self.alpha) * running_std + self.alpha * input_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            std += 0 * running_std
        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(list(range(input.ndim - len(self.axes))))
        pattern = [
            'x' if input_axis in self.axes else next(param_axes)
            for input_axis in range(input.ndim)
        ]
        # apply dimshuffle pattern to all parameters
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        std = std.dimshuffle(pattern)
        # normalize
        normalized = (input - mean) * (gamma * TT.inv(std)) + beta
        return normalized
def batch_norm(layer, **kwargs):
    """
    Wrap an existing layer with batch normalization.

    The given layer is modified in place: its nonlinearity (if any) is
    stolen and re-applied after the normalization, and its bias (if any)
    is removed, because the normalization's beta parameter makes it
    redundant. A :class:`BatchNormLayer` is stacked on the modified layer,
    with a ``NonlinearityLayer`` on top when a nonlinearity was present.

    Parameters
    ----------
    layer : A :class:`Layer` instance
        The layer to normalize; note that it will be irreversibly modified
        as described above.
    **kwargs
        Any additional keyword arguments are passed on to the
        :class:`BatchNormLayer` constructor.

    Returns
    -------
    BatchNormLayer or NonlinearityLayer instance
        A batch normalization layer stacked on the given modified `layer`,
        or a nonlinearity layer stacked on top of both if `layer` was
        nonlinear.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer, batch_norm
    >>> from lasagne.nonlinearities import tanh
    >>> l1 = InputLayer((64, 768))
    >>> l2 = batch_norm(DenseLayer(l1, num_units=500, nonlinearity=tanh))
    >>> from lasagne.layers import get_all_layers
    >>> [l.__class__.__name__ for l in get_all_layers(l2)]
    ['InputLayer', 'DenseLayer', 'BatchNormLayer', 'NonlinearityLayer']
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        # Strip the nonlinearity; it is re-applied on top of the
        # normalization below.
        layer.nonlinearity = lasagne.nonlinearities.identity
    if getattr(layer, 'b', None) is not None:
        # Drop the now-redundant bias parameter.
        del layer.params[layer.b]
        layer.b = None
    stacked = BatchNormLayer(layer, **kwargs)
    if nonlinearity is not None:
        stacked = L.NonlinearityLayer(stacked, nonlinearity)
    return stacked
from contextlib import contextmanager
from rllab.core import Serializable
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
# Module-level flag consulted by Parameterized.__setstate__: when False,
# unpickling skips restoring parameter values.
load_params = True


@contextmanager
def suppress_params_loading():
    """Context manager that temporarily disables parameter loading during
    unpickling of Parameterized objects.

    Uses try/finally so the flag is restored even if the with-body raises;
    the previous version left `load_params` stuck at False on exception.
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        load_params = True
class Parameterized(Serializable):
    """Base class for objects with trainable parameters (shared variables).

    Provides tag-filtered access to parameters with per-tag caching, plus
    flat-vector get/set used by optimizers and by pickling.
    """

    def __init__(self):
        # Caches keyed by the sorted (tag, value) tuple, so repeated
        # queries with the same tags avoid recomputing parameter lists.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}

    def get_params_internal(self, **tags):
        """
        Internal method to be implemented which does not perform caching
        """
        raise NotImplementedError

    def get_params(
            self, **tags
    ):  # adds the list to the _cached_params dict under the tuple key (one)
        """
        Get the list of parameters, filtered by the provided tags.
        Some common tags include 'regularizable' and 'trainable'
        """
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_params:
            self._cached_params[tag_tuple] = self.get_params_internal(**tags)
        return self._cached_params[tag_tuple]

    def get_param_dtypes(self, **tags):
        """Dtypes of the parameters selected by `tags` (cached)."""
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_dtypes:
            self._cached_param_dtypes[tag_tuple] = \
                [param.get_value(borrow=True).dtype
                 for param in self.get_params(**tags)]
        return self._cached_param_dtypes[tag_tuple]

    def get_param_shapes(self, **tags):
        """Shapes of the parameters selected by `tags` (cached)."""
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_shapes:
            self._cached_param_shapes[tag_tuple] = \
                [param.get_value(borrow=True).shape
                 for param in self.get_params(**tags)]
        return self._cached_param_shapes[tag_tuple]

    def get_param_values(self, **tags):
        """Return all selected parameter values as one flat vector."""
        return flatten_tensors([
            param.get_value(borrow=True) for param in self.get_params(**tags)
        ])

    def set_param_values(self, flattened_params, **tags):
        """Assign selected parameters from a flat vector (inverse of
        get_param_values). Pass debug=True to print each assignment."""
        debug = tags.pop("debug", False)
        param_values = unflatten_tensors(flattened_params,
                                         self.get_param_shapes(**tags))
        for param, dtype, value in zip(
                self.get_params(**tags), self.get_param_dtypes(**tags),
                param_values):
            # Cast back to the parameter's original dtype before assigning.
            param.set_value(value.astype(dtype))
            if debug:
                print("setting value of %s" % param.name)

    def flat_to_params(self, flattened_params, **tags):
        """Split a flat vector into per-parameter arrays (no assignment)."""
        return unflatten_tensors(flattened_params, self.get_param_shapes(
            **tags))

    def __getstate__(self):
        # Persist the flat parameter vector alongside Serializable state.
        d = Serializable.__getstate__(self)
        d["params"] = self.get_param_values()
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        # Honor suppress_params_loading(): skip restoring values when the
        # module-level flag is cleared.
        if load_params:
            self.set_param_values(d["params"])
import numpy as np
from rllab.misc import special
from rllab.misc import tensor_utils
import rllab.misc.logger as logger
from rllab.sampler import utils
class Sampler(object):
    """Abstract interface for collecting and post-processing rollouts.

    Concrete samplers implement worker lifecycle management plus the
    collection and processing of sample paths for each iteration.
    """

    def start_worker(self):
        """Initialize the sampler, e.g. launching parallel workers if
        necessary."""
        raise NotImplementedError

    def shutdown_worker(self):
        """Terminate workers if necessary."""
        raise NotImplementedError

    def obtain_samples(self, itr):
        """Collect samples for iteration number `itr`.

        :param itr: Iteration number.
        :return: A list of paths.
        """
        raise NotImplementedError

    def process_samples(self, itr, paths):
        """Process collected paths into training data.

        :param itr: Iteration number.
        :param paths: A list of collected paths.
        :return: Processed sample data (typically a dictionary of
            concatenated tensors).
        """
        raise NotImplementedError
class BaseSampler(Sampler):
    """Sampler base class that post-processes collected paths into the
    tensor batch consumed by batch policy optimization algorithms."""

    def __init__(self, algo):
        """
        :type algo: BatchPolopt
        """
        self.algo = algo

    def process_samples(self, itr, paths):
        """Compute GAE advantages and discounted returns for each path,
        assemble the training batch (concatenated for feed-forward
        policies, padded to equal length for recurrent ones), fit the
        baseline, and log statistics."""
        baselines = []
        returns = []
        # Predict baselines for every path, batched when supported.
        if hasattr(self.algo.baseline, "predict_n"):
            all_path_baselines = self.algo.baseline.predict_n(paths)
        else:
            all_path_baselines = [
                self.algo.baseline.predict(path) for path in paths
            ]
        for idx, path in enumerate(paths):
            # Append a terminal value of 0 so the deltas below can index t+1.
            path_baselines = np.append(all_path_baselines[idx], 0)
            # One-step TD residuals; their discounted cumulative sum is the
            # GAE advantage estimate.
            deltas = path["rewards"] + \
                self.algo.discount * path_baselines[1:] - \
                path_baselines[:-1]
            path["advantages"] = special.discount_cumsum(
                deltas, self.algo.discount * self.algo.gae_lambda)
            path["returns"] = special.discount_cumsum(path["rewards"],
                                                      self.algo.discount)
            baselines.append(path_baselines[:-1])
            returns.append(path["returns"])
        # Fraction of return variance explained by the baseline.
        ev = special.explained_variance_1d(
            np.concatenate(baselines), np.concatenate(returns))
        if not self.algo.policy.recurrent:
            # Feed-forward policy: concatenate all paths into flat tensors.
            observations = tensor_utils.concat_tensor_list(
                [path["observations"] for path in paths])
            actions = tensor_utils.concat_tensor_list(
                [path["actions"] for path in paths])
            rewards = tensor_utils.concat_tensor_list(
                [path["rewards"] for path in paths])
            returns = tensor_utils.concat_tensor_list(
                [path["returns"] for path in paths])
            advantages = tensor_utils.concat_tensor_list(
                [path["advantages"] for path in paths])
            env_infos = tensor_utils.concat_tensor_dict_list(
                [path["env_infos"] for path in paths])
            agent_infos = tensor_utils.concat_tensor_dict_list(
                [path["agent_infos"] for path in paths])
            if self.algo.center_adv:
                advantages = utils.center_advantages(advantages)
            if self.algo.positive_adv:
                advantages = utils.shift_advantages_to_positive(advantages)
            average_discounted_return = \
                np.mean([path["returns"][0] for path in paths])
            undiscounted_returns = [sum(path["rewards"]) for path in paths]
            ent = np.mean(self.algo.policy.distribution.entropy(agent_infos))
            samples_data = dict(
                observations=observations,
                actions=actions,
                rewards=rewards,
                returns=returns,
                advantages=advantages,
                env_infos=env_infos,
                agent_infos=agent_infos,
                paths=paths,
            )
        else:
            # Recurrent policy: pad every path to the same length and keep a
            # `valids` mask marking real (non-padded) timesteps.
            max_path_length = max([len(path["advantages"]) for path in paths])
            # make all paths the same length (pad extra advantages with 0)
            obs = [path["observations"] for path in paths]
            obs = tensor_utils.pad_tensor_n(obs, max_path_length)
            if self.algo.center_adv:
                # Normalize using statistics of the raw (unpadded)
                # advantages only.
                raw_adv = np.concatenate(
                    [path["advantages"] for path in paths])
                adv_mean = np.mean(raw_adv)
                adv_std = np.std(raw_adv) + 1e-8
                adv = [(path["advantages"] - adv_mean) / adv_std
                       for path in paths]
            else:
                adv = [path["advantages"] for path in paths]
            adv = np.asarray(
                [tensor_utils.pad_tensor(a, max_path_length) for a in adv])
            actions = [path["actions"] for path in paths]
            actions = tensor_utils.pad_tensor_n(actions, max_path_length)
            rewards = [path["rewards"] for path in paths]
            rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
            returns = [path["returns"] for path in paths]
            returns = tensor_utils.pad_tensor_n(returns, max_path_length)
            agent_infos = [path["agent_infos"] for path in paths]
            agent_infos = tensor_utils.stack_tensor_dict_list([
                tensor_utils.pad_tensor_dict(p, max_path_length)
                for p in agent_infos
            ])
            env_infos = [path["env_infos"] for path in paths]
            env_infos = tensor_utils.stack_tensor_dict_list([
                tensor_utils.pad_tensor_dict(p, max_path_length)
                for p in env_infos
            ])
            valids = [np.ones_like(path["returns"]) for path in paths]
            valids = tensor_utils.pad_tensor_n(valids, max_path_length)
            average_discounted_return = \
                np.mean([path["returns"][0] for path in paths])
            undiscounted_returns = [sum(path["rewards"]) for path in paths]
            # Entropy averaged over valid timesteps only.
            ent = np.sum(
                self.algo.policy.distribution.entropy(agent_infos) *
                valids) / np.sum(valids)
            samples_data = dict(
                observations=obs,
                actions=actions,
                advantages=adv,
                rewards=rewards,
                returns=returns,
                valids=valids,
                agent_infos=agent_infos,
                env_infos=env_infos,
                paths=paths,
            )
        logger.log("fitting baseline...")
        # Prefer the sample-aware fitting API when the baseline supports it.
        if hasattr(self.algo.baseline, 'fit_with_samples'):
            self.algo.baseline.fit_with_samples(paths, samples_data)
        else:
            self.algo.baseline.fit(paths)
        logger.log("fitted")
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('AverageDiscountedReturn',
                              average_discounted_return)
        logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
        logger.record_tabular('ExplainedVariance', ev)
        logger.record_tabular('NumTrajs', len(paths))
        logger.record_tabular('Entropy', ent)
        logger.record_tabular('Perplexity', np.exp(ent))
        logger.record_tabular('StdReturn', np.std(undiscounted_returns))
        logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
        logger.record_tabular('MinReturn', np.min(undiscounted_returns))
        return samples_data
import lasagne
import lasagne.init
import lasagne.layers as L
import lasagne.nonlinearities as NL
import theano.tensor as TT
from rllab.core import batch_norm
from rllab.core import LasagnePowered
from rllab.core import Serializable
from rllab.misc import ext
from rllab.q_functions import QFunction
class ContinuousMLPQFunction(QFunction, LasagnePowered):
    """MLP Q-function Q(s, a) for continuous actions; the action input is
    concatenated into a configurable hidden layer (DDPG-style critic)."""

    def __init__(self,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=NL.rectify,
                 hidden_W_init=lasagne.init.HeUniform(),
                 hidden_b_init=lasagne.init.Constant(0.),
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 output_W_init=lasagne.init.Uniform(-3e-3, 3e-3),
                 output_b_init=lasagne.init.Uniform(-3e-3, 3e-3),
                 bn=False):
        """
        :param env_spec: Environment spec providing observation/action dims.
        :param hidden_sizes: Sizes of the hidden layers.
        :param action_merge_layer: Index of the layer (may be negative,
            Python-style) at which the action input is concatenated in.
        :param bn: Whether to apply batch normalization before each layer.
        """
        Serializable.quick_init(self, locals())
        l_obs = L.InputLayer(
            shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(
            shape=(None, env_spec.action_space.flat_dim), name="actions")
        n_layers = len(hidden_sizes) + 1
        if n_layers > 1:
            # Normalize a possibly-negative index into [0, n_layers).
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1
        l_hidden = l_obs
        for idx, size in enumerate(hidden_sizes):
            if bn:
                l_hidden = batch_norm(l_hidden)
            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % (idx + 1))
        if action_merge_layer == n_layers:
            # Merge the action just before the scalar output layer.
            l_hidden = L.ConcatLayer([l_hidden, l_action])
        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            W=output_W_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output")
        # Deterministic forward pass (e.g. batch norm in inference mode).
        output_var = L.get_output(l_output, deterministic=True).flatten()
        self._f_qval = ext.compile_function(
            [l_obs.input_var, l_action.input_var], output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity
        LasagnePowered.__init__(self, [l_output])

    def get_qval(self, observations, actions):
        """Numeric Q-values for a batch of (observation, action) pairs."""
        return self._f_qval(observations, actions)

    def get_qval_sym(self, obs_var, action_var, **kwargs):
        """Symbolic Q-values for the given input variables, flattened to a
        vector."""
        qvals = L.get_output(self._output_layer, {
            self._obs_layer: obs_var,
            self._action_layer: action_var
        }, **kwargs)
        return TT.reshape(qvals, (-1, ))
import numpy as np
from rllab.misc import ext
from rllab.spaces import Space
class Product(Space):
    """Cartesian product of several component spaces.

    Samples and values are tuples with one entry per component; the flat
    representation is the concatenation of the components' flat
    representations.
    """

    def __init__(self, *components):
        """Accept components either as varargs or as a single list/tuple."""
        if isinstance(components[0], (list, tuple)):
            assert len(components) == 1
            components = components[0]
        self._components = tuple(components)
        # Determine a dtype that every component's tensor can share.
        dtypes = [
            c.new_tensor_variable("tmp", extra_dims=0).dtype
            for c in components
        ]
        # TensorFlow dtypes expose `as_numpy_dtype`; convert so NumPy's
        # promotion rules apply uniformly.
        if len(dtypes) > 0 and hasattr(dtypes[0], "as_numpy_dtype"):
            dtypes = [d.as_numpy_dtype for d in dtypes]
        # NOTE(review): np.core.numerictypes.find_common_type is deprecated
        # in recent NumPy (>=1.25); fine for the NumPy versions this
        # Theano-era codebase targets.
        self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)

    def sample(self):
        """Sample one value from each component; returns a tuple."""
        return tuple(x.sample() for x in self._components)

    @property
    def components(self):
        return self._components

    def contains(self, x):
        """True iff x is a tuple whose entries lie in the component spaces."""
        return isinstance(x, tuple) and all(
            c.contains(xi) for c, xi in zip(self._components, x))

    def new_tensor_variable(self, name, extra_dims):
        """Create a symbolic variable for batches of flattened values."""
        return ext.new_tensor(
            name=name,
            ndim=extra_dims + 1,
            dtype=self._common_dtype,
        )

    @property
    def flat_dim(self):
        """Total flattened dimension (sum over components)."""
        return np.sum([c.flat_dim for c in self._components])

    def flatten(self, x):
        """Flatten one tuple value into a single 1-D array."""
        return np.concatenate(
            [c.flatten(xi) for c, xi in zip(self._components, x)])

    def flatten_n(self, xs):
        """Flatten a batch of tuple values into a 2-D array."""
        # Regroup from a list of tuples into per-component lists.
        xs_regrouped = [[x[i] for x in xs] for i in range(len(xs[0]))]
        flat_regrouped = [
            c.flatten_n(xi) for c, xi in zip(self.components, xs_regrouped)
        ]
        return np.concatenate(flat_regrouped, axis=-1)

    def unflatten(self, x):
        """Inverse of `flatten`: split x back into a tuple of values."""
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(x, np.cumsum(dims)[:-1])
        return tuple(
            c.unflatten(xi) for c, xi in zip(self._components, flat_xs))

    def unflatten_n(self, xs):
        """Inverse of `flatten_n`: returns a list of tuples."""
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(xs, np.cumsum(dims)[:-1], axis=-1)
        unflat_xs = [
            c.unflatten_n(xi) for c, xi in zip(self.components, flat_xs)
        ]
        unflat_xs_grouped = list(zip(*unflat_xs))
        return unflat_xs_grouped

    def __eq__(self, other):
        if not isinstance(other, Product):
            return False
        return tuple(self.components) == tuple(other.components)

    def __hash__(self):
        return hash(tuple(self.components))
import numpy as np
import theano
from rllab.core import Serializable
from rllab.misc import ext
from rllab.spaces import Space
class Box(Space):
    """
    A box in R^n.
    I.e., each coordinate is bounded.
    """

    def __init__(self, low, high, shape=None):
        """
        Two kinds of valid input:
        Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is
        provided
        Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are
        arrays of the same shape
        """
        if shape is None:
            assert low.shape == high.shape
            self.low = low
            self.high = high
        else:
            assert np.isscalar(low) and np.isscalar(high)
            # Broadcast the scalar bounds to the requested shape.
            self.low = low + np.zeros(shape)
            self.high = high + np.zeros(shape)

    def sample(self):
        """Draw a uniform sample from the box."""
        return np.random.uniform(
            low=self.low, high=self.high, size=self.low.shape)

    def contains(self, x):
        """True iff x has the box's shape and lies within the bounds.

        NOTE(review): assumes x is an ndarray (has `.shape`) — callers
        passing plain lists would need `np.asarray` first; verify usage.
        """
        return x.shape == self.shape and (x >= self.low).all() and (
            x <= self.high).all()

    @property
    def shape(self):
        return self.low.shape

    @property
    def flat_dim(self):
        """Total number of coordinates in the box."""
        return np.prod(self.low.shape)

    @property
    def bounds(self):
        """(low, high) bound arrays."""
        return self.low, self.high

    def flatten(self, x):
        return np.asarray(x).flatten()

    def unflatten(self, x):
        return np.asarray(x).reshape(self.shape)

    def flatten_n(self, xs):
        """Flatten a batch: output shape is (batch, flat_dim)."""
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], -1))

    def unflatten_n(self, xs):
        """Inverse of `flatten_n`: restore (batch,) + self.shape."""
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], ) + self.shape)

    def __repr__(self):
        return "Box" + str(self.shape)

    def __eq__(self, other):
        return isinstance(other, Box) and np.allclose(self.low, other.low) and \
            np.allclose(self.high, other.high)

    def __hash__(self):
        # BUG FIX: ndarrays are unhashable, so the original
        # `hash((self.low, self.high))` always raised TypeError. Hash an
        # immutable summary of the bounds instead (shape + raw bytes).
        # NOTE(review): __eq__ uses np.allclose, so boxes that are
        # allclose-equal but not byte-equal hash differently — a rare float
        # edge case, but strictly better than the guaranteed crash.
        return hash((np.asarray(self.low).shape,
                     np.asarray(self.low).tobytes(),
                     np.asarray(self.high).tobytes()))

    def new_tensor_variable(self, name, extra_dims):
        """Create a float symbolic variable with one extra batch dim per
        entry in `extra_dims`."""
        return ext.new_tensor(
            name=name, ndim=extra_dims + 1, dtype=theano.config.floatX)
from cached_property import cached_property
import numpy as np
from rllab import spaces
from rllab.core import Serializable
from rllab.envs import Step
from rllab.envs.mujoco import MujocoEnv
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc.overrides import overrides
# Large-but-finite magnitude used as the bound of the occluded
# observation space (see OcclusionEnv.observation_space).
BIG = 1e6
class OcclusionEnv(ProxyEnv, Serializable):
    """ Occludes part of the observation."""

    def __init__(self, env, sensor_idx):
        """
        :param sensor_idx: list or ndarray of indices to be shown. Other indices
            will be occluded. Can be either list of integer indices or boolean
            mask.
        """
        Serializable.quick_init(self, locals())
        self._set_sensor_mask(env, sensor_idx)
        super(OcclusionEnv, self).__init__(env)
        # Timestep duration; only MuJoCo environments expose a physical dt.
        self._dt = 1
        if isinstance(env, MujocoEnv):
            self._dt = env.sim.opt.timestep * env.frame_skip

    def _set_sensor_mask(self, env, sensor_idx):
        """Validate `sensor_idx` and store it as a boolean mask over the
        wrapped env's flat observation."""
        obsdim = env.observation_space.flat_dim
        if len(sensor_idx) > obsdim:
            raise ValueError(("Length of sensor mask ({0}) cannot be greater "
                              "than observation dim ({1})").format(
                                  len(sensor_idx), obsdim))
        if len(sensor_idx) == obsdim and not np.any(np.array(sensor_idx) > 1):
            # Full-length 0/1 vector: interpret as a boolean mask.
            # BUG FIX: builtin `bool` instead of `np.bool` — the alias was
            # deprecated in NumPy 1.20 and removed in 1.24.
            sensor_mask = np.array(sensor_idx, dtype=bool)
        elif np.any(np.unique(sensor_idx, return_counts=True)[1] > 1):
            raise ValueError(("Double entries or boolean mask "
                              "with dim ({0}) < observation dim ({1})").format(
                                  len(sensor_idx), obsdim))
        else:
            # Integer indices: scatter them into a fresh boolean mask.
            sensor_mask = np.zeros((obsdim, ), dtype=bool)
            sensor_mask[sensor_idx] = 1
        self._sensor_mask = sensor_mask

    def occlude(self, obs):
        """Return only the visible (masked-in) entries of `obs`."""
        return obs[self._sensor_mask]

    def get_current_obs(self):
        return self.occlude(self._wrapped_env.get_current_obs())

    @cached_property
    @overrides
    def observation_space(self):
        # Bounds of the occluded space are a large finite box matching the
        # occluded observation's shape.
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)

    @overrides
    def reset(self):
        obs = self._wrapped_env.reset()
        return self.occlude(obs)

    @overrides
    def step(self, action):
        next_obs, reward, done, info = self._wrapped_env.step(action)
        return Step(self.occlude(next_obs), reward, done, **info)

    @property
    def dt(self):
        """Physical duration of one step (1 for non-MuJoCo envs)."""
        return self._dt

    @overrides
    def log_diagnostics(self, paths):
        pass  # the wrapped env will be expecting its own observations in
        # paths, but they're not
import collections
from cached_property import cached_property
from rllab.envs import EnvSpec
class Env(object):
    """Abstract interface for reinforcement learning environments.

    Concrete environments must implement `step`, `reset`, and the two
    space properties; every other method has a reasonable default.
    """

    def step(self, action):
        """Advance the environment's dynamics by one timestep.

        Once an episode ends (`done` is True), `reset` must be called
        before stepping again.

        :param action: the action to apply.
        :return: a `(observation, reward, done, info)` tuple, where
            `observation` is the agent's view of the new state, `reward`
            is a float for the previous action, `done` flags episode end,
            and `info` is a dict of extra diagnostics.
        """
        raise NotImplementedError

    def reset(self):
        """Restore the environment to an initial state.

        :return: the first observation of the new episode (the initial
            reward is taken to be 0).
        """
        raise NotImplementedError

    @property
    def action_space(self):
        """The space of valid actions.

        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    @property
    def observation_space(self):
        """The space of observations.

        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    # Convenience accessors derived from the spaces above.
    @property
    def action_dim(self):
        """Flattened dimensionality of the action space."""
        return self.action_space.flat_dim

    def render(self):
        """Visualize the current state; no-op by default."""
        pass

    def log_diagnostics(self, paths):
        """Record per-iteration statistics from the collected paths."""
        pass

    @cached_property
    def spec(self):
        """Bundled description of this env's observation/action spaces."""
        return EnvSpec(
            observation_space=self.observation_space,
            action_space=self.action_space,
        )

    @property
    def horizon(self):
        """Maximum episode length, when the environment defines one."""
        raise NotImplementedError

    def terminate(self):
        """Clean-up hook; no-op by default."""
        pass

    def get_param_values(self):
        """Environment parameters for snapshotting (default: None)."""
        return None

    def set_param_values(self, params):
        """Restore environment parameters from a snapshot; no-op here."""
        pass
# Container for the values produced by a single `Env.step` call.
_Step = collections.namedtuple("Step",
                               ["observation", "reward", "done", "info"])


def Step(observation, reward, done, **kwargs):
    """Bundle one environment transition into a `Step` namedtuple.

    Convenience constructor mirroring the `(observation, reward, done,
    info)` contract of `Env.step`; any extra keyword arguments are
    collected into the `info` dict.
    """
    return _Step(observation, reward, done, kwargs)
# NOTE(review): the following lines are non-code residue from the data
# source (a web-page widget), commented out so they cannot be parsed as code:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.